{
"source": "jongharyu/split-knn-rules",
"score": 3
}
#### File: jongharyu/split-knn-rules/utils.py
```python
import argparse
import sys
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
def __init__(self, filename, mode="a"):
self.terminal = sys.stdout
self.log = open(filename, mode)
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
def generate_keys(base_k):
base_keys = ['Msplit_select1', # proposed
'Msplit_select0', # \approx big k-NN
]
keys = []
for k in base_k:
keys.extend(['{}_{}NN'.format(key, k) for key in base_keys])
return keys
```
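A minimal usage sketch for the helpers above, assuming the file is importable as `utils`; the flag name and log file name are illustrative only:

```python
# Hypothetical usage (not part of the repository): wiring str2bool into an
# argparse flag and mirroring stdout to a log file with Logger.
import argparse
import sys

from utils import Logger, str2bool

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', type=str2bool, nargs='?', const=True, default=False)
args = parser.parse_args(['--verbose', 'yes'])

sys.stdout = Logger('run.log')      # every subsequent print() also lands in run.log
print('verbose =', args.verbose)    # -> verbose = True
```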
{
"source": "jongha/stock-ai-book",
"score": 3
}
#### File: jongha/stock-ai-book/app.py
```python
import os
from flask import Flask, render_template, request, redirect, url_for, jsonify
import modules.base as base
import numpy as np
from modules.decorators.minified_response import minified_response
import config
import sqlite3
import csv
app = Flask(__name__)
# from flask.ext.sqlalchemy import SQLAlchemy
# app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:////tmp/flask_app.db')
# db = SQLAlchemy(app)
# class User(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(100))
# email = db.Column(db.String(100))
# def __init__(self, name, email):
# self.name = name
# self.email = email
@app.route('/', methods=['GET'])
@minified_response
def index(code=None):
data = None
return render_template('index.html', data=data)
@app.route('/evaluate.json', methods=['GET'])
def evaluate(code=None):
# if request.method == 'POST':
# u = User(request.form['name'], request.form['email'])
# db.session.add(u)
# db.session.commit()
# return redirect(url_for('index'))
if code is None:
code = request.args.get('code')
if code is not None:
return evaluate(code)
else:
return jsonify(None)
else:
data = None
json = None
if code:
try:
data, json, score = base.load(code)
except:
pass
if data is None or json is None:
import modules.loader as loader
data, json = loader.load(code)
score = loader.score(data, json)
base.dump(code, (data, json, score))
return jsonify(score)
@app.route('/code.json', methods=['GET'])
def get_code(name=None):
result = []
if name is None:
name = request.args.get('name')
if name is not None:
connection = sqlite3.Connection(config.DATA_STOCKS_SQLITE)
rows = connection.execute(
'SELECT name, code FROM stocks WHERE name like ?',
('%' + name + '%', ))
for row in rows:
result.append({'name': row[0], 'code': row[1]})
return jsonify(result)
def setup_database():
try:
os.remove(config.DATA_STOCKS_SQLITE)
except:
pass
connection = None
csv_file = None
try:
connection = sqlite3.Connection(config.DATA_STOCKS_SQLITE)
cursor = connection.cursor()
cursor.execute(
'CREATE TABLE "stocks" ("name" varchar(50), "code" varchar(6), "category" varchar(50), "product" varchar(50), "date" varchar(10), "end" varchar(10), "ceo" varchar(20), "homepage" varchar(100), "location" varchar(30));'
)
cursor.execute('CREATE UNIQUE INDEX stocks_name ON stocks(name);')
cursor.execute('CREATE UNIQUE INDEX stocks_code ON stocks(code);')
csv_file = open(config.DATA_STOCKS_CSV)
csv_reader = csv.reader(csv_file, delimiter=',')
cursor.executemany('INSERT INTO stocks VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
csv_reader)
cursor.close()
connection.commit()
finally:
if connection is not None:
connection.close()
connection = None
if csv_file is not None:
csv_file.close()
csv_file = None
if __name__ == '__main__':
import sys
if len(sys.argv) <= 1:
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port)
else:
print('Argument List:', str(sys.argv))
command = sys.argv[1]
if command == 'analytics':
analytics(sys.argv[2])
elif command == 'database':
setup_database()
```
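For context, a hypothetical client for the two JSON endpoints above; the base URL and the search keyword are assumptions, and the `requests` package is not part of this app:

```python
# Hypothetical client for the Flask endpoints above; values are placeholders.
import requests

BASE = 'http://localhost:8080'

# /code.json looks up stock codes whose name contains the given keyword.
codes = requests.get(BASE + '/code.json', params={'name': 'electronics'}).json()

# /evaluate.json computes (or loads a cached) valuation score for one code.
if codes:
    score = requests.get(BASE + '/evaluate.json',
                         params={'code': codes[0]['code']}).json()
    print(codes[0]['name'], score)
```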
#### File: modules/valuations/eps_bps.py
```python
import math

import config
from modules.valuations.valuation import Valuation
# Valuation idea:
#  - BPS five years out (grown at the BPS growth rate), discounted back -> asset value
#  - five years of EPS grown at the EPS growth rate, each year discounted -> earnings value
#  - enterprise value = BPS part (0.4) + EPS part (0.6)
class EPS_BPS(Valuation):
def __init__(self, valuation):
data = valuation.get_data()
json = valuation.get_json()
Valuation.__init__(self, data, json)
self.set_json('5_EPS_BPS', self.valuate())
def valuate(self):
try:
data = self.get_data()
json = self.get_json()
bps = json['BPS']
bps_5_growth = json['BPS_5_GROWTH']
eps_5_growth = json['EPS_5_GROWTH']
# discounted future enterprise value from BPS
bps_for_future = (bps * math.pow(1 + bps_5_growth, 5)) * math.pow(
1 - config.DATA_DISCOUNT_RATE, 5)
eps = data['EPS'].dropna()[:5]
sum_of_product = 0
for index in range(5): # 0: latest ~
sum_of_product += eps[index] * [0.4, 0.2, 0.2, 0.1, 0.1][index]
# discounted value of the five-year EPS flows
sum_of_5_year = 0
# terminal (perpetual) value
value_of_fixed = 0
for i in range(1, 6):
value_year = sum_of_product * math.pow(1 + eps_5_growth, i) * (
1 - config.DATA_DISCOUNT_RATE)
sum_of_5_year += value_year
if i == 5:
value_of_fixed = value_year * config.DATA_FIXED_RATE
# terminal value discounted back to the present
value_of_discount = value_of_fixed / math.pow(
1 + config.DATA_DISCOUNT_RATE, 5)
# discounted future enterprise value from EPS
value_of_future = sum_of_5_year + value_of_discount
# combine the BPS-based and EPS-based values with their 0.4 / 0.6 weights
value = (bps_for_future * config.DATA_VALUE_OF_BPS) + (
value_of_future * config.DATA_VALUE_OF_EPS)
return int(value)
except:
return None
```
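To make the arithmetic above concrete, a standalone sketch with made-up numbers; the discount rate, fixed rate, and the 0.4/0.6 weights stand in for the `config.DATA_*` constants, whose real values are not shown here:

```python
# Standalone sketch of the EPS/BPS valuation arithmetic with invented inputs;
# DISCOUNT_RATE, FIXED_RATE, W_BPS and W_EPS are assumed stand-ins for config values.
import math

DISCOUNT_RATE, FIXED_RATE = 0.10, 10
W_BPS, W_EPS = 0.4, 0.6

bps, bps_5_growth, eps_5_growth = 10000, 0.05, 0.07
eps = [1200, 1100, 1000, 950, 900]          # latest first, like data['EPS'].dropna()[:5]

# BPS grown for five years, then discounted back
bps_for_future = bps * math.pow(1 + bps_5_growth, 5) * math.pow(1 - DISCOUNT_RATE, 5)

# weighted average of the last five EPS values
sum_of_product = sum(e * w for e, w in zip(eps, [0.4, 0.2, 0.2, 0.1, 0.1]))

sum_of_5_year, value_of_fixed = 0.0, 0.0
for i in range(1, 6):
    value_year = sum_of_product * math.pow(1 + eps_5_growth, i) * (1 - DISCOUNT_RATE)
    sum_of_5_year += value_year
    if i == 5:
        value_of_fixed = value_year * FIXED_RATE

value_of_future = sum_of_5_year + value_of_fixed / math.pow(1 + DISCOUNT_RATE, 5)
print(int(bps_for_future * W_BPS + value_of_future * W_EPS))
```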
#### File: modules/valuations/per.py
```python
import config
from modules.valuations.valuation import Valuation
# valuation: current EPS multiplied by the average PER over the past five years
class PER(Valuation):
def __init__(self, valuation):
data = valuation.get_data()
json = valuation.get_json()
Valuation.__init__(self, data, json)
self.set_json('PER', self.valuate())
def valuate(self):
try:
json = self.get_json()
return int(json['EPS'] * json['PER_5'])
except:
return None
```
#### File: modules/venders/sejong.py
```python
import os
import re
import pandas as pd
from bs4 import BeautifulSoup
from modules.venders.vender import Vender
class Sejong(Vender):
URL = 'http://www.sejongdata.com/business_include_fr/table_main0_bus_01.html?&no=%s'
def __init__(self, code, vender=None):
Vender.__init__(self, self.URL, vender)
response = self.load_url(code)
html, soup = response['html'], response['soup']
tables = soup.find_all('table')
df = self.get_data_from_table(tables[1])
self.concat(df, 'SALES')
self.concat(df, 'BUSINESS_PROFITS')
self.concat(df, 'CAPITAL_TOTAL')
self.concat(df, 'DEBT_TOTAL')
self.set_debt_ratio()
def concat(self, df, column):
if column in df.columns:
self.concat_data(df[column])
def get_data_from_table(self, table):
soup = BeautifulSoup(str(table), 'lxml')
for th in soup.find_all('th', class_='clf'):
items = th.find_all(['dd', 'a', 'span'])
for item in items:
item.extract()
df = pd.read_html(str(soup), header=0)[0]
columns = []
for index in range(len(df.columns)):
converted_column = self.date_column(df.columns[index])
if converted_column in columns:
converted_column += '.' + str(columns.count(converted_column))
columns.append(converted_column)
df.columns = columns
df = df.transpose()
df.columns = df.iloc[0]
df = df.reindex(df.index.drop('MONTH'))
columns = []
for index in range(len(df.columns)):
columns.append(self.date_column(df.columns[index]))
duplicate_postfix = {}
for i in range(len(columns)):
column = columns[i]
if columns.count(column) > 1:
count = 0
if column in duplicate_postfix:
count = duplicate_postfix[column] + 1
duplicate_postfix[column] = count
columns[i] = column + (str(duplicate_postfix[column])
if column in duplicate_postfix and
duplicate_postfix[column] > 0 else '')
df.columns = columns
return df
# debt ratio = total debt / total capital * 100
def set_debt_ratio(self):
column_name = 'DEBT_RATIO'
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
for month in data.index.values:
value = data['DEBT_TOTAL'][month] / self.data['CAPITAL_TOTAL'][
month] * 100
df[column_name][month] = int(value if not pd.isnull(value) else 0)
self.concat_data(df)
def date_column(self, data):
if not self.isNaN(data):
if bool(re.match(r'\d{4}\.\d{2}', data)):
data = data[0:4]
else:
data = self.column_name(data)
else:
data = self.id_generator()
return data
def column_name(self, name):
names = {
'Unnamed: 0': 'MONTH',
'매출액': 'SALES',
'영업이익': 'BUSINESS_PROFITS',
'자본총계': 'CAPITAL_TOTAL',
'부채총계': 'DEBT_TOTAL',
}
if name and name in names:
return names[name]
return name
```
{
"source": "jonghenhan/iotivity",
"score": 2
}
#### File: test/test_manager/devunittest_reporter.py
```python
import os
import sys
from configuration import *
import optparse
import xml.etree.ElementTree as ET
import glob
import datetime as dt
#import ntpath
import shutil
from os.path import basename
try:
import xlsxwriter
except ImportError:
print('Install xlsxwriter first, e.g. sudo easy_install xlsxwriter or pip install xlsxwriter')
oparser = optparse.OptionParser()
oparser.add_option('-b', action='store', dest='bit')
oparser.add_option('--bit', action='store', dest='bit')
oparser.add_option('-m', action='store', dest='mode')
oparser.add_option('--mode', action='store', dest='mode')
oparser.add_option('--iotivity_root', action='store', dest='iotivity_root')
#setting default value for command line arguments
oparser.set_defaults(iotivity_root = '../..', bit='64', mode= 'release')
opts, args = oparser.parse_args()
#Assigning command-line options value to global variables
iotivity_root = os.path.abspath(opts.iotivity_root)
build_mode = opts.mode
bit = opts.bit
#checking user input is 32 or 64
if bit == '32' :
bit = 'x86'
else :
bit = 'x86_64'
print ('Iotivity Root: '+ iotivity_root)
print ('Reporter Bit: '+ bit)
print ('Reporter Build Mode: '+ build_mode)
#set dev test result file directory
DEV_UNIT_TEST_RESULT_DIRECTORY = '%s/out/linux/%s/%s/test_out/'% (iotivity_root, bit, build_mode)
#constant variables for excel column management
TEST_MODULE_COLUMN_INDEX = 0
TEST_TYPE_COLUMN_INDEX = 1
TEST_SUITE_COLUMN_INDEX = 2
TEST_CASE_COLUMN_INDEX = 3
TEST_RESULT_COLUMN_INDEX = 4
TEST_DURATION_COLUMN_INDEX = 5
TEST_MESSAGE_COLUMN_INDEX = 6
TEST_DEFECT_COLUMN_INDEX = 6
TEST_JIRAID_COLUMN_INDEX = 7
TEST_DESCRIPTION_COLUMN_INDEX = 8
#global count variables
total_count = 0
fail_count = 0
not_run_count = 0
pass_count = 0
error_count = 0
#global row cursor for the Linux worksheet
linux_work_sheet_row_index = 1
#dict of module name -> result binaries whose XML output was missing (not run)
not_run_binary_dict = {}
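# map each IoTivity module directory to the gtest result binaries whose XML
# output is expected under DEV_UNIT_TEST_RESULT_DIRECTORY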
module = {
'resource':['unittests'],
'resource/c_common':['malloctests', 'randomtests', 'stringtests', 'timetests'],
'resource/csdk/connectivity': ['catests'],
'resource/csdk/security':['unittest'],
'resource/csdk/security/provisioning':['security_unittest'],
'resource/csdk/security/provisioning/ck_manager' :['unittest_1'],
'resource/csdk/stack':['stacktests'],
'resource/provisioning': ['provisiontests'],
'service/scene-manager': ['remote_scene_action_test','remote_scene_col_test','remote_scene_list_test','remote_scene_test', 'scene_action_test','scene_collection_test', 'scene_list_test', 'scene_test'],
'service/easy-setup/enrollee':['easysetup_enrollee_test'],
'service/easy-setup/mediator/richsdk':['easysetup_mediator_test'],
'service/resource-container':['container_test'],
'service/notification':['notification_consumer_test', 'notification_provider_test'],
'service/resource-encapsulation':['broker_test', 'cache_test', 'rcs_client_test', 'rcs_server_test', 'rcs_common_test']
}
def set_excel_style() :
global pass_header_format
global common_format
global common_bottom_format
global pass_header_format
global testcase_format
global test_suite_format
global merge_format
global description_format
pass_header_format = module_workbook.add_format({'bold': True,'border':True,'align':'center'})
pass_header_format.set_border(style=2)
common_format = module_workbook.add_format({'border':True,'align':'center','valign':'vcenter'})
common_format.set_border(style=1)
common_format.set_right(2)
common_bottom_format = module_workbook.add_format({'border':True,'align':'center','valign':'vcenter'})
common_bottom_format.set_bottom(2)
common_bottom_format.set_right(2)
description_format = module_workbook.add_format({'border':True})
description_format.set_border(style=2)
testcase_format = module_workbook.add_format({'border':True, 'align':'center','valign':'vcenter'})
testcase_format.set_border(style = 1)
testcase_format.set_right(2)
test_suite_format = module_workbook.add_format({'bottom':True,'align':'center','valign':'vcenter'})
test_suite_format.set_border(style = 2)
merge_format = module_workbook.add_format({'border':True,'align':'center','valign':'vcenter'})
merge_format.set_border(style=2)
def open_excel_workbook() :
global module_workbook
date = dt.datetime.today()
yy_mm_dd = date.strftime('%Y%m%d')
wk_no = date.isocalendar()[1]
MODULE_NAME ='testtsts'
module_workbook = xlsxwriter.Workbook(DEV_UNIT_TEST_RESULT_DIRECTORY +'TestResult_UnitTest_%s_W%s.xlsx' % (yy_mm_dd, str(wk_no)))
set_excel_style()
add_worksheet_to_excel_workbook()
def set_summary_work_sheet_header():
#set column
summary_work_sheet.set_column('A:A', 10)
summary_work_sheet.set_column('B:B', 40)
summary_work_sheet.set_column('C:C', 10)
summary_work_sheet.set_column('D:D', 10)
summary_work_sheet.set_column('E:E', 10)
summary_work_sheet.set_column('F:F', 10)
summary_work_sheet.set_column('G:G', 10)
#write header values in columns
summary_work_sheet.write('A1', 'Platform', pass_header_format)
summary_work_sheet.write('B1', 'Module', pass_header_format)
summary_work_sheet.write('C1', 'Passed', pass_header_format)
summary_work_sheet.write('D1', 'Failed', pass_header_format)
summary_work_sheet.write('E1', 'Error', pass_header_format)
summary_work_sheet.write('F1', 'Not Run', pass_header_format)
summary_work_sheet.write('G1', 'Total', pass_header_format)
def set_pass_work_sheet_header():
pass_work_sheet.set_column('A:A', 30)
pass_work_sheet.set_column('B:B', 30)
pass_work_sheet.set_column('C:C', 30)
pass_work_sheet.set_column('D:D', 50)
pass_work_sheet.set_column('E:E', 12)
pass_work_sheet.set_column('F:F', 10)
pass_work_sheet.set_column('G:G', 60)
pass_work_sheet.write('A1', 'Module', pass_header_format)
pass_work_sheet.write('B1', 'Category', pass_header_format)
pass_work_sheet.write('C1', 'Test Suite', pass_header_format)
pass_work_sheet.write('D1', 'Test Case', pass_header_format)
pass_work_sheet.write('E1', 'Result', pass_header_format)
pass_work_sheet.write('F1', 'Time(ms)', pass_header_format)
pass_work_sheet.write('G1', 'Reason', pass_header_format)
#pass_work_sheet.autofilter('A1:D11')
def set_fail_work_sheet_header():
fail_work_sheet.set_column('A:A', 10)
fail_work_sheet.set_column('B:B', 10)
fail_work_sheet.set_column('C:C', 40)
fail_work_sheet.set_column('D:D', 50)
fail_work_sheet.set_column('E:E', 12)
fail_work_sheet.set_column('F:F', 10)
fail_work_sheet.set_column('G:G', 50)
fail_work_sheet.write('A1', 'Module', pass_header_format)
fail_work_sheet.write('B1', 'Type', pass_header_format)
fail_work_sheet.write('C1', 'Test Suite', pass_header_format)
fail_work_sheet.write('D1', 'Test Case', pass_header_format)
fail_work_sheet.write('E1', 'Result', pass_header_format)
fail_work_sheet.write('F1', 'Time(ms)', pass_header_format)
fail_work_sheet.write('G1', 'Message', pass_header_format)
def defect_work_sheet_header() :
defect_work_sheet.set_column('A:A', 10)
defect_work_sheet.set_column('B:B', 10)
defect_work_sheet.set_column('C:C', 40)
defect_work_sheet.set_column('D:D', 50)
defect_work_sheet.set_column('E:E', 12)
defect_work_sheet.set_column('F:F', 10)
defect_work_sheet.set_column('G:G', 50)
defect_work_sheet.set_column('H:H', 10)
defect_work_sheet.set_column('I:I', 50)
defect_work_sheet.write('A1', 'Module', pass_header_format)
defect_work_sheet.write('B1', 'Type', pass_header_format)
defect_work_sheet.write('C1', 'Test Suite', pass_header_format)
defect_work_sheet.write('D1', 'Test Case', pass_header_format)
defect_work_sheet.write('E1', 'Result', pass_header_format)
defect_work_sheet.write('F1', 'Time(ms)', pass_header_format)
defect_work_sheet.write('G1', 'Defect', pass_header_format)
defect_work_sheet.write('H1', 'Jira ID', pass_header_format)
defect_work_sheet.write('I1', 'Description', pass_header_format)
def add_worksheet_to_excel_workbook() :
global summary_work_sheet
global pass_work_sheet
global fail_work_sheet
global defect_work_sheet
summary_work_sheet = module_workbook.add_worksheet('Summary')
pass_work_sheet = module_workbook.add_worksheet('Linux')
fail_work_sheet = module_workbook.add_worksheet('Java')
defect_work_sheet = module_workbook.add_worksheet('DefectReport')
set_summary_work_sheet_header()
set_pass_work_sheet_header()
set_fail_work_sheet_header()
defect_work_sheet_header()
summary_work_sheet_row_index = 1
def parse_xml_file(file_list, module_name):
global summary_work_sheet_row_index
global linux_work_sheet_row_index
global total_count
global fail_count
global not_run_count
global pass_count
global error_count
module_wise_total_count = 0
module_wise_fail_count = 0
module_wise_not_run_count = 0
module_wise_pass_count = 0
module_wise_error_count = 0
summary_module_row_start_index = summary_work_sheet_row_index
module_name_column_start_index = linux_work_sheet_row_index
missing_binary_list = []
type_wise_total_count = 0
type_wise_pass_count = 0
type_wise_fail_count = 0
type_wise_not_run_count = 0
type_wise_error_count = 0
for file_name in file_list :
full_file_name = DEV_UNIT_TEST_RESULT_DIRECTORY+file_name+'.xml'
print('File Name :'+ file_name)
temp_dict = {}
pass_work_sheet_module_row_start_index = linux_work_sheet_row_index
#file_base_name = basename(filename).split('.')[0]
pass_work_sheet.write(linux_work_sheet_row_index, TEST_MODULE_COLUMN_INDEX, file_name, test_suite_format)
if not os.path.isfile(full_file_name) :
missing_binary_list.append(file_name)
continue
if not os.path.getsize(full_file_name) > 0 :
continue
tree = ET.parse(full_file_name)
#Getting root that is testsuites
root = tree.getroot()
test_count = root.get('tests')
fail_cnt = root.get('failures')
not_run_cnt = root.get('disabled')
error_cnt = root.get('errors')
pass_cnt = int(test_count) - int(fail_cnt) - int(not_run_cnt) - int(error_cnt)
#module wise count operation
type_wise_total_count += int(test_count)
type_wise_pass_count += pass_cnt
type_wise_fail_count += int(fail_cnt)
type_wise_error_count += int(error_cnt)
type_wise_not_run_count += int(not_run_cnt)
print('Total Test Cases count : ' + test_count)
#Find all tags named <testsuite> and store to testsuites list
testsuites = root.findall('testsuite')
#Iterating all test suite from testsuites list
for test_suite in testsuites :
merge_pass_row_suite_start_index = linux_work_sheet_row_index
test_suite_name = test_suite.get('name')
print ('Suite Name : '+test_suite_name+'\n')
pass_work_sheet.write(linux_work_sheet_row_index, TEST_SUITE_COLUMN_INDEX, test_suite_name, test_suite_format)
testcases = test_suite.findall('testcase')
failure_message = 'N/A'
for test_case in testcases :
test_result = 'Pass'
failure_message = 'N/A'
test_name = test_case.get('name')
test_status = test_case.get('status')
test_time = test_case.get('time')
print('Test Name : ' +test_name)
if test_status == 'notrun' :
test_result = 'Not Run'
failure_message = 'Disable by code'
fail = test_case.find('failure')
error = test_case.find('error')
if error is not None :
test_result = 'Error'
failure_message = error.get('message')
if fail is not None :
test_result = 'Fail'
failure_message = fail.get('message')
print('Result : Fail '+ failure_message )
print('Result : '+test_result +' Message : '+failure_message+' Time : '+test_time)
else :
print('Result : '+test_result+ ' Time : '+test_time)
#writing value into pass worksheet
pass_work_sheet.write(linux_work_sheet_row_index, TEST_CASE_COLUMN_INDEX, test_name, common_format)
pass_work_sheet.write(linux_work_sheet_row_index, TEST_RESULT_COLUMN_INDEX, test_result, common_format)
pass_work_sheet.write(linux_work_sheet_row_index, TEST_DURATION_COLUMN_INDEX, test_time, common_format)
pass_work_sheet.write(linux_work_sheet_row_index, TEST_MESSAGE_COLUMN_INDEX, failure_message, common_format)
linux_work_sheet_row_index += 1
#suite wise merging in pass worksheet
pass_work_sheet.merge_range(merge_pass_row_suite_start_index,2,linux_work_sheet_row_index - 1,2,test_suite_name,test_suite_format)
print ('\n')
#Category wise merging in pass worksheet
pass_work_sheet.merge_range(pass_work_sheet_module_row_start_index,1,linux_work_sheet_row_index - 1,1,file_name,test_suite_format)
#Module wise merging in pass worksheet
pass_work_sheet.merge_range(module_name_column_start_index, 0,linux_work_sheet_row_index - 1,0, module_name ,test_suite_format)
#adding module wise missing binary file
not_run_binary_dict.update({module_name : missing_binary_list})
#module wise total count
module_wise_total_count += type_wise_total_count
module_wise_fail_count += type_wise_fail_count
module_wise_not_run_count+= type_wise_not_run_count
module_wise_pass_count += type_wise_pass_count
module_wise_error_count += type_wise_error_count
#grand total count
total_count += module_wise_total_count
fail_count += module_wise_fail_count
not_run_count += module_wise_not_run_count
pass_count += module_wise_pass_count
error_count += module_wise_error_count
#write category wise count in excel
summary_work_sheet.write(summary_work_sheet_row_index, 1, module_name, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 2, module_wise_pass_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 3, module_wise_fail_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 4, module_wise_error_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 5, module_wise_not_run_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 6, module_wise_total_count , common_format)
summary_work_sheet_row_index += 1
#merging summary worksheet Platform column
def print_missing_binary_list_type_wise() :
print('===============================Not Run Binary==================================')
for module_name in not_run_binary_dict :
if len(not_run_binary_dict[module_name]) != 0 :
print('Module : '+ module_name)
for binary_name in not_run_binary_dict [module_name] :
print(' '+binary_name+' ')
print('\n')
print('================================================================================')
def close_module_workbook() :
module_workbook.close()
def create_excel_report() :
dev_test_result_files_list = []
open_excel_workbook()
for module_name in module :
#print(module_name)
dev_test_result_files_list = module[module_name]
print (module[module_name])
if len(dev_test_result_files_list) > 0 :
parse_xml_file(dev_test_result_files_list, module_name)
summary_work_sheet.write(summary_work_sheet_row_index, 1, 'All', common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 2, pass_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 3, fail_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 4, error_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 5, not_run_count, common_format)
summary_work_sheet.write(summary_work_sheet_row_index, 6, total_count , common_format)
summary_work_sheet.merge_range(1, 0,summary_work_sheet_row_index, 0 ,'Linux', common_format)
close_module_workbook()
def find_and_copy_file() :
directory = DEV_UNIT_TEST_RESULT_DIRECTORY+':xml:'
print('Directory :'+directory)
print(os.path.isdir(directory))
if (os.path.isdir(directory)) :
for root, dirs ,files in os.walk(directory) :
for file_name in files :
if file_name.endswith('.xml') :
print (os.path.abspath(file_name))
print(file_name)
file_full_path = os.path.join(root, file_name)
directory_path = os.path.dirname(file_full_path)
shutil.copy(directory_path + '/unittest.xml', DEV_UNIT_TEST_RESULT_DIRECTORY + 'security_unittest.xml')
def print_summary():
print('\n========================Dev Team Unit TCs===================================\n')
print('\nTEST RESULT SUMMARY \n')
print('\n PLATFORM : %s\n'%('LINUX'))
print('\n MODULE : %s\n'%('ALL'))
print(' PASS : ' +str(pass_count)+'\n')
print(' FAIL : ' +str(fail_count)+'\n')
print(' ERROR : ' +str(error_count)+'\n')
print(' NOT RUN : ' +str(not_run_count)+'\n')
print(' TOTAL : ' +str(total_count)+'\n')
print('\n==============================================================================\n')
if __name__ == '__main__':
find_and_copy_file()
create_excel_report()
print_summary()
print_missing_binary_list_type_wise()
```
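For reference, a small sketch of the gtest result XML shape that `parse_xml_file()` above relies on (counts on `<testsuites>`, per-suite `<testcase>` entries with optional `<failure>`/`<error>` children); the sample data is invented:

```python
# Illustrative only: the attributes read by the reporter from a gtest XML file.
import xml.etree.ElementTree as ET

sample = """<testsuites tests="2" failures="1" disabled="0" errors="0">
  <testsuite name="SampleSuite">
    <testcase name="PassingCase" status="run" time="0.001"/>
    <testcase name="FailingCase" status="run" time="0.002">
      <failure message="expected 1, got 2"/>
    </testcase>
  </testsuite>
</testsuites>"""

root = ET.fromstring(sample)
print(root.get('tests'), root.get('failures'), root.get('disabled'), root.get('errors'))
for suite in root.findall('testsuite'):
    for case in suite.findall('testcase'):
        failed = case.find('failure') is not None
        print(suite.get('name'), case.get('name'), case.get('status'), failed)
```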
#### File: test/test_manager/inter_op_reporter.py
```python
import argparse
import os
import sys
from robottcxmlparser.utility import *
from robottcxmlparser.xmlparser import *
from robottcxmlparser.testsuite import *
from robottcxmlparser.reporter import *
def get_test_suite_obj(xml_file_path):
xml_obj = XmlParser(xml_file_path)
testsuite_obj = TestSuite()
testsuite_obj.set_test_env(xml_obj.xml_content)
Utility.report_dir_name = TestSuite.set_dir_name(Utility.xml_file_path)
testsuite_obj.test_case_parse(xml_obj.testcase_lists)
return testsuite_obj
if __name__ == "__main__":
cmd_parser = argparse.ArgumentParser()
cmd_parser.add_argument('-m', dest='module_name', help='Module Name')
cmd_parser.add_argument('-f', dest='file_path', default="all", help='XML File Name')
if len(sys.argv) == 1:
cmd_parser.print_help()
sys.exit(1)
arg = cmd_parser.parse_args()
Utility.get_platform_module_info()
Utility.interop_report_dir = os.path.abspath(Constants.interop_report_dir)
Utility.module_name = arg.module_name
Utility.module_dir = Utility.interop_report_dir + os.sep + Utility.module_name.lower()
print("Working DIR : " + Utility.module_dir + '\n')
report_obj = Reporter()
report_obj.create_report(Utility.module_name)
#
# if report_obj.summary_work_sheet == None:
# print("Report WorkBook is NULL")
if arg.file_path == 'all' :
for file in os.listdir(Utility.module_dir):
if file.endswith(".xml"):
Utility.file_name = file
Utility.xml_file_path = Utility.module_dir + '/' + Utility.file_name
report_obj.tc_suite_obj_list.append(get_test_suite_obj(Utility.xml_file_path))
# for tc in testsuite_obj.test_list:
# tc.print_tc_details()
report_obj.merge_report()
report_obj.close_report()
print(Utility.module_name)
```
#### File: ite/exec/runner_info_setter.py
```python
import os
import re
import subprocess
from subprocess import Popen, PIPE
from ite.multi_logger import print_runner_output
from ite.config import *
class RunnerInfoSetter:
def __init__(self, test_root):
self.test_root = test_root
def set_gtest_tc_list(self, test_result_dir, given_module, file_filter, given_testsuites, given_testcases, dynamic_runner):
binary_list = dynamic_runner.get_binary_list()
sz = 0
for binary_name in binary_list:
print_runner_output(binary_name)
if file_filter:
if file_filter not in binary_name:
continue
if given_module:
if len(binary_name.split('_')) < 3:
continue
if binary_name.split('_')[1] not in given_module:
continue
testsuite = ''
command = dynamic_runner.get_tc_list_command(binary_name)
rc = subprocess.check_output(command, shell=True)
log = re.sub(r'(b\'|\')', '', str(rc))
log = dynamic_runner.remove_invalid_character_from_log(log)
for line in log.split('\n'):
line = line.strip()
if not line:
continue
if line.endswith('.') :
testsuite = line
continue
if given_testsuites and testsuite[:-1] not in given_testsuites:
continue
if given_testcases and line not in given_testcases:
continue
if testsuite != '' and line != '':
if dynamic_runner.tc_scenario:
if testsuite.split('_')[-1] not in dynamic_runner.tc_scenario:
continue
dynamic_runner.add_new_tc(binary_name, testsuite[:-1], line, '')
def set_junit_tc_list(self, test_result_dir, given_module, given_package_name, given_testsuites, given_testcases, dynamic_runner):
for testcase_type in TESTCASE_TYPES:
print_runner_output('testcase_type: {}'.format(testcase_type))
if dynamic_runner.tc_scenario:
found = False
for temp in dynamic_runner.tc_scenario:
if temp.lower() == testcase_type.lower():
found = True
break
if not found:
continue
for module in TESTSUITE_MODULES:
if given_testsuites:
found = False
for given_suite in given_testsuites:
if module in given_suite:
found = True
break
if not found:
continue
if given_module:
if module.lower() not in given_module.lower():
continue
if given_package_name:
package_name = given_package_name
else:
package_name = 'org.iotivity.test.' + module.lower() + '.tc.' + testcase_type.lower()
print_runner_output('package_name: {}'.format(package_name))
cwd = os.getcwd()
print_runner_output(cwd)
build_dir = os.path.join(cwd, self.test_root, 'bin', dynamic_runner.platform_type, module.lower(), 'intermediates', 'classes', 'debug', package_name.replace('.', os.sep))
print_runner_output(build_dir)
if not os.path.exists(build_dir):
print_runner_output('path {} is not exist'.format(build_dir))
continue
os.chdir(build_dir)
print_runner_output(os.getcwd())
file_list = [f for f in os.listdir(build_dir) if os.path.isfile(os.path.join(build_dir, f))]
for suite in file_list :
if "$" not in suite and suite.endswith('Test.class'):
suite_name = suite.split('.',1)[0]
if given_testsuites and suite_name not in given_testsuites:
continue
class_command = "find -name '{}.class' | xargs javap -p".format(suite_name)
rc = subprocess.check_output(class_command, shell=True)
log = re.sub(r'(b\'|\')', '', str(rc))
for line in log.split('\\n'):
line = re.sub(r'(b\'|\')', '', str(line.strip()))
if 'public void test' in line:
begin_index = line.find('test')
end_index = line.find('(')
tc_name = line[begin_index:end_index]
if given_testcases and tc_name not in given_testcases:
continue
dynamic_runner.add_new_tc('', suite_name, tc_name, package_name)
os.chdir(cwd)
```
#### File: test_manager/ite/multi_logger.py
```python
import os
import sys
import datetime
import traceback
import logging
global logger
logger = logging.getLogger('scope.name')
logger.setLevel('INFO')
global log_file_name
log_file_name = ''
def set_file_handler(module, platform_type, verdict_path):
tokens = verdict_path.split(os.sep)
global log_file_name
log_file_name = 'test_manager_{}_{}.log'.format(module, platform_type)
if len(tokens) > 1:
log_file_name = os.sep.join(tokens[0:-1]) + os.sep + log_file_name
file_log_handler = logging.FileHandler(log_file_name)
logger.addHandler(file_log_handler)
def print_to_file(*msgs):
global log_file_name
if logger:
for msg in msgs:
timestring = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info(timestring + ' ' + str(msg))
def print_runner_output(*msgs):
global log_file_name
for msg in msgs:
print (msg)
print_to_file(*msgs)
def show_exeception():
#print_runner_output("Unexpected error:", sys.exc_info()[0])
traceback.print_exc(file=sys.stdout)
if logger:
logger.info(traceback.format_exc())
```
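A minimal usage sketch for these logging helpers, assuming the `test_manager` directory is on the import path; module name, platform, and paths below are hypothetical, and the log directory must already exist:

```python
# Hypothetical usage of the multi_logger helpers (names and paths are placeholders).
from ite.multi_logger import set_file_handler, print_runner_output, show_exeception

set_file_handler('ca', 'linux', 'result/verdict.txt')  # logs to result/test_manager_ca_linux.log
print_runner_output('starting test run')               # prints to console and the log file

try:
    raise RuntimeError('boom')
except RuntimeError:
    show_exeception()                                   # dumps the traceback to stdout and the log
```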
#### File: ite/reporter/result_reporter.py
```python
import sys
import operator
import os
from xlsxwriter.workbook import Workbook
from ite.config import *
from ite.constants import *
from ite.result.collector import TestResultCollector
from ite.tc.container import TestSpecContainer
from ite.reporter.excel_util import *
from ite.reporter.reporter_util import *
from ite.util import *
class TestResultReporter:
def __init__(self):
self.summary = dict()
self.failtc = dict()
self.passtc = dict()
self.testspec = None
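# summary / failtc / passtc are populated by make_summary() as nested dicts keyed
# [platform][target][build_type][transport][network][tc_type][module]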
def make_summary(self, data):
for platform in TEST_PLATFORM:
if not platform in data:
continue
self.summary[platform] = dict()
self.failtc[platform] = dict()
self.passtc[platform] = dict()
for target in data[platform]:
self.summary[platform][target] = dict()
self.failtc[platform][target] = dict()
self.passtc[platform][target] = dict()
for build_type in data[platform][target]:
self.summary[platform][target][build_type] = dict()
self.failtc[platform][target][build_type] = dict()
self.passtc[platform][target][build_type] = dict()
for transport in data[platform][target][build_type]:
self.summary[platform][target][build_type][transport] = dict()
self.failtc[platform][target][build_type][transport] = dict()
self.passtc[platform][target][build_type][transport] = dict()
for network in data[platform][target][build_type][transport]:
self.summary[platform][target][build_type][transport][network] = dict()
self.failtc[platform][target][build_type][transport][network] = dict()
self.passtc[platform][target][build_type][transport][network] = dict()
for tctype in TESTCASE_TYPES:
self.summary[platform][target][build_type][transport][network][tctype] = dict()
self.failtc[platform][target][build_type][transport][network][tctype] = dict()
self.passtc[platform][target][build_type][transport][network][tctype] = dict()
for module in TESTSUITE_MODULES:
self.summary[platform][target][build_type][transport][network][tctype][module] = dict()
self.failtc[platform][target][build_type][transport][network][tctype][module] = dict()
self.passtc[platform][target][build_type][transport][network][tctype][module] = dict()
for result in TC_RESULT:
self.summary[platform][target][build_type][transport][network][tctype][module][result] = 0
if ((not tctype in list(data[platform][target][build_type][transport][network])) or (not module in list(data[platform][target][build_type][transport][network][tctype]))):
continue
for suite in list(data[platform][target][build_type][transport][network][tctype][module]):
self.failtc[platform][target][build_type][transport][network][tctype][module][suite] = list()
self.passtc[platform][target][build_type][transport][network][tctype][module][suite] = list()
for tc_name in list(data[platform][target][build_type][transport][network][tctype][module][suite]):
testcase = data[platform][target][build_type][transport][network][tctype][module][suite][tc_name]
if (testcase.success == 0):
self.summary[platform][target][build_type][transport][network][tctype][module][TC_RESULT.FAIL] += 1
self.failtc[platform][target][build_type][transport][network][tctype][module][suite].append(testcase)
else:
self.summary[platform][target][build_type][transport][network][tctype][module][TC_RESULT.PASS] += 1
self.passtc[platform][target][build_type][transport][network][tctype][module][suite].append(testcase)
def generate_testresult_report(self, path, spec_data):
collector = TestResultCollector()
collector.collect_results(path)
self.testspec = spec_data
self.make_summary(collector.result.data)
def report_fail_result_to_txt(self, dir_path):
self.report_test_result_to_txt(dir_path, self.failtc)
def report_pass_result_to_txt(self, dir_path):
self.report_test_result_to_txt(dir_path, self.passtc)
def report_test_result_to_txt(self, dir_path, result_suite):
for platform in TEST_PLATFORM:
if not platform in result_suite:
continue
for target in result_suite[platform]:
for build_type in result_suite[platform][target]:
for transport in result_suite[platform][target][build_type]:
transport_name = transport
if transport_name == NO_TRANSPORT:
transport_name = ''
for network in result_suite[platform][target][build_type][transport]:
network_name = network
if network_name == NO_NETWORK:
network_name = ''
new_dir = os.path.join(dir_path, '%s_%s_%s_%s_%s' %(platform, target.strip(), build_type, transport_name, network_name))
os.makedirs(new_dir)
for tctype in result_suite[platform][target][build_type][transport][network]:
for module in result_suite[platform][target][build_type][transport][network][tctype]:
for suite in list(result_suite[platform][target][build_type][transport][network][tctype][module]):
for testcase in list(result_suite[platform][target][build_type][transport][network][tctype][module][suite]):
text_file_name = suite
if not ('_' + tctype.lower()) in suite.lower():
text_file_name += '_' + tctype.lower()
text_file_name += '_' + testcase.name + '_' + build_type + '.txt'
file_path = os.path.join(new_dir, text_file_name)
txt = open_write_file(file_path)
if txt == False:
continue
print("[Defect Info]", file=txt)
print(" Title: ", file=txt)
print(" Description: \n", file=txt)
if platform in self.testspec:
if tctype in self.testspec[platform]:
if module in self.testspec[platform][tctype]:
if transport in self.testspec[platform][tctype][module]:
if network in self.testspec[platform][tctype][module][transport]:
if suite in self.testspec[platform][tctype][module][transport][network]:
if testcase.name in self.testspec[platform][tctype][module][transport][network][suite]:
spec = self.testspec[platform][tctype][module][transport][network][suite][testcase.name]
print("[Test Case Info]", file=txt)
print(spec.to_string(), file=txt)
print("\n[Test Result]", file=txt)
print("Fail Rate: %d/%d\n" % (testcase.fail, testcase.fail+testcase.success), file=txt)
index = 1
for result in testcase.runresult:
print("[%d Try]" % index, file=txt)
print("--------------------------------------------------------------------", file=txt)
print("Result: " + result.result, file=txt)
print("Run Type: " + result.runtype, file=txt)
print("Run Time: " + str(result.runtime), file=txt)
print("\n<<<Fail Message>>> \n" + result.fail_msg, file=txt)
print("\n<<<Test Log>>> \n" + result.test_log, file=txt)
print("--------------------------------------------------------------------\n\n", file=txt)
index += 1
def report_result(self, sheet, form, report_title, result_writer, result_suite):
row = 0
col = 0
for title, size in RESULT_CAT_TITLE:
if (title == 'Line'):
continue
sheet.write(row, col, title, form.title)
sheet.set_column(col, col, size)
col += 1
for title, size in report_title:
sheet.write(row, col, title, form.title)
sheet.set_column(col, col, size)
col += 1
row += 1
col = 0
def write_platform(platforms):
for platform in platforms:
nonlocal row, col
platform_first_row = row
if not platform in result_suite:
continue
col += 1
yield result_suite[platform], platform
col -= 1
merge_cell(sheet, platform_first_row, col, row-1, col, platform, form.cell)
def write_target(platforms):
for platform in platforms:
targets, platform_name = platform
for target in list(targets):
nonlocal row, col
target_first_row = row
col += 1
yield targets[target], platform_name, target
col -= 1
merge_cell(sheet, target_first_row, col, row-1, col, target, form.cell)
def write_build_type(targets):
for target in targets:
build_types, platform, target = target
for build_type in list(build_types):
nonlocal row, col
build_type_first_row = row
col += 1
yield build_types[build_type], platform, target, build_type
col -= 1
merge_cell(sheet, build_type_first_row, col, row-1, col, build_type, form.cell)
def write_transport(build_types):
for build_type in build_types:
transports, platform, target, build_type = build_type
for transport in list(transports):
nonlocal row, col
transport_first_row = row
col += 1
yield transports[transport], platform, target, build_type, transport
col -= 1
merge_cell(sheet, transport_first_row, col, row-1, col, transport, form.cell)
def write_network(transports):
for transport in transports:
networks, platform, target, build_type, transport = transport
for network in list(networks):
nonlocal row, col
network_first_row = row
col += 1
yield networks[network], platform, target, build_type, transport, network
col -= 1
merge_cell(sheet, network_first_row, col, row-1, col, network, form.cell)
def write_tctype(networks):
for network in networks:
types, platform, target, build_type, transport, network = network
for tctype in list(types):
nonlocal row, col
type_first_row = row
module_count = 0
for module in TESTSUITE_MODULES:
module_count += len(types[tctype][module])
if module_count == 0:
continue
col += 1
yield TESTSUITE_MODULES, types[tctype], platform, target, build_type, transport, network, tctype
col -= 1
merge_cell(sheet, type_first_row, col, row-1, col, tctype, form.cell)
def write_module(tctypes):
for tctype in tctypes:
module_names, modules, platform, target, build_type, transport, network, tctype = tctype
for module in module_names:
nonlocal row, col
module_first_row = row
if (len(list(modules[module])) == 0):
continue
col += 1
yield modules[module], platform, target, build_type, transport, network, tctype, module
col -= 1
merge_cell(sheet, module_first_row, col, row-1, col, module, form.cell)
def write_suite(modules):
for module in modules:
suites, platform, target, build_type, transport, network, tctype, module = module
for suite in list(suites):
nonlocal row, col
suite_first_row = row
if (len(suites[suite]) == 0):
continue
if not suite in suites and not suite in self.testspec[platform][build_type][tctype][module][transport][network]:
print ('suite not found: ', platform, tctype, module, transport, network, suite)
continue
col += 1
yield suites[suite], platform, target, build_type, transport, network, tctype, module, suite
col -=1
merge_cell(sheet, suite_first_row, col, row-1, col, suite, form.cell)
def write_tc(suites):
for suite in suites:
testcases, platform, target, build_type, transport, network, tctype, module, suite = suite
testspec = self.testspec[platform][build_type][tctype][module][transport][network][suite]
for testcase in testcases:
nonlocal row, col
row, col = result_writer(row, col, testcase, testspec, sheet, platform, target, transport, network)
walk_through_results(write_platform, write_target, write_build_type, write_transport, write_network, write_tctype, write_module, write_suite, write_tc)
def report_to_xlsx(self, path):
workbook = Workbook(path)
form = ExcelFormat(workbook)
summarysheet = workbook.add_worksheet('Summary')
row = 0
col = 0
summarysheet.merge_range(row, col, row+3, col, '', form.cell)
col += 1
for platform in TEST_PLATFORM:
if not platform in self.summary:
continue
row += 1
platform_col = col
for target in self.summary[platform]:
row += 1
target_col = col
for tc_type in TESTCASE_TYPES + ('Total',):
row += 1
tc_type_col = col
for result in TC_RESULT + ('Total',):
summarysheet.write(row, col, result, form.result_title[result])
col += 1
row -= 1
summarysheet.merge_range(row, tc_type_col, row, col-1, tc_type, form.title)
row -= 1
summarysheet.merge_range(row, target_col, row, col-1, target, form.title)
row -= 1
summarysheet.merge_range(row, platform_col, row, col-1, platform, form.title)
total_col = col
for result in TC_RESULT + ('Total','Pass Rate',):
summarysheet.write(row+3, col, result, form.result_title[result])
col += 1
summarysheet.merge_range(row, total_col, row+2, col-1, "Total", form.total)
row += 4
col = 0
for module in TESTSUITE_MODULES :
col = 0
summarysheet.write(row, col, module, form.title)
col += 1
module_total_txt = dict()
for result in TC_RESULT + ('Total',):
module_total_txt[result] = '=SUM('
for platform in TEST_PLATFORM:
if not platform in self.summary:
continue
for target in self.summary[platform]:
for tc_type in TESTCASE_TYPES:
for result in TC_RESULT:
result_sum = 0
for build_type in self.summary[platform][target]:
for transport in self.summary[platform][target][build_type]:
for network in self.summary[platform][target][build_type][transport]:
result_sum += self.summary[platform][target][build_type][transport][network][tc_type][module][result]
summarysheet.write(row, col, result_sum, form.cell)
col += 1
total_txt = '=SUM(%s:%s)' % (get_cell_name(col - len(TC_RESULT), row), get_cell_name(col - 1, row))
summarysheet.write(row, col, total_txt, form.cell)
col += 1
for result in TC_RESULT + ('Total',):
total_txt = '=SUM(%s,%s)' % (get_cell_name(col - (len(TC_RESULT) + 1)*2, row), get_cell_name(col - (len(TC_RESULT) + 1), row))
summarysheet.write(row, col, total_txt, form.cell)
module_total_txt[result] += '%s,' % get_cell_name(col, row)
col += 1
for result in TC_RESULT + ('Total',):
module_total_txt[result] += ')'
summarysheet.write(row, col, module_total_txt[result], form.cell)
col += 1
total_txt = '=IF(%s=0,"",ROUND(%s/%s*100, 2))' % (get_cell_name(col - 1, row), get_cell_name(col - 3, row), get_cell_name(col - 1, row))
summarysheet.write(row, col, total_txt, form.cell)
row += 1
col = 0
summarysheet.write(row, col, 'Total', form.total)
col += 1
for platform in TEST_PLATFORM:
if not platform in self.summary:
continue
for target in self.summary[platform]:
for tc_type in TESTCASE_TYPES + ('Total',):
for result in TC_RESULT + ('Total',):
total_txt = '=SUM(%s:%s)' % (get_cell_name(col, row - len(TESTSUITE_MODULES)), get_cell_name(col, row - 1))
summarysheet.write(row, col, total_txt, form.total_no)
col += 1
for result in TC_RESULT + ('Total',):
total_txt = '=SUM(%s:%s)' % (get_cell_name(col, row - len(TESTSUITE_MODULES)), get_cell_name(col, row - 1))
summarysheet.write(row, col, total_txt, form.total_no)
col += 1
total_txt = '=IF(%s=0,"",ROUND(%s/%s*100, 2))' % (get_cell_name(col - 1, row), get_cell_name(col - 3, row), get_cell_name(col - 1, row))
summarysheet.write(row, col, total_txt, form.total_no)
def write_pass_result(row, col, testcase, testspec, sheet, platform, target, transport, network):
tc_col = col
tc_row = row
col += 2
index = 0
while(index < len(testcase.runresult)):
sheet.write(row, col, testcase.runresult[index].result, form.cell)
sheet.write(row, col + 1, testcase.runresult[index].runtype, form.cell)
sheet.write(row, col + 2, testcase.runresult[index].runtime, form.cell)
sheet.write(row, col + 3, testcase.runresult[index].fail_msg, form.cell_wrap)
temp_log = get_log_content_or_filename(testcase.runresult[index].test_log)
sheet.write(row, col + 4, temp_log, form.cell_wrap)
index += 1
row +=1
col = tc_col
merge_cell(sheet, tc_row, col, row - 1, col, testcase.name, form.cell)
col += 1
merge_cell(sheet, tc_row, col, row - 1, col,
"%d/%d" % (testcase.fail, testcase.fail + testcase.success), form.cell)
col += 6
if testcase.name in testspec:
spec = testspec[testcase.name]
for key, title in sorted(TAG_DIC.items(), key=operator.itemgetter(1)):
if (title[0] < 5):
continue;
merge_cell(sheet, tc_row, col, row -1, col, spec.__dict__[key], form.cell_wrap)
col += 1
col = tc_col
return row, col
def write_fail_result(row, col, testcase, testspec, sheet, platform, target, transport, network):
tc_col = col
tc_row = row
col += 2
index = 0
while(index < len(testcase.runresult)):
sheet.write(row, col, testcase.runresult[index].result, form.cell)
sheet.write(row, col + 1, testcase.runresult[index].runtype, form.cell)
sheet.write(row, col + 2, testcase.runresult[index].runtime, form.cell)
sheet.write(row, col + 3, testcase.runresult[index].fail_msg, form.cell_wrap)
temp_log = get_log_content_or_filename(testcase.runresult[index].test_log)
sheet.write(row, col + 4, temp_log, form.cell_wrap)
index += 1
row +=1
col = tc_col
merge_cell(sheet, tc_row, col, row - 1, col, testcase.name, form.cell)
col += 1
merge_cell(sheet, tc_row, col, row - 1, col,
"%d/%d" % (testcase.fail, testcase.fail + testcase.success), form.cell)
col += 6
if testcase.name in testspec:
spec = testspec[testcase.name]
for key, title in sorted(TAG_DIC.items(), key=operator.itemgetter(1)):
if (title[0] < 5):
continue;
merge_cell(sheet, tc_row, col, row -1, col, spec.__dict__[key], form.cell_wrap)
col += 1
col = tc_col
return row, col
def write_defect_result(row, col, testcase, testspec, sheet, platform, target, transport, network):
tc_col = col
tc_row = row
col += 2
index = 0
while(index < len(testcase.runresult)):
sheet.write(row, col, testcase.runresult[index].result, form.cell)
sheet.write(row, col + 1, testcase.runresult[index].runtype, form.cell)
sheet.write(row, col + 2, testcase.runresult[index].runtime, form.cell)
index += 1
row +=1
col = tc_col
merge_cell(sheet, tc_row, col, row - 1, col, testcase.name, form.cell)
col += 1
merge_cell(sheet, tc_row, col, row - 1, col,
"%d/%d" % (testcase.fail, testcase.fail + testcase.success), form.cell)
col += 4
merge_cell(sheet, tc_row, col, row-1, col, '', form.cell_wrap)
col += 1
merge_cell(sheet, tc_row, col, row-1, col, '', form.cell)
col += 1
test_target = ''
if testcase.name in list(testspec):
test_target = testspec[testcase.name].target
description = '[Device: %s %s]\n' %(platform, target)\
+ '[TC: %s]\n' % (testcase.name)\
+ '[Target: %s]\n\n' %(test_target) \
+ DEFECT_DESCRIPTION
merge_cell(sheet, tc_row, col, row-1, col, description, form.cell_wrap)
col = tc_col
return row, col
def get_log_content_or_filename(log):
if len(log) > 10000:
begin_index = log.find('Log File Name:') + len('Log File Name:')
end_index = log.find('Content:')
log = log[begin_index:end_index].strip()
return log
passsheet = workbook.add_worksheet('PassTC')
self.report_result(passsheet, form, RESULT_TITLE, write_pass_result, self.passtc)
failsheet = workbook.add_worksheet('FailedTC')
self.report_result(failsheet, form, RESULT_TITLE, write_fail_result, self.failtc)
defectsheet = workbook.add_worksheet('DefectReport')
self.report_result(defectsheet, form, DEFECT_TITLE, write_defect_result, self.failtc)
workbook.close()
```
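A hypothetical driver for the reporter above; the import path follows the file layout shown here, and `spec_data` is a placeholder for the nested test-spec dictionary the reporter looks up test descriptions in:

```python
# Hypothetical driver; paths are placeholders and spec_data would normally come
# from the test-spec parsing stage (see the TestSpecContainer import above).
from ite.reporter.result_reporter import TestResultReporter

spec_data = {}  # placeholder for the nested test-spec dict the reporter consults
reporter = TestResultReporter()
reporter.generate_testresult_report('path/to/result_xmls', spec_data)
reporter.report_to_xlsx('TestResult.xlsx')
reporter.report_fail_result_to_txt('path/to/fail_txt_dir')
```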
#### File: ite/tc/xmlanalyzer.py
```python
from xml.etree.ElementTree import ElementTree, ParseError
from ite.tc.analyzer import TestSpec
from ite.constants import *
from ite.config import *
class TCXMLAnalyzer:
def __init__(self):
self.data = dict()
self.summary = dict()
self.review_summary = dict();
def read_spec_xml(self, path):
try :
doc = ElementTree(file=path)
testspec = doc.find(SPEC_ELM.TESTSPEC)
for platform in testspec.findall(SPEC_ELM.PLATFORM):
platform_name = platform.get(SPEC_ATT.NAME)
self.data[platform_name] = dict()
for build_type_node in platform.findall(SPEC_ELM.BUILDTYPE):
build_type = build_type_node.get(SPEC_ATT.NAME)
self.data[platform_name][build_type] = dict()
for tctype in build_type_node.findall(SPEC_ELM.TYPE):
type_name = tctype.get(SPEC_ATT.NAME)
self.data[platform_name][build_type][type_name] = dict()
for module in tctype.findall(SPEC_ELM.MODULE):
module_name = module.get(SPEC_ATT.NAME)
self.data[platform_name][build_type][type_name][module_name] = dict()
transport_names = []
tranport_tag_map = {}
for transport in module.findall(SPEC_ELM.TRANSPORT):
transport_name = transport.get(SPEC_ATT.NAME)
transport_names.append(transport_name)
tranport_tag_map[transport_name] = transport
if not transport_names:
transport_names.append('NONE')
tranport_tag_map['NONE'] = module
for transport_name in transport_names:
self.data[platform_name][build_type][type_name][module_name][transport_name] = dict()
network_names = []
network_tag_map = {}
for network in tranport_tag_map[transport_name].findall(SPEC_ELM.NETWORK):
network_name = network.get(SPEC_ATT.NAME)
network_names.append(network_name)
network_tag_map[network_name] = network
if not network_names:
network_names.append('NONE')
network_tag_map['NONE'] = tranport_tag_map[transport_name]
for network_name in network_names:
self.data[platform_name][build_type][type_name][module_name][transport_name][network_name] = dict()
for testsuite in network_tag_map[network_name].findall(SPEC_ELM.TESTSUITE):
suite_name = testsuite.get(SPEC_ATT.NAME)
self.data[platform_name][build_type][type_name][module_name][transport_name][network_name][suite_name] = dict()
for testcase in testsuite.findall(SPEC_ELM.TESTCASE):
spec = TestSpec(int(testcase.get(SPEC_ATT.FILELINE)), suite_name, testcase.get(SPEC_ATT.NAME))
self.data[platform_name][build_type][type_name][module_name][transport_name][network_name][suite_name][spec.name] = spec
for tag in list(TAG_DIC):
tag_elm = testcase.find(tag)
if (tag_elm == None):
spec.__dict__[tag] = ''
else:
spec.__dict__[tag] = tag_elm.text
except ParseError:
print("There is a Parse Error on " + path)
def read_summary_xml(self, path):
try :
doc = ElementTree(file=path)
#testspec = doc.find(SPEC_ELM.TESTSPEC)
summary = doc.find(SPEC_ELM.SUMMARY)
for platform in summary.findall(SPEC_ELM.PLATFORM):
platform_name = platform.get(SPEC_ATT.NAME)
#platform_no = platform.get(SPEC_ATT.NO)
#platform_review_no = platform.get(SPEC_ATT.REVIEW)
self.summary[platform_name] = dict()
self.review_summary[platform_name] = dict()
for transport in summary.findall(SPEC_ELM.TRANSPORT):
transport_name = transport.get(SPEC_ATT.NAME)
self.summary[platform_name][transport_name] = dict()
self.review_summary[platform_name][transport_name] = dict()
for network in summary.findall(SPEC_ELM.NETWORK):
network_name = network.get(SPEC_ATT.NAME)
self.summary[platform_name][transport_name][network_name] = dict()
self.review_summary[platform_name][transport_name][network_name] = dict()
for tctype in platform.findall(SPEC_ELM.TYPE):
type_name = tctype.get(SPEC_ATT.NAME)
#type_no = tctype.get(SPEC_ATT.NO)
#type_review_no = tctype.get(SPEC_ATT.REVIEW)
self.summary[platform_name][transport_name][network_name][type_name] = dict()
self.review_summary[platform_name][transport_name][network_name][type_name] = dict()
for module in tctype.findall(SPEC_ELM.MODULE):
module_name = module.get(SPEC_ATT.NAME)
module_no = module.get(SPEC_ATT.NO)
module_review_no = module.get(SPEC_ATT.REVIEW)
self.summary[platform_name][transport_name][network_name][type_name][module_name] = int(module_no);
self.review_summary[platform_name][transport_name][network_name][type_name][module_name] = int(module_review_no);
except ParseError:
print("There is a Parse Error on " + path)
```
#### File: ite/webreporter/tc_list_reporter.py
```python
from xml.dom.minidom import Document
from xml.etree.ElementTree import ElementTree, ParseError
from ite.tc.xmlanalyzer import TCXMLAnalyzer
from ite.webreporter.webreporter_util import *
from ite.constants import *
from ite.config import *
from ite.util import *
class TCListReporter:
def report(self, src_path, dest_path):
analyzer = TCXMLAnalyzer();
analyzer.read_spec_xml(src_path);
xml = open_write_file(dest_path)
if xml == False:
return
doc = Document()
package = create_xml_element(doc, doc, WEB_REPORT.PACKAGE)
for platform in TEST_PLATFORM:
if not platform in analyzer.data:
continue;
platform_elm = create_xml_element(doc, package, WEB_REPORT.TC)
platform_elm.setAttribute(WEB_MODEL_ATT.NAME, platform)
for tctype in TESTCASE_TYPES:
if not tctype in analyzer.data[platform]:
continue;
tctype_elm = create_xml_element(doc, platform_elm, WEB_REPORT.TC)
tctype_elm.setAttribute(WEB_MODEL_ATT.NAME, tctype)
for module in TESTSUITE_MODULES:
if not module in analyzer.data[platform][tctype]:
continue;
module_elm = create_xml_element(doc, tctype_elm, WEB_REPORT.TC)
module_elm.setAttribute(WEB_MODEL_ATT.NAME, module)
for testsuite in analyzer.data[platform][tctype][module]:
testsuite_elm = create_xml_element(doc, module_elm, WEB_REPORT.TC)
testsuite_elm.setAttribute(WEB_MODEL_ATT.NAME, testsuite)
for testcase in analyzer.data[platform][tctype][module][testsuite]:
testcase_elm = create_xml_element(doc, testsuite_elm, WEB_REPORT.TC)
testcase_elm.setAttribute(WEB_MODEL_ATT.NAME, testcase)
testcase_elm.setAttribute(WEB_REPORT.KEY, "%s_%s_%s_%s_%s" %(platform, tctype, module, testsuite, testcase))
doc.writexml(xml, '\t', '\t', '\n', 'UTF-8')
xml.close()
def analyze(self, src_path):
try :
testgroup = dict()
doc = ElementTree(file=src_path)
for platform in doc.findall(WEB_REPORT.TC):
#platform_name = platform.get(SPEC_ATT.NAME)
for tctype in platform.findall(WEB_REPORT.TC):
type_name = tctype.get(SPEC_ATT.NAME)
for module in tctype.findall(WEB_REPORT.TC):
module_name = module.get(SPEC_ATT.NAME)
tc_filter = "%s_%s" % (module_name, type_name)
tc_filter = tc_filter.lower()
testgroup[tc_filter] = dict()
print("### Filter : " + tc_filter)
for testsuite in module.findall(WEB_REPORT.TC):
suite_name = testsuite.get(SPEC_ATT.NAME)
for testcase in testsuite.findall(WEB_REPORT.TC):
tc_name = testcase.get(SPEC_ATT.NAME)
tc_key = testcase.get(WEB_REPORT.KEY)
testcase_filter = "%s.%s" % (suite_name, tc_name)
testgroup[tc_filter][testcase_filter] = tc_key
return testgroup
except ParseError:
print("There is a Parse Error on " + src_path)
```
#### File: test_manager/robottcxmlparser/defectworksheet.py
```python
import xlsxwriter
class DefectWorkSheet:
def __init__(self, workbook):
self.row = 0
self.column = 0
self.defect_worksheet = workbook.add_worksheet("Defects")
self.header_format = workbook.add_format({'bold': True, 'align':'center'})
self.header_format.set_bg_color('#F5DA81')
self.normal_format = workbook.add_format()
self.center_format = workbook.add_format({'align':'center'})
self.normal_format.set_align('top')
self.defect_worksheet.set_column(0, 0, 3)
self.defect_worksheet.write('A1', 'No.', self.header_format)
self.defect_worksheet.set_column(1, 1, 6)
self.defect_worksheet.write('B1', 'Module', self.header_format)
self.defect_worksheet.set_column(2, 2, 10)
self.defect_worksheet.write('C1', 'Test Suite', self.header_format)
self.defect_worksheet.set_column(3, 3, 25)
self.defect_worksheet.write('D1', 'Test Case', self.header_format)
self.defect_worksheet.write('E1', 'Fail Rate', self.header_format)
self.defect_worksheet.write('F1', 'Result', self.header_format)
self.defect_worksheet.write('G1', 'Time', self.header_format)
self.defect_worksheet.set_column(7, 7, 20)
self.defect_worksheet.write('H1', 'Defect', self.header_format)
self.defect_worksheet.write('I1', 'JIRA ID', self.header_format)
self.defect_worksheet.set_column(9, 9, 20)
self.defect_worksheet.write('J1', 'Description', self.header_format)
self.defect_worksheet.write('K1', 'First App Log', self.header_format)
self.defect_worksheet.write('L1', 'Second App Log', self.header_format)
def write_defect_desc(self, tc_suite_obj):
for tc_obj in tc_suite_obj.fail_test_list:
self.row += 1
self.column = 0
tc_result = "PASS"
if tc_obj.result == 0:
tc_result = "FAIL"
self.defect_worksheet.write(self.row, 0, self.row, self.normal_format)
self.defect_worksheet.write(self.row, 1, tc_suite_obj.module_name, self.center_format)
self.defect_worksheet.write(self.row, 2, tc_obj.suite_name, self.normal_format)
self.defect_worksheet.write(self.row, 3, tc_obj.name, self.normal_format)
self.defect_worksheet.write(self.row, 5, tc_result, self.center_format)
self.defect_worksheet.write(self.row, 9, tc_obj.description, self.normal_format)
self.defect_worksheet.write(self.row, 10, tc_obj.log_files[0], self.normal_format)
self.defect_worksheet.write(self.row, 11, tc_obj.log_files[1], self.normal_format)
```
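A short sketch of how this worksheet might be driven. The suite object is faked with `SimpleNamespace` so the snippet stands alone; in the real pipeline it comes from the robot XML parser, and the output file name is a placeholder.
```python
from types import SimpleNamespace

import xlsxwriter

from defectworksheet import DefectWorkSheet  # import path is an assumption

# Fake a suite with one failing test case, mirroring the attributes used above.
failing_tc = SimpleNamespace(result=0, suite_name='ca_suite', name='tc_connect_n',
                             description='connect with invalid host',
                             log_files=['first.log', 'second.log'])
suite = SimpleNamespace(module_name='CA', fail_test_list=[failing_tc])

workbook = xlsxwriter.Workbook('defects.xlsx')
sheet = DefectWorkSheet(workbook)
sheet.write_defect_desc(suite)
workbook.close()
```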
#### File: test_manager/robottcxmlparser/tcworksheet.py
```python
class TcWorkSheet:
testcase_work_sheet = None
cell_format = None
def __init__(self, workbook):
self.row = 0
self.column = 0
self.testcase_work_sheet = workbook.add_worksheet("Test Case")
# Add a bold format to use to highlight cells.
self.header_format = workbook.add_format({'bold': True, 'align':'center'})
self.header_format.set_bg_color('#F5DA81')
self.cell_format = workbook.add_format({'text_wrap': True})
self.cell_format.set_align('top')
self.testcase_work_sheet.set_column(0, 0, 5)
self.testcase_work_sheet.write('A1', 'No.', self.header_format)
self.testcase_work_sheet.set_column(0, 0, 2)
self.testcase_work_sheet.write('B1', 'Test Configuration', self.header_format)
self.testcase_work_sheet.set_column(0, 0, 10)
self.testcase_work_sheet.write('C1', 'Test Suite Name', self.header_format)
self.testcase_work_sheet.set_column(2, 2, 15)
self.testcase_work_sheet.write('D1', 'Test Case Name', self.header_format)
self.testcase_work_sheet.write('E1', 'Objective', self.header_format)
self.testcase_work_sheet.set_column(5, 5, 15)
self.testcase_work_sheet.write('F1', 'Description', self.header_format)
self.testcase_work_sheet.set_column(10, 10, 10)
self.testcase_work_sheet.write('G1', 'Result', self.header_format)
self.testcase_work_sheet.write('H1', 'Logs', self.header_format)
def write_tc_desc(self, test_suite_obj):
platform = test_suite_obj.test_env["os1"] + '_' + test_suite_obj.test_env["os2"]
sdk = test_suite_obj.test_env["sdk1"] + '_' + test_suite_obj.test_env["sdk2"]
details = "Security = " + test_suite_obj.test_env["secured"] + "\nTransport = " + test_suite_obj.test_env["transport"] + "\nQoS = " + test_suite_obj.test_env["qos"] + "\nTLS = " + test_suite_obj.test_env["tls"]
test_configuration = "Platform :\n" + platform + "\n" + "Sdk :\n" + sdk + "\n" + "Details :\n" + details
for tc_obj in test_suite_obj.test_list:
self.row += 1
tc_result = "PASS"
if tc_obj.result == 0:
tc_result = "FAIL"
self.testcase_work_sheet.write(self.row, 0, self.row, self.cell_format)
self.testcase_work_sheet.write(self.row, 1, test_configuration, self.cell_format)
self.testcase_work_sheet.write(self.row, 2, tc_obj.suite_name, self.cell_format)
self.testcase_work_sheet.write(self.row, 3, tc_obj.name, self.cell_format)
self.testcase_work_sheet.write(self.row, 4, tc_obj.objective, self.cell_format)
self.testcase_work_sheet.write(self.row, 5, tc_obj.description, self.cell_format)
self.testcase_work_sheet.write(self.row, 6, tc_result, self.cell_format)
logs = "First App Log:\n" + tc_obj.log_files[0] + "Second App Log:\n" + tc_obj.log_files[1]
self.testcase_work_sheet.write(self.row, 7, logs,
self.cell_format)
```
#### File: test_manager/robottcxmlparser/testsuite.py
```python
from .testcase import *
from lxml import etree
class TestSuite:
def __init__(self):
self.test_env = dict()
self.test_list = list()
self.fail_test_list = list()
self.platform = Utility.platform.replace("_", "")
self.network = ""
self.total_pass = 0
self.total_fail = 0
self.total_not_run = 0
self.version = ""
self.bit = ""
self.module_name = ""
def test_case_parse(self, test_case_list):
for tc in test_case_list:
tc_obj = TestCase(tc, self.module_name, self.test_env)
tc_obj.get_tc_content()
self.test_list.append(tc_obj)
if tc_obj.result == 1:
self.total_pass += 1
else:
self.total_fail += 1
self.fail_test_list.append(tc_obj)
def set_test_env(self, suite_setup_content):
test_env_node = suite_setup_content.xpath(Constants.test_env_xpath)
logs = test_env_node[0].find("kw")
msg = logs[2].text
tc_env_list = msg.split()
print(tc_env_list)
self.test_env["os1"] = tc_env_list[0]
self.test_env["os2"] = tc_env_list[1]
self.test_env["sdk1"] = tc_env_list[2]
self.test_env["sdk2"] = tc_env_list[3]
self.test_env["transport"] = tc_env_list[4]
self.test_env["secured"] = tc_env_list[5]
self.test_env["qos"] = tc_env_list[6]
self.test_env["tls"] = ""
@staticmethod
def set_dir_name(xml_file_name):
xml_file_name_split = xml_file_name.split('-')
temp_dir_name = xml_file_name_split[-2] + '-' + xml_file_name_split[-1]
return temp_dir_name.replace(".xml", "")
```
#### File: test/test_manager/xml_reporter.py
```python
import os
import xml.etree.cElementTree as ET
import time
import platform
import sys
import fnmatch
import subprocess
import optparse
from datetime import datetime
from time import strftime
from time import sleep
import re
import collections
from configuration import *
from ite.tc.container import TestSpecContainer
from ite.reporter.tc_reporter import TestSpecReporter
from xml.etree.ElementTree import ElementTree, ParseError
from ite.tc.analyzer import TestSpec
from ite.constants import *
from ite.config import *
try:
import xlrd
except:
print ('install xlrd. Possible way to install: sudo pip3 install xlrd')
exit (0)
current = datetime.now().strftime("%Y-%m-%d_%H:%M")
test_root = '..'
oparser = optparse.OptionParser()
oparser.add_option("-p", action="store", dest="platform", default="linux")
oparser.add_option("--platform", action="store", dest="platform", default="linux")
oparser.add_option("-b", action="store", dest="branch", default="unspecified")
oparser.add_option("--branch", action="store", dest="branch", default="unspecified")
oparser.add_option("-r", action="store", dest="iotivity_root", default="../../")
oparser.add_option("--iotivity_root", action="store", dest="iotivity_root", default="../../")
oparser.add_option("-t", action="store", dest="timestamp", default=current)
oparser.add_option("--timestamp", action="store", dest="timestamp", default=current)
oparser.add_option("-d", action="store", dest="duration", default="")
oparser.add_option("--duration", action="store", dest="duration", default="")
oparser.add_option("--store_log", action="store_true", dest="store_log")
oparser.add_option("--store_path", action="store", dest="store_path", default="../../../../result/")
oparser.add_option("--not_publish", action="store_true", dest="not_publish")
oparser.add_option("--build_number", action="store", dest="build_number")
oparser.add_option("--artifacts_link", action="store", dest="artifacts_link")
opts, args = oparser.parse_args()
platform_name = opts.platform
iotivity_branch = opts.branch
iotivity_root = os.path.abspath(opts.iotivity_root)
total_duration = opts.duration
timestamp = opts.timestamp
store_log = opts.store_log
not_publish = opts.not_publish
store_path = os.path.abspath(opts.store_path)
build_number = opts.build_number
artifacts_link = opts.artifacts_link
print ('store_log: ', store_log)
cwd = os.getcwd()
os.chdir(iotivity_root)
rc = subprocess.check_output('git rev-parse HEAD', shell=True)
log = re.sub(r'(b\'|\')', '', str(rc))
log = log.strip()
log = log.replace('\\n', '')
iotivity_commit_id = log
os.chdir(cwd)
rc = subprocess.check_output('git rev-parse HEAD', shell=True)
log = re.sub(r'(b\'|\')', '', str(rc))
log = log.strip()
log = log.replace('\\n', '')
test_commit_id = log
final_xml_filename = os.path.join(TEST_REPORT_DIR, 'xml_report.xml')
result_dir = TEST_RESULT_DIR
modules = TESTSUITE_MODULES
pass_count_id = 'pass'
fail_count_id = 'fail'
total_count_id = 'total'
na_count_id = 'na'
build_iotivity = 'build_iotivity'
build_test = 'build_test'
test_pre_condition = 'test_pre_condition'
system_failure = 'system_failure'
tc_result_count = {}
for module in modules:
tc_result_count[module] = {}
tc_result_count[module][pass_count_id] = 0
tc_result_count[module][fail_count_id] = 0
tc_result_count[module][total_count_id] = 0
tc_result_count[module][na_count_id] = 0
def read_spec_xml(path, searching_platform):
try :
doc = ElementTree(file=path)
testspec = doc.find(SPEC_ELM.SUMMARY)
for platform in testspec.findall(SPEC_ELM.PLATFORM):
if platform.get(SPEC_ATT.NAME) != searching_platform:
continue
for build_type in platform.findall(SPEC_ELM.BUILDTYPE):
for tctype in build_type.findall(SPEC_ELM.TYPE):
for module in tctype.findall(SPEC_ELM.MODULE):
module_name = module.get(SPEC_ATT.NAME)
tc_result_count[module_name][total_count_id] += int(module.get(SPEC_ATT.NO))
except ParseError:
print("There is a Parse Error on " + path)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
testspec_path = os.path.join(result_dir, TEST_SPEC_XML_FOR_RESULT)
if not os.path.exists(testspec_path) and os.path.exists(API_TC_SRC_DIR):
container = TestSpecContainer()
container.extract_api_testspec(API_TC_SRC_DIR, '')
reporter = TestSpecReporter()
reporter.generate_testspec_report(container.data)
reporter.report('XML', testspec_path)
read_spec_xml(result_dir + os.sep + 'TestSpec.xml', platform_name.upper())
print ('result_dir: ' + result_dir)
os.system('rm -rf ' + result_dir + os.sep + 'xml_report.xml')
os.system('rm -rf ' + result_dir + os.sep + '__merge_report__')
os.system('rm -rf ' + result_dir + os.sep + 'MERGE_REPORT*')
os.system('mkdir ' + result_dir + os.sep + '__merge_report__')
os.system('cp -u ' + result_dir + os.sep + '*_TestResult_*' + os.sep + platform_name + '_*.xml ' + result_dir + os.sep + '__merge_report__' + os.sep)
os.system('cp -u ' + result_dir + os.sep + '*_TestResult_*' + os.sep + platform_name + '_*.log ' + result_dir + os.sep + '__merge_report__' + os.sep)
os.system('cp -u ' + result_dir + os.sep + '__*_*__' + os.sep + platform_name + '_*.xml ' + result_dir + os.sep + '__merge_report__' + os.sep)
os.system('./result_reporter.py ' + result_dir + os.sep + '__merge_report__ merge_report')
list_of_files = []
for path, subdirs, files in os.walk(result_dir):
for fname in files:
if path.startswith(result_dir + os.sep + 'MERGE_REPORT_') and fname.startswith('TestResult_') and fname.endswith('.xlsx'):
print (path)
list_of_files.append(os.path.join(path, fname))
sep = ':'
with open(TEST_REPORT_DIR + os.sep + 'test_pre_info.txt') as f:
lines = f.readlines()
for line in lines:
line = line.lower()
if line.startswith(na_count_id + sep):
infos = line.split(sep)
if len(infos) > 1:
module = infos[1].upper()
if len(infos) > 2 and platform_name in infos[2]:
if len(infos) > 3 and infos[3]:
tc_result_count[module][na_count_id] = int(infos[3])
else:
tc_result_count[module][na_count_id] = tc_result_count[module][total_count_id]
for fname in list_of_files:
worksheet = xlrd.open_workbook(fname).sheet_by_index(0)
num_rows = worksheet.nrows - 1
curr_row = -1
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
c0 = worksheet.cell_value(curr_row, 0)
for module in modules:
if c0.upper() == module.upper():
try:
tc_result_count[c0][pass_count_id] += int(worksheet.cell_value(curr_row, 1)) + int(worksheet.cell_value(curr_row, 4))
tc_result_count[c0][fail_count_id] += int(worksheet.cell_value(curr_row, 2)) + int(worksheet.cell_value(curr_row, 5))
except:
print ('Problem with Parsing module ' + module)
total_tc = 0
total_pass = 0
total_fail = 0
total_na = 0
total_ne = 0
pass_rate = 100.00
status_set = set()
robot_execution_status_file = '../src/automation/robot/report/robot_execution_status.txt'
if os.path.exists(robot_execution_status_file):
with open(robot_execution_status_file) as f:
lines = f.readlines()
print ('reading robot_execution_status.txt')
for line in lines:
line = line.lower().strip()
status_set.add(line)
for line in status_set:
if line.startswith(fail_count_id + sep):
infos = line.split(sep)
if len(infos) > 1:
module = infos[1].upper()
if len(infos) > 2 and platform_name in infos[2]:
if len(infos) > 3 and infos[3]:
print (infos[3])
if infos[3] == build_iotivity or infos[3] == test_pre_condition:
cnt = tc_result_count[module][total_count_id] - tc_result_count[module][pass_count_id] - tc_result_count[module][fail_count_id] - tc_result_count[module][na_count_id]
tc_result_count[module][fail_count_id] += cnt
for module in modules:
total_tc += tc_result_count[module][total_count_id]
total_pass += tc_result_count[module][pass_count_id]
total_fail += tc_result_count[module][fail_count_id]
total_na += tc_result_count[module][na_count_id]
total_ne += tc_result_count[module][total_count_id] - tc_result_count[module][pass_count_id] - tc_result_count[module][fail_count_id] - tc_result_count[module][na_count_id]
if (total_tc - total_na) > 0:
    pass_rate = (total_pass / (total_tc - total_na)) * 100
report = ET.Element("report")
report.append(ET.ProcessingInstruction('xml-stylesheet', 'type="text/xsl" href="style-report.css"'))
label = ET.SubElement(report, "label")
label.text = 'Target Platform: ' + platform_name
if build_number:
label = ET.SubElement(report, "label")
label.text = 'Build Number: ' + build_number
if artifacts_link:
label = ET.SubElement(report, "inline_label")
label.text = 'Build Artifacts Link:'
label = ET.SubElement(report, "a")
label.text = artifacts_link
if total_duration:
label = ET.SubElement(report, "label")
label.text = 'Total duration: ' + total_duration
label = ET.SubElement(report, "label")
if iotivity_commit_id != iotivity_branch:
label.text = 'Iotivity Branch/Tag: ' + iotivity_branch + ' , Commit Id: ' + iotivity_commit_id
else:
label.text = 'Iotivity Commit Id: ' + iotivity_commit_id
label = ET.SubElement(report, "label")
label.text = 'Test Commit Id: ' + test_commit_id
summary = ET.SubElement(report, "summary")
ET.SubElement(summary, "data-caption").text = 'Summary'
summary_row = ET.SubElement(summary, 'summary-row')
data_row = ET.SubElement(summary, 'data-row')
ET.SubElement(summary_row, 'data-header').text = 'Total TCs'
ET.SubElement(summary_row, 'data-header').text = 'Pass'
ET.SubElement(summary_row, 'data-header').text = 'Fail'
ET.SubElement(summary_row, 'data-header').text = 'NA'
ET.SubElement(summary_row, 'data-header').text = 'NE'
ET.SubElement(summary_row, 'data-header').text = 'Pass Rate'
ET.SubElement(data_row, 'data-cell').text = str(total_tc)
ET.SubElement(data_row, 'data-cell').text = str(total_pass)
ET.SubElement(data_row, 'data-cell').text = str(total_fail)
ET.SubElement(data_row, 'data-cell').text = str(total_na)
ET.SubElement(data_row, 'data-cell').text = str(total_ne)
ET.SubElement(data_row, 'data-cell').text = "{0:.2f}".format(pass_rate)
data = ET.SubElement(report, "data")
ET.SubElement(data, 'data-caption').text = 'Result Details'
data_row1 = ET.SubElement(data, 'data-row')
ET.SubElement(data_row1, 'data-header').text = 'Modules'
ET.SubElement(data_row1, 'data-header').text = 'TCs'
ET.SubElement(data_row1, 'data-header').text = 'Pass'
ET.SubElement(data_row1, 'data-header').text = 'Fail'
ET.SubElement(data_row1, 'data-header').text = 'NA'
ET.SubElement(data_row1, 'data-header').text = 'NE'
ET.SubElement(data_row1, 'data-header').text = 'Pass Rate'
for module in modules:
if tc_result_count[module][total_count_id] == 0:
continue
data_row2 = ET.SubElement(data, 'data-row')
ET.SubElement(data_row2, 'data-cell').text = module
ET.SubElement(data_row2, 'data-cell').text = str(tc_result_count[module][total_count_id])
ET.SubElement(data_row2, 'data-cell').text = str(tc_result_count[module][pass_count_id])
ET.SubElement(data_row2, 'data-cell').text = str(tc_result_count[module][fail_count_id])
ET.SubElement(data_row2, 'data-cell').text = str(tc_result_count[module][na_count_id])
total_ne = tc_result_count[module][total_count_id] - tc_result_count[module][pass_count_id] - tc_result_count[module][fail_count_id] - tc_result_count[module][na_count_id]
if total_ne < 0:
total_ne = 0
ET.SubElement(data_row2, 'data-cell').text = str(total_ne)
if (tc_result_count[module][pass_count_id] - tc_result_count[module][fail_count_id]) > (tc_result_count[module][total_count_id] - tc_result_count[module][na_count_id]):
ET.SubElement(data_row2, 'data-cell').text = "Error!!!"
continue
if (tc_result_count[module][total_count_id] - tc_result_count[module][na_count_id]) > 0:
pass_rate = (tc_result_count[module][pass_count_id] / (tc_result_count[module][total_count_id] - tc_result_count[module][na_count_id]))*100
else:
pass_rate = 100
ET.SubElement(data_row2, 'data-cell').text = "{0:.2f}".format(pass_rate)
tree = ET.ElementTree(report)
tree.write(final_xml_filename)
prefix = '<?xml version="1.0" encoding="UTF-8" standalone="no"?><?xml-stylesheet href="style-report.css"?>'
with open(final_xml_filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(prefix + '\n' + content)
if store_log:
log_folder = 'log_' + platform_name
if 'linux' in platform_name.lower():
log_folder += '_' + platform.architecture()[0]
log_path = os.path.join(store_path, log_folder)
robot_log_path = os.path.join(log_path, 'robot_fw_log')
execution_result_path = os.path.join(log_path, 'execution_result')
bin_path = os.path.join(log_path, 'bin')
defect_path = os.path.join(log_path, 'defect')
print ('store_path: ', store_path)
print ('log_path: ', log_path)
os.system('rm -rf ' + store_path + '*')
os.system('mkdir -p ' + log_path)
os.system('mkdir -p ' + bin_path)
os.system('mkdir -p ' + robot_log_path)
os.system('cp ' + TEST_REPORT_DIR + os.sep + 'test_pre_info.txt' + ' ' + log_path)
os.system('cp ' + TEST_REPORT_DIR + os.sep + 'style-report.css' + ' ' + store_path)
os.system('cp ' + TEST_REPORT_DIR + os.sep + 'xml_report.xml' + ' ' + store_path + os.sep + 'report.xml')
os.system('cp -r ' + result_dir + ' ' + execution_result_path)
temp_path = os.path.join(test_root, 'src', 'automation', 'robot', 'report')
os.system('cp ' + temp_path + os.sep + '*.txt ' + log_path)
robot_report_platform_path = os.path.join(test_root, 'src', 'automation', 'robot', 'report', 'api', platform_name)
os.system('cp -r ' + robot_report_platform_path + os.sep + '* ' + robot_log_path)
os.system('mv ' + robot_log_path + os.sep + 'bin_* ' + bin_path)
os.system('cp ' + execution_result_path + os.sep + 'MERGE_REPORT_TestResult_*' + os.sep + '*.xlsx ' + log_path)
for module in modules:
os.system('mkdir -p ' + defect_path + os.sep + module)
fail_tc_path = execution_result_path
fail_tc_path += os.sep + 'MERGE_REPORT_TestResult_*'
fail_tc_path += os.sep + 'Failed'
if 'ca' == module.lower():
os.system('cp -r ' + fail_tc_path + os.sep + '*' + os.sep + module.upper() + '* '+ defect_path + os.sep + module)
else:
command = 'cp ' + fail_tc_path + os.sep + platform_name.upper() + '_*__' + os.sep + module.upper() + '*.txt ' + defect_path + os.sep + module
os.system(command)
file_path_list = []
search_text_list = []
if os.path.exists(robot_report_platform_path):
for each_file in os.listdir(robot_report_platform_path):
if os.path.isfile(os.path.join(robot_report_platform_path, each_file)):
if each_file.endswith('.txt'):
if each_file.startswith('pre_condition_' + module.lower() + '_'):
file_path_list.append(os.path.join(robot_report_platform_path, each_file))
search_text_list.append('Execution Status: pass')
elif each_file.startswith('build_iotivity_' + module.lower()):
file_path_list.append(os.path.join(robot_report_platform_path, each_file))
search_text_list.append('Build Status: pass')
for i in range(len(file_path_list)):
with open(file_path_list[i]) as fp:
status_pass = False
for line in fp:
if search_text_list[i].lower() == line.lower().strip():
status_pass = True
break
if not status_pass:
os.system('cp ' + file_path_list[i] + ' ' + defect_path + os.sep + module)
os.system('find ' + defect_path + ' -type d -empty -delete')
cwd = os.getcwd()
os.chdir(store_path)
os.system('zip -rq ' + 'log.zip ' + log_folder)
os.chdir(cwd)
#os.system('rm -rf ' + log_path)
print ('done storing log')
``` |
{
"source": "jonghoonlee98/ClassroomResponse",
"score": 2
} |
#### File: classroom_response/classroom/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.html import escape, mark_safe
import jsonfield
class User(AbstractUser):
is_student = models.BooleanField(default=False)
is_teacher = models.BooleanField(default=False)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.CharField(max_length=50, unique=True)
username = models.CharField(max_length = 30)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name',]
class Subject(models.Model):
name = models.CharField(max_length=30)
color = models.CharField(max_length=7, default='#007bff')
def __str__(self):
return self.name
def get_html_badge(self):
name = escape(self.name)
color = escape(self.color)
html = '<span class="badge badge-primary" style="background-color: %s">%s</span>' % (color, name)
return mark_safe(html)
class Course(models.Model):
owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='courses')
name = models.CharField(max_length=255)
code = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Quiz(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='quizzes')
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Question(models.Model):
quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE, related_name='questions')
text = models.CharField('Label (text only)', max_length=255)
latex = models.CharField('Question (latex allowed)', max_length= 255)
question_type = models.CharField('Type', max_length=255, default='MC')
image = models.ImageField(upload_to='images/', default=None, blank=True, null=True)
is_active = models.BooleanField('Active', default=False)
    position = models.IntegerField('Position', default=0)
def __str__(self):
return self.text
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='answers')
data = jsonfield.JSONField(null=True)
    def __str__(self):
        # Answer has no `text` field; represent it by its JSON payload instead.
        return str(self.data)
class Student(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
quizzes = models.ManyToManyField(Quiz, through='TakenQuiz')
interests = models.ManyToManyField(Subject, related_name='interested_students')
courses = models.ManyToManyField(Course, related_name='student_courses')
def get_unanswered_questions(self, quiz):
answered_questions = self.quiz_answers \
.filter(answer__question__quiz=quiz) \
.values_list('answer__question__pk', flat=True)
questions = quiz.questions.exclude(pk__in=answered_questions).order_by('text')
return questions
def get_courses(self):
return self.courses
def __str__(self):
return self.user.username
class TakenQuiz(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='taken_quizzes')
quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE, related_name='taken_quizzes')
score = models.FloatField()
date = models.DateTimeField(auto_now_add=True)
class StudentAnswer(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='quiz_answers')
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='+', null=True)
submission = jsonfield.JSONField(null=True)
```
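A hedged sketch of how these models fit together, for example from `python manage.py shell` after migrations. The account details and the JSON answer layout are assumptions based on how `parse_data.py` below reads `Answer.data`.
```python
import json

from classroom.models import User, Course, Quiz, Question, Answer

teacher = User.objects.create_user(username='teacher1', email='t@example.com',
                                   password='secret', is_teacher=True,
                                   first_name='Ada', last_name='Lovelace')
course = Course.objects.create(owner=teacher, name='Physics 101', code='PHY101')
quiz = Quiz.objects.create(course=course, name='Week 1 quiz')

question = Question.objects.create(quiz=quiz, text='Newton question',
                                   latex=r'What is $F = ma$ called?', question_type='MC')
# Data is stored as a JSON string, matching the json.loads() call in parse_data.py.
Answer.objects.create(question=question, data=json.dumps({
    'answer': [
        {'text': "Newton's second law", 'is_correct': True},
        {'text': "Ohm's law", 'is_correct': False},
    ]
}))
```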
#### File: classroom_response/classroom/parse_data.py
```python
import json
from django.core import serializers
from classroom.models import *
def parse_MC(question):
answers = Answer.objects.filter(question = question)
answer = None
if len(answers):
data = json.loads(answers[0].data)
answer = data['answer']
for a in answer:
del a['is_correct']
send_data = {
'type': 'present',
'question_type': 'MC',
'text': question.text,
'latex': question.latex,
'answers': answer,
'question_pk': question.pk
}
return send_data
def parse_NU(question):
answers = Answer.objects.filter(question = question)
units = None
if len(answers):
data = json.loads(answers[0].data)
answer = data['answer']
if 'units' in data:
units = data['units']
send_data = {
'type': 'present',
'question_type': 'NU',
'text': question.text,
'latex': question.latex,
'units': units,
'question_pk': question.pk
}
return send_data
``` |
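A small sketch of how these helpers would typically be used when presenting a question to students. The primary key and the final send step are placeholders; both helpers just return a plain dict.
```python
import json

from classroom.models import Question
from classroom.parse_data import parse_MC, parse_NU

question = Question.objects.get(pk=1)  # hypothetical question id
if question.question_type == 'MC':
    payload = parse_MC(question)
else:
    payload = parse_NU(question)

# The dict is JSON-serialisable, e.g. for a websocket broadcast to the class.
message = json.dumps(payload)
```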
{
"source": "jongho-park/EAST",
"score": 2
} |
#### File: jongho-park/EAST/convert_mlt.py
```python
import json
import os
import os.path as osp
from glob import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader, ConcatDataset, Dataset
SRC_DATASET_DIR = '/data/datasets/ICDAR17_MLT' # FIXME
DST_DATASET_DIR = '/data/datasets/ICDAR17_Korean' # FIXME
NUM_WORKERS = 32 # FIXME
IMAGE_EXTENSIONS = {'.gif', '.jpg', '.png'}
LANGUAGE_MAP = {
'Korean': 'ko',
'Latin': 'en',
'Symbols': None
}
def get_language_token(x):
return LANGUAGE_MAP.get(x, 'others')
def maybe_mkdir(x):
if not osp.exists(x):
os.makedirs(x)
class MLT17Dataset(Dataset):
def __init__(self, image_dir, label_dir, copy_images_to=None):
image_paths = {x for x in glob(osp.join(image_dir, '*')) if osp.splitext(x)[1] in
IMAGE_EXTENSIONS}
label_paths = set(glob(osp.join(label_dir, '*.txt')))
assert len(image_paths) == len(label_paths)
sample_ids, samples_info = list(), dict()
for image_path in image_paths:
sample_id = osp.splitext(osp.basename(image_path))[0]
label_path = osp.join(label_dir, 'gt_{}.txt'.format(sample_id))
assert label_path in label_paths
words_info, extra_info = self.parse_label_file(label_path)
if 'ko' not in extra_info['languages'] or extra_info['languages'].difference({'ko', 'en'}):
continue
sample_ids.append(sample_id)
samples_info[sample_id] = dict(image_path=image_path, label_path=label_path,
words_info=words_info)
self.sample_ids, self.samples_info = sample_ids, samples_info
self.copy_images_to = copy_images_to
def __len__(self):
return len(self.sample_ids)
def __getitem__(self, idx):
sample_info = self.samples_info[self.sample_ids[idx]]
image_fname = osp.basename(sample_info['image_path'])
image = Image.open(sample_info['image_path'])
img_w, img_h = image.size
if self.copy_images_to:
maybe_mkdir(self.copy_images_to)
image.save(osp.join(self.copy_images_to, osp.basename(sample_info['image_path'])))
license_tag = dict(usability=True, public=True, commercial=True, type='CC-BY-SA',
holder=None)
sample_info_ufo = dict(img_h=img_h, img_w=img_w, words=sample_info['words_info'], tags=None,
license_tag=license_tag)
return image_fname, sample_info_ufo
def parse_label_file(self, label_path):
def rearrange_points(points):
start_idx = np.argmin([np.linalg.norm(p, ord=1) for p in points])
if start_idx != 0:
points = np.roll(points, -start_idx, axis=0).tolist()
return points
with open(label_path, encoding='utf-8') as f:
lines = f.readlines()
words_info, languages = dict(), set()
for word_idx, line in enumerate(lines):
items = line.strip().split(',', 9)
language, transcription = items[8], items[9]
points = np.array(items[:8], dtype=np.float32).reshape(4, 2).tolist()
points = rearrange_points(points)
illegibility = transcription == '###'
orientation = 'Horizontal'
language = get_language_token(language)
words_info[word_idx] = dict(
points=points, transcription=transcription, language=[language],
illegibility=illegibility, orientation=orientation, word_tags=None
)
languages.add(language)
return words_info, dict(languages=languages)
def main():
dst_image_dir = osp.join(DST_DATASET_DIR, 'images')
# dst_image_dir = None
mlt_train = MLT17Dataset(osp.join(SRC_DATASET_DIR, 'raw/ch8_training_images'),
osp.join(SRC_DATASET_DIR, 'raw/ch8_training_gt'),
copy_images_to=dst_image_dir)
mlt_valid = MLT17Dataset(osp.join(SRC_DATASET_DIR, 'raw/ch8_validation_images'),
osp.join(SRC_DATASET_DIR, 'raw/ch8_validation_gt'),
copy_images_to=dst_image_dir)
mlt_merged = ConcatDataset([mlt_train, mlt_valid])
anno = dict(images=dict())
with tqdm(total=len(mlt_merged)) as pbar:
for batch in DataLoader(mlt_merged, num_workers=NUM_WORKERS, collate_fn=lambda x: x):
image_fname, sample_info = batch[0]
anno['images'][image_fname] = sample_info
pbar.update(1)
ufo_dir = osp.join(DST_DATASET_DIR, 'ufo')
maybe_mkdir(ufo_dir)
with open(osp.join(ufo_dir, 'train.json'), 'w') as f:
json.dump(anno, f, indent=4)
if __name__ == '__main__':
main()
```
#### File: jongho-park/EAST/train.py
```python
import os
import os.path as osp
import time
import math
from datetime import timedelta
from argparse import ArgumentParser
import torch
from torch import cuda
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from tqdm import tqdm
from east_dataset import EASTDataset
from dataset import SceneTextDataset
from model import EAST
def parse_args():
parser = ArgumentParser()
# Conventional args
parser.add_argument('--data_dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR',
'trained_models'))
parser.add_argument('--device', default='cuda' if cuda.is_available() else 'cpu')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--image_size', type=int, default=1024)
parser.add_argument('--input_size', type=int, default=512)
parser.add_argument('--batch_size', type=int, default=12)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--max_epoch', type=int, default=200)
parser.add_argument('--save_interval', type=int, default=5)
args = parser.parse_args()
if args.input_size % 32 != 0:
raise ValueError('`input_size` must be a multiple of 32')
return args
def do_training(data_dir, model_dir, device, image_size, input_size, num_workers, batch_size,
learning_rate, max_epoch, save_interval):
dataset = SceneTextDataset(data_dir, split='train', image_size=image_size, crop_size=input_size)
dataset = EASTDataset(dataset)
num_batches = math.ceil(len(dataset) / batch_size)
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    device = torch.device(device)  # honour the --device argument rather than always forcing cuda:0
model = EAST()
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[max_epoch // 2], gamma=0.1)
model.train()
for epoch in range(max_epoch):
epoch_loss, epoch_start = 0, time.time()
with tqdm(total=num_batches) as pbar:
for img, gt_score_map, gt_geo_map, roi_mask in train_loader:
pbar.set_description('[Epoch {}]'.format(epoch + 1))
loss, extra_info = model.train_step(img, gt_score_map, gt_geo_map, roi_mask)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_val = loss.item()
epoch_loss += loss_val
pbar.update(1)
val_dict = {
'Cls loss': extra_info['cls_loss'], 'Angle loss': extra_info['angle_loss'],
'IoU loss': extra_info['iou_loss']
}
pbar.set_postfix(val_dict)
scheduler.step()
print('Mean loss: {:.4f} | Elapsed time: {}'.format(
epoch_loss / num_batches, timedelta(seconds=time.time() - epoch_start)))
if (epoch + 1) % save_interval == 0:
if not osp.exists(model_dir):
os.makedirs(model_dir)
ckpt_fpath = osp.join(model_dir, 'model_latest.pth')
torch.save(model.state_dict(), ckpt_fpath)
def main(args):
do_training(**args.__dict__)
if __name__ == '__main__':
args = parse_args()
main(args)
``` |
{
"source": "jonghough/proton",
"score": 3
} |
#### File: games/othello/minimax.py
```python
import math
from itertools import chain
import logging
import copy
from games.othello.squarecontroller import SquareController
from games.othello.movecalculator import MoveCalculator
"""
Minimax implementation for Othello (Reversi), with a default heuristic evaluation function,
calculatescore.
"""
def calculatescore2(board, color):
"""
:param board:
:param color:
:return:
"""
CORNER = 6
CENTRAL = 1
EDGE = 2
#board.print_me()
white = len([x for x in board._squares if x == SquareController.WHITE])
black = len([x for x in board._squares if x == SquareController.BLACK])
sc = 0
if black + white > 57:
return color * (white - black)
else:
if board._squares[0] == Minimax.minimax_color:
sc += CORNER
elif board._squares[0] == -Minimax.minimax_color:
sc -= CORNER
if board._squares[7] == Minimax.minimax_color:
sc += CORNER
elif board._squares[7] == -Minimax.minimax_color:
sc -= CORNER
if board._squares[56] == Minimax.minimax_color:
sc += CORNER
elif board._squares[56] == -Minimax.minimax_color:
sc -= CORNER
if board._squares[63] == Minimax.minimax_color:
sc += CORNER
elif board._squares[63] == -Minimax.minimax_color:
sc -= CORNER
for i in range(1, 8):
sq = board._squares[i]
if sq == Minimax.minimax_color:
sc += CENTRAL
elif sq == -Minimax.minimax_color:
sc -= CENTRAL
for i in [57, 58, 59, 60, 61, 62]: # range(56, 64):
sq = board._squares[i]
if sq == Minimax.minimax_color:
sc += EDGE
elif sq == -Minimax.minimax_color:
sc -= EDGE
for i in [8, 16, 24, 32, 40, 48]:
sq = board._squares[i]
if sq == Minimax.minimax_color:
sc += EDGE
elif sq == -Minimax.minimax_color:
sc -= EDGE
for i in [15, 23, 31, 39, 47, 55]:
sq = board._squares[i]
if sq == Minimax.minimax_color:
sc += EDGE
elif sq == -Minimax.minimax_color:
sc -= EDGE
return sc
def calculatescore(board, color):
"""
:param board:
:param color:
:return:
"""
white = len([x for x in board._squares if x == SquareController.WHITE])
black = len([x for x in board._squares if x == SquareController.BLACK])
sc = 0
if black + white > 50:
return color * (white - black)
else:
wm = len(MoveCalculator.get_possible_moves(board, SquareController.WHITE))
bm = len(MoveCalculator.get_possible_moves(board, SquareController.BLACK))
return color * (wm - bm)
class Minimax:
current_color = 1
minimax_color = 0
def __init__(self, depth, minimax_color_, log_level, eval_callback):
self.depth = depth
self.log_level = log_level
self.eval_callback = eval_callback
self.alpha = -100000
self.beta = 100000
Minimax.minimax_color = minimax_color_
    def changesquare(self, board, color, i, j):
        # apply the move at (i, j) for the given color on the 8x8 board
        MoveCalculator.make_move(board, i, j, 8, color)
def run_next(self, board, color, depth, ismax, possiblemoves):
hs = self.alpha
nextmax = not ismax
if not ismax:
hs = self.beta
if len(possiblemoves) == 0:
#logging.info("minimax: cannot move")
# we cannot move, so allow the opponent to move.
return self.run_minimax(board, color, None, depth, nextmax)
quit = False
for i in range(0, len(possiblemoves)):
if quit is False:
if ismax:
bc = copy.deepcopy(board)
score = self.run_minimax(bc, color, possiblemoves[i], depth, nextmax)
if score is not None:
e = score
if self.alpha <= e:
self.alpha = e
if self.beta <= self.alpha:
return self.alpha
else:
bc = copy.deepcopy(board)
score = self.run_minimax(bc, color, possiblemoves[i], depth, nextmax)
if score is not None:
e = score
if self.beta >= e:
self.beta = e
if self.beta <= self.alpha:
return self.beta
if ismax:
return self.alpha
else:
return self.beta
def run_minimax(self, board, color, position, depth, ismax):
if position is not None:
self.changesquare(board, color, position[0], position[1])
if depth >= self.depth:
return self.eval_callback(board, Minimax.current_color)
else:
possiblemoves = MoveCalculator.get_possible_moves(board, color * -1)
return self.run_next(copy.deepcopy(board), color * -1, depth + 1, ismax, possiblemoves)
def evaluate(self, board, color):
Minimax.minimax_color = color
possibleinitmoves = MoveCalculator.get_possible_moves(board, color)
if len(possibleinitmoves) == 0:
return (board, None, None)
else:
optimal = []
Minimax.current_color = color
self.alpha = -100000
self.beta = 100000
for pos in possibleinitmoves:
b = copy.deepcopy(board)
optimal.append((b, self.run_minimax(b, color, pos, 1, True), pos))
            optimal.sort(key=lambda entry: entry[1], reverse=True)
            if self.log_level > 0:
                logging.info("minimax: optimal score for " + str(color) + ", " + str(optimal[0][1]))
                logging.info("minimax: worst score for " + str(color) + ", " + str(optimal[-1][1]))
                logging.info("minimax: MAX SEARCH DEPTH " + str(self.depth))
if self.log_level > 1:
optimal[0][0].print_me()
return optimal[0]
```
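A hedged sketch of driving the searcher for a single move. The `board` object is assumed to be the one maintained by `BoardController` (exposing `_squares`); the depth and log level are illustrative values.
```python
from games.othello.minimax import Minimax, calculatescore

# Search 3 plies ahead for the white player (color = 1), with quiet logging.
ai = Minimax(3, 1, 0, calculatescore)

board_copy, score, move = ai.evaluate(board, 1)  # `board` comes from the running game
if move is None:
    print("white has no legal move")
else:
    print("best move:", move, "with heuristic score", score)
```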
#### File: games/othello/othelloscene.py
```python
from games.othello.boardcontroller import BoardController
from proton.learning.rl_interface import RLInterface
from proton.protonengine import ProtonEngine
from proton.scene import Scene
import logging
import yaml
from games.othello.playerdata import *
import numpy as np
def generate_player(board, color, playerdata):
player = str(playerdata["playtype"])
if player == "random":
return RandomPlayer(board, color)
elif player == "minimax":
searchdepth = str(playerdata["attributes"]["searchdepth"])
log_level = int(playerdata["attributes"]["log_level"])
randomness = float(playerdata["attributes"]["randomness"])
return MinimaxPlayer(board, color, int(searchdepth), log_level, randomness)
elif player == "human":
return HumanPlayer(board, color)
elif player == "ml":
model_path = str(playerdata["attributes"]["model_path"])
learning_rate = float(playerdata["attributes"]["learning_rate"])
is_training = bool(playerdata["attributes"]["is_training"])
eps_min = float(playerdata["attributes"]["eps_min"])
eps_max = float(playerdata["attributes"]["eps_max"])
eps_decay_steps = float(playerdata["attributes"]["eps_decay_steps"])
n_steps = int(playerdata["attributes"]["n_steps"])
start_training_steps = int(
playerdata["attributes"]["start_training_steps"])
training_interval = int(playerdata["attributes"]["training_interval"])
save_steps = int(playerdata["attributes"]["save_steps"])
copy_steps = int(playerdata["attributes"]["copy_steps"])
discount_rate = float(playerdata["attributes"]["discount_rate"])
batch_size = int(playerdata["attributes"]["batch_size"])
learnerparams = LearnerParams(is_training, eps_min, eps_max, eps_decay_steps,
n_steps, start_training_steps, training_interval, save_steps, copy_steps, discount_rate, learning_rate, model_path,
batch_size)
return MLPlayer(board, color, learnerparams)
def create_players(yamldata, board):
white = yamldata["white"]
wp = generate_player(board, 1, white)
black = yamldata["black"]
bp = generate_player(board, -1, black)
return (wp, bp)
class OthelloScene(Scene, RLInterface):
def __init__(self):
super(OthelloScene, self).__init__()
        self.fillcolor = (175, 110, 200)
        self.started = False
    def getplayers(self, file, boardcontroller):
        # fall back to the bundled sample config when no explicit path is given
        path = file or "games/othello/resources/game4.yml"
        with open(path, 'r') as stream:
try:
                data = yaml.safe_load(stream)
players = create_players(data, boardcontroller)
return players
except yaml.YAMLError as exc:
logging.error(exc)
def initialize(self, config_data):
self.board = self.add_new_game_object("boardcontroller")
self.board.add_component(BoardController)
self.board.motion.set_position(500, 500)
self.boardcontroller = self.board.get_component(BoardController)
players = self.getplayers("", self.boardcontroller)
self.boardcontroller.initialize(players[0], players[1])
def gamestarted(self):
return self.started
def resetnow(self):
logging.info("**** DESTROY GAME OBJECT AND REINITIALIZE ****")
# self.destroyall()
self.started = True
def reloadscene(self):
self.boardcontroller.reset_board()
def get_reward(self, color):
"""
gets the reward for the given action.
:param action:
:return:
"""
score = self.boardcontroller.calculate_score(color)
return score
def get_legal_actions(self):
"""
Gets legal action codes.
:return: list of legal actions.
"""
return self.boardcontroller.getlegalmoves()
def save_state(self):
"""
saves the state
:return:
"""
pass
def load_state(self):
"""
loads the state
:return:
"""
pass
def is_legal_action(self, action, color):
return self.boardcontroller.can_make_move(action, color)
def get_observation(self):
"""
Gets game state observation.
:return: game state observation.
"""
return self.boardcontroller.get_board()
def is_game_over(self):
"""
True if game over, false otherwise.
:return:
"""
return self.boardcontroller.is_gameover()
def finish(self):
pass
def get_state(self):
return self.boardcontroller.get_state()
def perform_action(self, action, color):
"""
Apply action and return reward.
:param action:
:return:
"""
reward = 0
if action is not None:
self.boardcontroller.force_action(action, color)
reward = self.get_reward(color)
else:
winner = self.boardcontroller.winner
if winner == 0:
reward = 0
elif self.boardcontroller.winner == color:
reward = 1
else:
reward = -1
return reward
def step(self, action, color):
reward = self.perform_action(action, color)
state = self.get_state()
done = self.is_game_over()
return state, reward, done
```
#### File: games/piratedefense/cannoncontroller.py
```python
from math import sin, acos
from pygame.constants import K_UP, K_DOWN, K_SPACE
from games.piratedefense.cannonlearner import CannonLearner
from proton.collider import Collider
from proton.component import Component
from proton.graphicscomponent import GraphicsComponent
from proton.physics.rigidbody import RigidBody
from proton.protonmath.vector2 import Vector2
from games.piratedefense.cannonball import CannonBall
from proton.scenemanager import SceneManager
from proton.resourcemanager import ResourceManager
from proton.protonsingleton import ProtonSingleton
from proton.gameinput import GameInput
from proton.gametime import GameTime
class CannonController(Component):
GRAVITY = 900
def __init__(self, gameobject_):
super(CannonController, self).__init__(gameobject_)
self.state = 0 # 0 = stationary, 1 = moving
self.initialspeed = 400
self.initialangle = -1.4
self.initialposition = Vector2(0, 0)
self.time = 0
self.targetpoint = Vector2(0, 0)
self.target = None
self.cannonballs = []
self.cbcount = 20
self.idx = 0
        self.game_input = ProtonSingleton(GameInput)  # renamed from self.input so it does not shadow the input() method below
self.cannon_fire_timer = 0.0
self.FIRE_WAIT_TIME = 1.75 # 1.75 seconds between launches.
def init(self):
rm = ProtonSingleton(ResourceManager)
cannonimag = rm.load_texture("./games/piratedefense/resources/cannon.png")
self.game_object().graphics.set_sprite_obj(cannonimag)
self.game_object().motion.set_position(30, 100)
self.game_object().transform().set_scale(0.5, 0.5)
        self.game_object().get_component(GraphicsComponent).render_order = 100
for i in range(self.cbcount):
go = ProtonSingleton(SceneManager).scene().add_new_game_object("cannonball_" + str(i))
go.add_component(CannonBall)
self.cannonballs.append(go.get_component(CannonBall))
go.set_active(False)
rm = ProtonSingleton(ResourceManager)
blackcircle = rm.load_texture("./games/piratedefense/resources/cannonball.png")
go.motion.set_position(-100, 100)
go.graphics.set_sprite_obj(blackcircle)
col = go.add_component(Collider)
go.add_component(RigidBody)
go.transform().set_scale(0.25, 0.25)
            go.get_component(GraphicsComponent).render_order = 10
col.layer = 24
self.add_learner()
def launch(self, angle, speed, target):
cb = self.cannonballs[self.idx]
self.idx += 1
self.idx %= self.cbcount
cb.launch(self.game_object().motion.position(), angle, speed, target)
cb.game_object().set_active(True)
def restart(self):
self.game_object().motion.set_position(30, 100)
for cannon in self.cannonballs:
cannon.game_object().set_active(False)
def update(self):
        if str(K_UP) in self.game_input.keydownevents:
p = self.game_object().transform().position()
if p.y > 20:
p -= Vector2(0, 20)
self.game_object().transform().set_position(p.x, p.y)
        if str(K_DOWN) in self.game_input.keydownevents:
p = self.game_object().transform().position()
if p.y < 680:
p += Vector2(0, 20)
self.game_object().transform().set_position(p.x, p.y)
        if str(K_SPACE) in self.game_input.keydownevents:
if self.cannon_fire_timer > self.FIRE_WAIT_TIME:
self.cannon_fire_timer = 0
p = self.game_object().transform().position()
self.launch(-0.25, 400, p + Vector2(90000, 0))
self.cannon_fire_timer += ProtonSingleton(GameTime).delta_time()
def launchatpoint(self, point):
pos = self.game_object().motion.position()
vec = point - pos
angle = acos(vec.normalize().dot(Vector2(1, 0)))
self.launch(-angle, 1000, point)
def input(self, action):
if action == 0:
pass
elif action == 1:
self.move_up()
elif action == 2:
self.move_down()
elif action == 3:
self.fire_cannon()
def fire_cannon(self):
if self.cannon_fire_timer > self.FIRE_WAIT_TIME:
self.cannon_fire_timer = 0
p = self.game_object().transform().position()
self.launch(-0.25, 400, p + Vector2(90000, 0))
def move_up(self):
p = self.game_object().transform().position()
if p.y > 20:
p -= Vector2(0, 20)
self.game_object().transform().set_position(p.x, p.y)
def move_down(self):
p = self.game_object().transform().position()
if p.y < 680:
p += Vector2(0, 20)
self.game_object().transform().set_position(p.x, p.y)
def force_action(self, action):
if action is None:
return True
else:
if action == 0:
return True # do nothing
elif action == 1:
self.move_up()
elif action == 2:
self.move_down()
else:
self.fire_cannon()
return True
def get_state(self):
return self.game_object().transform().position().y * 1.0 / 700 # y pos
def add_learner(self):
self.game_object().add_component(CannonLearner)
self.learner = self.game_object().get_component(CannonLearner)
self.learner.env = ProtonSingleton(SceneManager).scene()
```
#### File: games/piratedefense/piratedefensescene.py
```python
import sys
import os
import numpy as np
from games.piratedefense.titletext import TitleText
from proton.physics.rigidbody import RigidBody
from proton.ui.textcomponent import TextComponent
from proton.ui.uigraphics import Dims
from games.piratedefense.towercontroller import TowerController
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# sys.path.insert(0,"../../")
from proton.resourcemanager import ResourceManager
from proton.protonsingleton import ProtonSingleton
from proton.protonengine import ProtonEngine, Vector2, GameTime
from proton.gameobject import GameObject
from proton.scene import Scene
from games.piratedefense.cannonball import CannonBall
from proton.collider import Collider
from games.piratedefense.pirateshipcontroller import PirateShipController
from games.piratedefense.piratescontroller import PiratesController
from proton.learning.rl_interface import RLInterface
class PirateDefenseScene(Scene, RLInterface):
def __init__(self):
super(PirateDefenseScene, self).__init__()
self.fillcolor = (2, 110, 225)
self.mindist = 10000
self.score = 0
self.cache_score = 0
self.game_over = False
def initialize(self, config_data):
c = self.add_new_game_object("TOWER")
rm = ProtonSingleton(ResourceManager)
self._blackcircle = rm.load_texture("./games/piratedefense/resources/blackcircle.png")
self._pirateship = rm.load_texture("./games/piratedefense/resources/pirateship.png")
c.add_component(TowerController)
self._tower = c.get_component(TowerController)
pc = self.add_new_game_object("PIRATES")
pc.add_component(PiratesController)
self._pc = pc.get_component(PiratesController)
self._pc.setup(20, self._pirateship)
self.add_collide_layers(24, 22)
ttc = self.add_new_game_object("TEXT")
ttc.add_component(TextComponent)
tc = ttc.get_component(TextComponent)
tc.settext("Time:", Dims(500, 20, 600, 70), (255, 255, 255))
ttc.add_component(TitleText)
self._titletext = ttc.get_component(TitleText)
self.gameoverobj = self.add_new_game_object("GAMEOVER")
gotex = rm.load_texture("./games/piratedefense/resources/gameover.png")
self.gameoverobj.graphics.set_sprite_obj(gotex)
self.gameoverobj.set_active(False)
def reload_scene(self):
self.restart()
def restart(self):
self.gameoverobj.set_active(False)
self._titletext.restart()
self._tower.restart()
self._pc.restart()
self.game_over = False
def stop(self):
self.game_over = True
self._titletext.stop()
self._tower.stop()
self._pc.stop()
self.gameoverobj.set_active(True)
self.gameoverobj.transform().set_position(500, 350)
def launch(self):
pass
def get_reward(self, action):
        if action is None:
if self.game_over:
return -1
else:
return 1
self.score = ProtonSingleton(GameTime).time() / 1000.0
def normalize(v):
return Vector2(v.x / 1200, v.y / 700)
ships = [normalize(x.game_object().transform().position()) for x in self._pc.pirateships if
x.game_object().is_active()]
a = 0
length = len(ships)
for i in range(0, len(ships)):
a += ships[i].y
if a > 0:
a /= length
return a - 0.5
# r = self.score - self.cache_score
# self.cache_score = self.score
# return 1 if r > 0 else 0
def get_legal_actions(self):
return [0, 1, 2, 3] # nothing, up, down, fire
def save_state(self):
"""
saves the state
:return:
"""
pass
def load_state(self):
"""
loads the state
:return:
"""
pass
def get_observation(self):
"""
Gets game state observation.
:return: game state observation.
"""
return None
def is_game_over(self):
"""
True if game over, false otherwise.
:return:
"""
return self.game_over
def step(self, action):
reward = self.perform_action(action)
s = self.get_state()
done = self.game_over
return s, reward, done, True
def finish(self):
pass
def get_state(self):
# y position of cannon, closest 2 ships.
pos = self._tower.get_state()
def normalize(v):
return Vector2(v.x / 1200, v.y / 700)
ships = [normalize(x.game_object().transform().position()) for x in self._pc.pirateships if
x.game_object().is_active()]
sorted_ships = sorted(ships, key=lambda f: f.magnitude(), reverse=False)
while len(sorted_ships) < 5:
sorted_ships.append(Vector2(1, 1))
game_state = [pos]
for i in range(0, 5):
game_state.append(sorted_ships[i].x)
game_state.append(sorted_ships[i].y)
return np.array(game_state)
def perform_action(self, action):
success = self._tower.force_action(action)
rew = self.get_reward(action)
return rew
if __name__ == "__main__":
engine = ProtonEngine("Pirate Defense", 1200, 700, True)
asteroidscene = PirateDefenseScene()
engine.load_scene(asteroidscene)
engine.run()
```
#### File: games/piratedefense/pirateshipcontroller.py
```python
import os, sys
import logging
import random
from proton.gametime import GameTime, ProtonSingleton
from proton.component import Component
from proton.protonmath.vector2 import Vector2
from proton.splines import CatmullRomSpline
class PirateShipController(Component):
def __init__(self, gameobject_):
super(PirateShipController, self).__init__(gameobject_)
self.crspline = None
self.game_object().set_active(False)
@staticmethod
def onfinish():
# self.game_object() .set_active(False)
logging.info("finished!")
def start(self):
pass
def launch(self):
self.game_object().set_active(True)
v0 = Vector2(1000, 400)
v1 = Vector2(1000, random.randint(20, 470))
v2 = Vector2(random.randint(800, 990), random.randint(20, 690))
v3 = Vector2(random.randint(500, 700), random.randint(20, 680))
v4 = Vector2(random.randint(400, 450), random.randint(20, 680))
v5 = Vector2(random.randint(200, 300), random.randint(20, 680))
v6 = Vector2(random.randint(120, 150), random.randint(20, 680))
v7 = Vector2(50, random.randint(40, 600))
v8 = Vector2(0, random.randint(40, 600))
self.crspline = CatmullRomSpline([v0, v1, v2, v3, v4, v5, v6, v7, v8], PirateShipController.onfinish)
def update(self):
if not self.game_object().is_active() or self.crspline is None:
return
dt = ProtonSingleton(GameTime).dt()
p = self.crspline.updatecurveatspeed(dt, 100)
self.game_object().motion.set_position(p.x, p.y)
def oncollision(self, other):
self.game_object().set_active(False)
logging.info("COLLISION")
```
#### File: games/piratedefense/titletext.py
```python
from proton.component import Component
from proton.gametime import GameTime, ProtonSingleton
from proton.ui.textcomponent import TextComponent
from proton.ui.uigraphics import Dims
class TitleText(Component):
def __init__(self, gameobject_):
super(TitleText, self).__init__(gameobject_)
self._tc = None
self._run = True
self._finishtime = 0
self._current_time = 0
self._start_time = 0
def start(self):
self._tc = self.game_object().get_component(TextComponent)
def update(self):
if not self._run:
self._start_time = ProtonSingleton(GameTime).time()
self._tc.settext("Game Over. Finish time: " + "{:10.3f}".format(self._finishtime / 1000.0),
Dims(500, 20, 1000, 120), (255, 255, 255))
else:
            self._current_time = ProtonSingleton(GameTime).time()
            self._tc.settext("Time: " + "{:10.3f}".format((self._current_time - self._start_time) / 1000.0),
                             Dims(700, 20, 1000, 90), (255, 255, 255))
def stop(self):
self._run = False
self._finishtime = ProtonSingleton(GameTime).time()
def restart(self):
self._run = True
```
#### File: jonghough/proton/launcher.py
```python
import click
from games.piratedefense.piratedefensescene import PirateDefenseScene
from proton.protonengine import ProtonEngine
from games.othello.othelloscene import OthelloScene
from games.asteroiddodge.asteroidscene import AsteroidScene
@click.command()
@click.option('--game', prompt='path to the game configuration file',
              help='The configuration (YAML) file defining the game parameters.')
def launch(game):
engine = ProtonEngine(game)
engine.run()
if __name__ == "__main__":
launch()
```
#### File: proton/proton/component.py
```python
class Component(object):
def __init__(self, _gameobject):
self.__gameobject = _gameobject
def start(self):
pass
def update(self):
pass
def draw(self,screen):
pass
def ondestroy(self):
pass
def oncollision(self, other):
pass
def __nullcheck(func):
def wf(*args):
if not args[0].__gameobject.is_alive():
raise Exception
else:
return func(*args)
return wf
@__nullcheck
def game_object(self):
return self.__gameobject
```
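A minimal sketch of a custom behaviour built on this base class. `Drifter` and its speed value are hypothetical, but `game_object()`, `motion` and the lifecycle hooks are the ones defined by this engine.
```python
from proton.component import Component


class Drifter(Component):
    """Moves its owner a fixed number of pixels to the right every frame."""

    def start(self):
        self.dx = 1  # pixels per frame; purely illustrative

    def update(self):
        pos = self.game_object().motion.position()
        self.game_object().motion.set_position(pos.x + self.dx, pos.y)


# Typical wiring inside a Scene.initialize():
#   go = self.add_new_game_object("drifter")
#   go.add_component(Drifter)
```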
#### File: proton/proton/gameobject.py
```python
import proton.graphicscomponent
from proton.scenemanager import SceneManager
from proton.scene import *
from proton.protonsingleton import *
import proton.motioncomponent as mc
class GameObject(object):
def __init__(self, name):
"""
Initializes the gameobject.
:param name: gameobject name.
"""
self.components = {}
self.name = name
self.__parent = None
self.children = []
self.motion = mc.MotionComponent(self)
self.graphics = proton.graphicscomponent.GraphicsComponent(self)
self.components[mc.MotionComponent.__name__] = self.motion
self.components[proton.graphicscomponent.GraphicsComponent.__name__] = self.graphics
self.__alive = True
self.__active = True
def is_alive(self):
return self.__alive
def __nullcheck(func):
def wf(*args):
try:
if not args[0].__alive:
raise Exception
else:
return func(*args)
except RuntimeError as e:
pass
return wf
@__nullcheck
def set_active(self, b):
self.__active = b
for child in self.children:
child.set_active(b)
@__nullcheck
def is_active(self):
return self.__active
@__nullcheck
def set_parent(self, parent):
if parent is None:
            s = ProtonSingleton(SceneManager)
self.set_parent(s.currentscene.root)
elif GameObject.is_acyclic(parent, self):
if self.parent() is not None:
self.parent().children.remove(self)
self.__parent = parent
p = self.motion.position()
self.motion.set_position(p.x, p.y)
parent.children.append(self)
@__nullcheck
def parent(self):
return self.__parent
@__nullcheck
def add_component(self, typez):
comp = typez(self)
if typez.__name__ in self.components:
return self.components[type(comp).__name__]
else:
self.components[type(comp).__name__] = comp
comp.start()
return comp
@__nullcheck
def get_component(self, typez):
comp = typez(self)
if typez.__name__ in self.components:
return self.components[type(comp).__name__]
else:
return None
@__nullcheck
def transform(self):
return self.get_component(mc.MotionComponent)
@__nullcheck
def start(self):
pass
@__nullcheck
def update(self):
for k,v in self.components.items():
v.update()
@__nullcheck
def draw(self, screen):
if self.__active:
for k,v in self.components.items():
v.draw(screen)
@__nullcheck
def on_destroy(self):
for k,v in self.components.items():
v.ondestroy()
@__nullcheck
def on_collision(self, other):
for k,v in self.components.items():
v.oncollision(other)
@staticmethod
def is_acyclic(parent, nextchild):
for child in nextchild.children:
if child == parent:
return False
else:
nextok = GameObject.is_acyclic(parent, child)
if nextok is False:
return False
return True
@staticmethod
def destroy(gameobj):
if not gameobj.is_alive():
raise Exception # dont try to destroy dead object, please
else:
s = ProtonSingleton(SceneManager)
s.currentscene.allgameobjects.remove(gameobj)
            if gameobj.parent() is not None:
gameobj.parent().children.remove(gameobj)
for child in gameobj.children:
GameObject.destroy(child)
gameobj.__alive = False
```
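A short sketch of the object lifecycle exposed above: parenting, transform access and destruction. It assumes it runs inside a `Scene` subclass obtained from the engine, since `GameObject.destroy` reaches into the current scene.
```python
from proton.gameobject import GameObject

# Inside a Scene subclass, e.g. in initialize():
parent_go = self.add_new_game_object("squad")
child_go = self.add_new_game_object("ship")
child_go.set_parent(parent_go)        # child now follows the parent transform

child_go.transform().set_position(100, 200)

parent_go.set_active(False)           # deactivates the whole subtree
GameObject.destroy(parent_go)         # removes parent and all children from the scene
```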
#### File: proton/proton/graphicscomponent.py
```python
from pygame import *
from proton.component import *
from proton.gameobject import *
from math import *
from proton.resourcemanager import *
from proton.protonsingleton import *
from proton.protonmath.vector2 import Vector2
class GraphicsComponent(Component):
def __init__(self, _gameobject):
super(GraphicsComponent, self).__init__(_gameobject)
self.sprite = None
self.blit_sprite = None
self.sprite_rect = None
self.blit_sprite_rect = None
self.render_flag = False
self.sprite_name = None
self.render_order = 1 # default
self.width = 0
self.height = 0
self.setup_on_load()
def setup_on_load(self):
if self.sprite_name is not None:
self.set_sprite(self.sprite_name)
def set_sprite(self, spritename):
self.sprite_name = spritename
rm = ProtonSingleton(ResourceManager)
sprite = rm.load_texture(spritename)
self.sprite = sprite
self.sprite_rect = self.sprite.get_rect()
self.blit_sprite = sprite
self.blit_sprite_rect = self.blit_sprite.get_rect()
self.render_flag = True
self.width = self.sprite_rect.width
self.height = self.sprite_rect.height
def set_sprite_obj(self, spriteobj):
if spriteobj is None:
self.sprite = None
self.blit_sprite = None
self.width = 0
self.height = 0
else:
self.sprite = spriteobj
self.sprite_rect = self.sprite.get_rect()
self.blit_sprite = self.sprite
self.blit_sprite_rect = self.blit_sprite.get_rect()
self.render_flag = True
self.width = self.sprite_rect.width
self.height = self.sprite_rect.height
def set_render_order(self, rendorder):
self.render_order = rendorder
from proton.scenemanager import SceneManager
ProtonSingleton(SceneManager).scene().sort_render_order()
def update(self):
pass
def draw(self, screen):
if self.render_flag is False:
return
if self.sprite is None:
return
xpos = self.game_object().motion.worldtransform.at(0, 2)
ypos = self.game_object().motion.worldtransform.at(1, 2)
scalex = Vector2(self.game_object().motion.worldtransform.at(0, 0),
self.game_object().motion.worldtransform.at(1, 0)).len()
scaley = Vector2(self.game_object().motion.worldtransform.at(0, 1),
self.game_object().motion.worldtransform.at(1, 1)).len()
self.sprite_rect.x = xpos - 0.5 * self.width
self.sprite_rect.y = ypos - 0.5 * self.height
cx = self.game_object().motion.worldtransform.at(0, 0)
sx = self.game_object().motion.worldtransform.at(0, 1)
rads = atan2(sx, cx)
self.blit_sprite= pygame.transform.scale(self.sprite, (int(scalex * self.width), int(scaley * self.height)))
orig_rect = self.sprite_rect
c = orig_rect.center
self.blit_sprite = pygame.transform.rotate(self.blit_sprite, rads * 180.0 / 3.14159265)
self.blit_sprite_rect = self.blit_sprite.get_rect(center=c)
self.blit_sprite_rect.width = 100
screen.blit(self.blit_sprite, self.blit_sprite_rect)
```
#### File: proton/physics/rigidbody.py
```python
from proton.component import Component
from proton.gametime import GameTime, ProtonSingleton
from proton.motioncomponent import MotionComponent
from proton.protonmath.vector2 import Vector2
class RigidBody(Component):
def __init__(self,_gameobject):
super(RigidBody, self).__init__(_gameobject)
self.position = self.game_object().get_component(MotionComponent).position()
self.netforce = Vector2(0,10)
self.acceleration = Vector2(0,0)
self.velocity = Vector2(0,0)
self.rads = 0
self.mass = 1
self.inversemass = 1
self.gravity = 10
        self.rotationvelocity = 0
        self.rotationinertia = 1
        self.iskinematic = False
def updatephysics(self, dt):
'''
Update the kinematics physics.
:param dt: delta time
:return: nothing
'''
if self.iskinematic:
self.position = self.position + (self.velocity * dt) +(self.acceleration * 0.5 * dt * dt)
self.acceleration = self.acceleration + self.netforce * self.inversemass
self.velocity = self.velocity + self.acceleration * dt
self.rads = self.rotationvelocity * dt
def update(self):
if self.iskinematic:
self.updatephysics(GameTime.dt())
self.game_object().get_component(MotionComponent).set_position(self.position.x, self.position.y)
self.game_object().get_component(MotionComponent).rotate_by(self.rads)
@staticmethod
def oncollide(rb1, rb2):
nextv1 = (rb1.velocity * (rb1.mass - rb2.mass)) + (2.0 * rb2.mass * rb2.velocity)
nextv2 = (rb2.velocity * (rb2.mass - rb1.mass)) + (2.0 * rb1.mass * rb1.velocity)
totalmass = rb1.mass + rb2.mass
nextv1.x = nextv1.x / totalmass
nextv1.y = nextv1.y / totalmass
nextv2.x = nextv2.x / totalmass
nextv2.y = nextv2.y / totalmass
rb1.velocity = nextv1
rb2.velocity = nextv2
dt = ProtonSingleton(GameTime).dt()
rb1.position = rb1.position + (rb1.velocity * dt) + (rb1.acceleration * 0.5 * dt * dt)
rb2.position = rb2.position + (rb2.velocity * dt) + (rb2.acceleration * 0.5 * dt * dt)
```
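RigidBody.updatephysics advances the position with the old velocity and acceleration, then folds the net force into the acceleration and takes an explicit Euler velocity step. A standalone sketch of the same sequence on plain floats (force, mass and time step are illustrative values; this is not part of the engine) makes the update order easier to see:

```python
# Mirrors the order of operations in RigidBody.updatephysics, using scalars instead of Vector2.
def integrate(position, velocity, acceleration, net_force, inverse_mass, dt):
    position = position + velocity * dt + 0.5 * acceleration * dt * dt
    acceleration = acceleration + net_force * inverse_mass   # note: force accumulates, it is not reset
    velocity = velocity + acceleration * dt
    return position, velocity, acceleration

p, v, a = 0.0, 0.0, 0.0
for _ in range(3):   # three 16 ms frames with a constant 10 N force on a 1 kg body
    p, v, a = integrate(p, v, a, net_force=10.0, inverse_mass=1.0, dt=0.016)
print(p, v, a)
```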
#### File: proton/proton/scene.py
```python
import proton.graphicscomponent
from proton.particlesystem import *
from proton.quadmanager import *
from proton.gameobject import GameObject
from proton.scenemanager import SceneManager
from proton.graphicscomponent import GraphicsComponent
from proton.physics.physicsmanager import PhysicsManager
from proton.protonsingleton import ProtonSingleton
class Scene(object):
def __init__(self):
self.root = proton.gameobject.GameObject("Root")
self.allgameobjects = []
self.quadmanager = QuadManager(32, self.do_layers_collide)
self.colliders = []
self.collidelayers = {}
self.started = False
self.fillcolor = (255, 255, 255)
def initialize(self, config_data=None):
"""
override this and put initialization code here, e.g.
create initial game objects etc.
:return:
"""
pass
def setup_on_load(self):
for obj in self.allgameobjects:
obj.get_component(GraphicsComponent).setup_on_load()
ps = obj.get_component(ParticleSystem)
if ps is not None:
ps.setup_on_load()
def add_collide_layers(self, layer1, layer2):
ProtonSingleton(proton.physics.physicsmanager.PhysicsManager).add_collide_layers(layer1, layer2)
def do_layers_collide(self, layer1, layer2):
s = (layer1 ^ layer2) + 31 * (layer1 + layer2) - ((layer1 >> 2) ^ (layer2 >> 2))
if s in self.collidelayers:
return True
else:
return False
def update_scene(self):
for child in self.allgameobjects:
child.update()
def render_scene(self, screen):
for child in self.allgameobjects:
if child.is_active() and self.is_on_screen(child):
child.draw(screen)
def find_object_by_name(self, name):
for obj in self.allgameobjects:
if obj.name == name:
return obj
return None
def is_on_screen(self, gameobj):
p = gameobj.motion.position()
sm = ProtonSingleton(SceneManager)
if p.x < 0 or p.x > sm.width or p.y < 0 or p.y > sm.height:
return False
else:
return True
def sort_render_order(self):
self.allgameobjects = sorted(self.allgameobjects,
key=lambda go: go.get_component(proton.graphicscomponent.GraphicsComponent).render_order)
def add_new_game_object(self, name):
go = proton.gameobject.GameObject(name)
go.set_parent(self.root)
self.allgameobjects.append(go)
self.sort_render_order()
return go
def destroy_game_object(self, obj):
if obj in self.allgameobjects:
self.allgameobjects.remove(obj)
GameObject.destroy(obj)
def destroy_all(self):
        # iterate over a copy because GameObject.destroy() removes entries from allgameobjects
        for obj in list(self.allgameobjects):
GameObject.destroy(obj)
del self.allgameobjects[:]
```
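Scene.do_layers_collide folds two collision-layer ids into a single key before looking it up in collidelayers. Because every term of the expression is symmetric in its arguments, a pair registered as (a, b) is found again when queried as (b, a). A small stand-alone check of that property (not part of the engine):

```python
def pair_key(layer1, layer2):
    # same expression as Scene.do_layers_collide
    return (layer1 ^ layer2) + 31 * (layer1 + layer2) - ((layer1 >> 2) ^ (layer2 >> 2))

assert pair_key(3, 7) == pair_key(7, 3)
assert all(pair_key(a, b) == pair_key(b, a) for a in range(16) for b in range(16))
```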
#### File: proton/ui/textcomponent.py
```python
import pygame
from proton.component import *
from pygame import Color
from proton.component import Component
from proton.ui.uigraphics import UIGraphics
from proton.ui.uigraphics import Dims
class TextComponent(UIGraphics):
def __init__(self, _gameobject):
"""
:param _gameobject:
"""
super(TextComponent, self).__init__(_gameobject)
self.text = None
self.textSize = 1
#self.textColor = Color.Red
self.setup(Dims(100,200,200,100))
self.font = pygame.font.SysFont('Monospace', 100)
def settext(self,txt, dims, color):
self.text=txt
textsurface = self.font.render(self.text, False, color)
self.game_object().graphics.set_sprite_obj(textsurface)
self.setup(dims)
``` |
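TextComponent builds a pygame font surface in settext and hands it to the object's GraphicsComponent via set_sprite_obj, so text is drawn through the same path as sprites. A usage sketch, assuming pygame has been initialised and `scene` is the active Scene instance; the label text, dimensions and colour are illustrative:

```python
import pygame
from proton.ui.textcomponent import TextComponent
from proton.ui.uigraphics import Dims

label = scene.add_new_game_object('score_label')   # `scene` is assumed to be the current Scene
text = label.add_component(TextComponent)
text.settext('Score: 0', Dims(20, 20, 200, 50), pygame.Color('black'))
```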
{
"source": "Jong-hun-Park/adVNTR",
"score": 2
} |
#### File: adVNTR/advntr/vntr_finder.py
```python
from collections import Counter
import logging
import numpy
import os
from multiprocessing import Process, Manager, Value, Semaphore
from random import random
from keras.models import Sequential, load_model
import pysam
from Bio import pairwise2
from Bio.Seq import Seq
from Bio import SeqIO
from advntr.coverage_bias import CoverageBiasDetector, CoverageCorrector
from advntr.hmm_utils import *
from advntr.pacbio_haplotyper import PacBioHaplotyper
from advntr.profiler import time_usage
from advntr.sam_utils import get_reference_genome_of_alignment_file, get_related_reads_and_read_count_in_samfile
from advntr import settings
from advntr.utils import is_low_quality_read
from pomegranate import HiddenMarkovModel as Model
from advntr.deep_recruitment import get_embedding_of_string, input_dim
class GenotypeResult:
def __init__(self, copy_numbers, recruited_reads_count, spanning_reads_count, flanking_reads_count, max_likelihood):
self.copy_numbers = copy_numbers
self.recruited_reads_count = recruited_reads_count
self.spanning_reads_count = spanning_reads_count
self.flanking_reads_count = flanking_reads_count
self.maximum_likelihood = max_likelihood
class SelectedRead:
def __init__(self, sequence, logp, vpath, mapq=None, reference_start=None):
self.sequence = sequence
self.logp = logp
self.vpath = vpath
self.mapq = mapq
self.is_mapped = reference_start is not None
def is_mapped(self):
return self.is_mapped
class VNTRFinder:
"""Find the VNTR structure of a reference VNTR in NGS data of the donor."""
def __init__(self, reference_vntr, is_haploid=False, reference_filename=None):
self.reference_vntr = reference_vntr
self.is_haploid = is_haploid
self.reference_filename = reference_filename
self.min_repeat_bp_to_add_read = 2
if len(self.reference_vntr.pattern) < 30:
self.min_repeat_bp_to_add_read = 2
self.min_repeat_bp_to_count_repeats = 2
self.minimum_left_flanking_size = {}
self.minimum_right_flanking_size = {69212: 19, 532789: 12, 400825: 10, 468671: 10}
self.vntr_start = self.reference_vntr.start_point
self.vntr_end = self.vntr_start + self.reference_vntr.get_length()
def get_copies_for_hmm(self, read_length):
return int(round(float(read_length) / len(self.reference_vntr.pattern) + 0.5))
@staticmethod
def get_alignment_file_read_mode(alignment_file):
read_mode = 'r' if alignment_file.endswith('sam') else 'rb'
if alignment_file.endswith('cram'):
read_mode = 'rc'
return read_mode
@time_usage
def build_vntr_matcher_hmm(self, copies, flanking_region_size=100):
patterns = self.reference_vntr.get_repeat_segments()
left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size]
vntr_matcher = get_read_matcher_model(left_flanking_region, right_flanking_region, patterns, copies)
return vntr_matcher
def get_vntr_matcher_hmm(self, read_length):
"""Try to load trained HMM for this VNTR
If there was no trained HMM, it will build one and store it for later usage
"""
logging.info('Using read length %s' % read_length)
copies = self.get_copies_for_hmm(read_length)
base_name = str(self.reference_vntr.id) + '_' + str(read_length) + '.json'
stored_hmm_file = settings.TRAINED_HMMS_DIR + base_name
if settings.USE_TRAINED_HMMS and os.path.isfile(stored_hmm_file):
model = Model()
model = model.from_json(stored_hmm_file)
return model
flanking_region_size = read_length
vntr_matcher = self.build_vntr_matcher_hmm(copies, flanking_region_size)
if settings.USE_TRAINED_HMMS:
json_str = vntr_matcher.to_json()
with open(stored_hmm_file, 'w') as outfile:
outfile.write(json_str)
return vntr_matcher
def get_keywords_for_filtering(self, short_reads=True, keyword_size=21):
vntr = ''.join(self.reference_vntr.get_repeat_segments())
if len(vntr) < keyword_size:
min_copies = int(keyword_size / len(vntr)) + 1
vntr = str(vntr) * min_copies
locus = self.reference_vntr.left_flanking_region[-15:] + vntr + self.reference_vntr.right_flanking_region[:15]
queries = []
step_size = 5 if len(self.reference_vntr.pattern) != 5 else 6
for i in range(0, len(locus) - keyword_size + 1, step_size):
queries.append(locus[i:i+keyword_size])
if not short_reads:
queries = [self.reference_vntr.left_flanking_region[-80:], self.reference_vntr.right_flanking_region[:80]]
queries = set(queries)
return queries
@staticmethod
def add_hmm_score_to_list(sema, hmm, read, result_scores):
logp, vpath = hmm.viterbi(str(read.seq))
rev_logp, rev_vpath = hmm.viterbi(str(Seq(str(read.seq)).reverse_complement()))
if logp < rev_logp:
logp = rev_logp
result_scores.append(logp)
sema.release()
def is_true_read(self, read):
read_start = read.reference_start
reference_name = read.reference_name
if not reference_name.startswith('chr'):
reference_name = 'chr' + reference_name
if reference_name == self.reference_vntr.chromosome and self.vntr_start - len(read.seq) < read_start < self.vntr_end:
return True
return False
def get_min_score_to_select_a_read(self, read_length):
if self.reference_vntr.scaled_score is None or self.reference_vntr.scaled_score == 0:
return None
return self.reference_vntr.scaled_score * read_length
@staticmethod
def recruit_read(logp, vpath, min_score_to_count_read, read_length):
if min_score_to_count_read is not None and logp > min_score_to_count_read:
return True
matches = get_number_of_matches_in_vpath(vpath)
if min_score_to_count_read is None and matches >= 0.9 * read_length and logp > -read_length:
return True
return False
def process_unmapped_read_with_dnn(self, read_segment, hmm, recruitment_score, vntr_bp_in_unmapped_reads, selected_reads, compute_reverse, dnn_model):
logging.info('process unmapped read with DNN')
if read_segment.count('N') <= 0:
sequence = read_segment.upper()
forward_dnn_read = False
reverse_dnn_read = False
            # use -inf so a direction that was never scored cannot win the comparison below
            logp = float('-inf')
            vpath = []
            rev_logp = float('-inf')
            rev_vpath = []
embedding = get_embedding_of_string(sequence)
selected = dnn_model.predict(numpy.array([embedding]), batch_size=1)[0]
if selected[0] > selected[1]:
logging.info('%s and %s' % (selected[0], selected[1]))
forward_dnn_read = True
if compute_reverse:
reverse_sequence = str(Seq(sequence).reverse_complement())
embedding = get_embedding_of_string(reverse_sequence)
selected = dnn_model.predict(numpy.array([embedding]), batch_size=1)[0]
if selected[0] > selected[1]:
reverse_dnn_read = True
if forward_dnn_read or reverse_dnn_read:
logging.info('computing HMM viterbi')
if forward_dnn_read:
logp, vpath = hmm.viterbi(sequence)
if reverse_dnn_read:
rev_logp, rev_vpath = hmm.viterbi(reverse_sequence)
if logp < rev_logp:
logging.info('using reversed read')
sequence = reverse_sequence
logp = rev_logp
vpath = rev_vpath
logging.info('this is a VNTR read')
repeat_bps = get_number_of_repeat_bp_matches_in_vpath(vpath)
if self.recruit_read(logp, vpath, recruitment_score, len(sequence)):
if repeat_bps > self.min_repeat_bp_to_count_repeats:
vntr_bp_in_unmapped_reads.value += repeat_bps
if repeat_bps > self.min_repeat_bp_to_add_read:
selected_reads.append(SelectedRead(sequence, logp, vpath))
def process_unmapped_read(self, sema, read_segment, hmm, recruitment_score, vntr_bp_in_unmapped_reads,
selected_reads, compute_reverse=True):
if read_segment.count('N') <= 0:
sequence = read_segment.upper()
logp, vpath = hmm.viterbi(sequence)
if compute_reverse:
reverse_sequence = str(Seq(sequence).reverse_complement())
rev_logp, rev_vpath = hmm.viterbi(reverse_sequence)
if logp < rev_logp:
sequence = reverse_sequence
logp = rev_logp
vpath = rev_vpath
repeat_bps = get_number_of_repeat_bp_matches_in_vpath(vpath)
if self.recruit_read(logp, vpath, recruitment_score, len(sequence)):
if repeat_bps > self.min_repeat_bp_to_count_repeats:
vntr_bp_in_unmapped_reads.value += repeat_bps
if repeat_bps > self.min_repeat_bp_to_add_read:
selected_reads.append(SelectedRead(sequence, logp, vpath))
if sema is not None:
sema.release()
def identify_frameshift(self, location_coverage, observed_indel_transitions, expected_indels, error_rate=0.01):
if observed_indel_transitions >= location_coverage:
return True
from scipy.stats import binom
sequencing_error_prob = binom.pmf(observed_indel_transitions, location_coverage, error_rate)
frameshift_prob = binom.pmf(observed_indel_transitions, location_coverage, expected_indels)
prob = sequencing_error_prob / frameshift_prob
return prob < 0.01
def find_frameshift_from_selected_reads(self, selected_reads):
mutations = {}
repeating_bps_in_data = 0
repeats_lengths_distribution = []
for read in selected_reads:
visited_states = [state.name for idx, state in read.vpath[1:-1]]
repeats_lengths = get_repeating_pattern_lengths(visited_states)
repeats_lengths_distribution += repeats_lengths
current_repeat = None
repeating_bps_in_data += get_number_of_repeat_bp_matches_in_vpath(read.vpath)
for i in range(len(visited_states)):
if visited_states[i].endswith('fix') or visited_states[i].startswith('M'):
continue
if visited_states[i].startswith('unit_start'):
if current_repeat is None:
current_repeat = 0
else:
current_repeat += 1
if current_repeat is None or current_repeat >= len(repeats_lengths):
continue
if not visited_states[i].startswith('I') and not visited_states[i].startswith('D'):
continue
if repeats_lengths[current_repeat] == len(self.reference_vntr.pattern):
continue
state = visited_states[i].split('_')[0]
if state.startswith('I'):
state += get_emitted_basepair_from_visited_states(visited_states[i], visited_states, read.sequence)
if abs(repeats_lengths[current_repeat] - len(self.reference_vntr.pattern)) <= 2:
if state not in mutations.keys():
mutations[state] = 0
mutations[state] += 1
sorted_mutations = sorted(mutations.items(), key=lambda x: x[1])
logging.debug('sorted mutations: %s ' % sorted_mutations)
frameshift_candidate = sorted_mutations[-1] if len(sorted_mutations) else (None, 0)
logging.info(sorted(repeats_lengths_distribution))
logging.info('Frameshift Candidate and Occurrence %s: %s' % frameshift_candidate)
logging.info('Observed repeating base pairs in data: %s' % repeating_bps_in_data)
avg_bp_coverage = float(repeating_bps_in_data) / self.reference_vntr.get_length() / 2
logging.info('Average coverage for each base pair: %s' % avg_bp_coverage)
expected_indel_transitions = 1 / avg_bp_coverage
if self.identify_frameshift(avg_bp_coverage, frameshift_candidate[1], expected_indel_transitions):
logging.info('There is a frameshift at %s' % frameshift_candidate[0])
return frameshift_candidate[0]
return None
def read_flanks_repeats_with_confidence(self, vpath):
minimum_left_flanking = 5
minimum_right_flanking = 5
if self.reference_vntr.id in self.minimum_left_flanking_size:
minimum_left_flanking = self.minimum_left_flanking_size[self.reference_vntr.id]
if self.reference_vntr.id in self.minimum_right_flanking_size:
minimum_right_flanking = self.minimum_right_flanking_size[self.reference_vntr.id]
if get_left_flanking_region_size_in_vpath(vpath) > minimum_left_flanking:
if get_right_flanking_region_size_in_vpath(vpath) > minimum_right_flanking:
return True
return False
def check_if_flanking_regions_align_to_str(self, read_str, length_distribution, spanning_reads):
flanking_region_size = 100
left_flanking = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking = self.reference_vntr.right_flanking_region[:flanking_region_size]
left_alignments = pairwise2.align.localms(read_str, left_flanking, 1, -1, -1, -1)
if len(left_alignments) < 1:
return
min_left, max_left = 10e9, 0
for aln in left_alignments:
if aln[2] < len(left_flanking) * (1 - settings.MAX_ERROR_RATE):
continue
min_left = min(min_left, aln[3])
max_left = max(max_left, aln[3])
if max_left - min_left > 30:
with open('vntr_complex.txt', 'a') as out:
out.write('%s %s\n' % (self.reference_vntr.id, max_left - min_left))
left_align = left_alignments[0]
if left_align[2] < len(left_flanking) * (1 - settings.MAX_ERROR_RATE):
return
right_alignments = pairwise2.align.localms(read_str, right_flanking, 1, -1, -1, -1)
if len(right_alignments) < 1:
return
min_right, max_right = 10e9, 0
for aln in right_alignments:
if aln[2] < len(right_flanking) * (1 - settings.MAX_ERROR_RATE):
continue
min_right = min(min_right, aln[3])
max_right = max(max_right, aln[3])
if max_right - min_right > 30:
with open('vntr_complex.txt', 'a') as out:
out.write('%s %s\n' % (self.reference_vntr.id, max_right - min_right))
right_align = right_alignments[0]
if right_align[2] < len(right_flanking) * (1 - settings.MAX_ERROR_RATE):
return
if right_align[3] < left_align[3]:
return
spanning_reads.append(read_str[left_align[3]:right_align[3]+flanking_region_size])
length_distribution.append(right_align[3] - (left_align[3] + flanking_region_size))
def check_if_pacbio_read_spans_vntr(self, sema, read, length_distribution, spanning_reads):
self.check_if_flanking_regions_align_to_str(str(read.seq).upper(), length_distribution, spanning_reads)
reverse_complement_str = str(Seq(str(read.seq)).reverse_complement())
self.check_if_flanking_regions_align_to_str(reverse_complement_str.upper(), length_distribution, spanning_reads)
sema.release()
def check_if_pacbio_mapped_read_spans_vntr(self, sema, read, length_distribution, spanning_reads):
flanking_region_size = 100
region_start = self.reference_vntr.start_point - flanking_region_size
region_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
if read.get_reference_positions()[0] < region_start and read.get_reference_positions()[-1] > region_end:
read_region_start = None
read_region_end = None
for read_pos, ref_pos in enumerate(read.get_reference_positions()):
if ref_pos >= region_start and read_region_start is None:
read_region_start = read_pos
if ref_pos >= region_end and read_region_end is None:
read_region_end = read_pos
if read_region_start is not None and read_region_end is not None:
result = read.seq[read_region_start:read_region_end+flanking_region_size]
if read.is_reverse:
result = str(Seq(result).reverse_complement())
spanning_reads.append(result)
length_distribution.append(len(result) - flanking_region_size * 2)
sema.release()
@time_usage
def get_spanning_reads_of_unaligned_pacbio_reads(self, unmapped_filtered_reads):
sema = Semaphore(settings.CORES)
manager = Manager()
shared_length_distribution = manager.list()
shared_spanning_reads = manager.list()
process_list = []
for read in unmapped_filtered_reads:
sema.acquire()
p = Process(target=self.check_if_pacbio_read_spans_vntr, args=(sema, read, shared_length_distribution,
shared_spanning_reads))
process_list.append(p)
p.start()
for p in process_list:
p.join()
logging.info('length_distribution of unmapped spanning reads: %s' % list(shared_length_distribution))
return list(shared_spanning_reads), list(shared_length_distribution)
@time_usage
def get_spanning_reads_of_aligned_pacbio_reads(self, alignment_file):
sema = Semaphore(settings.CORES)
manager = Manager()
length_distribution = manager.list()
mapped_spanning_reads = manager.list()
vntr_start = self.reference_vntr.start_point
vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
region_start = vntr_start
region_end = vntr_end
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:]
process_list = []
for read in samfile.fetch(chromosome, region_start, region_end):
sema.acquire()
p = Process(target=self.check_if_pacbio_mapped_read_spans_vntr, args=(sema, read, length_distribution,
mapped_spanning_reads))
process_list.append(p)
p.start()
for p in process_list:
p.join()
logging.info('length_distribution of mapped spanning reads: %s' % list(length_distribution))
return list(mapped_spanning_reads)
def get_conditional_likelihood(self, ck, ci, cj, ru_counts, r, r_e):
if ck == ci == cj:
return 1-r
if cj == 0: # CHECK LATER
return 0.5 * (1-r)
if ck == ci:
return 0.5 * ((1-r) + r_e ** abs(ck-cj))
if ck == cj:
return 0.5 * ((1-r) + r_e ** abs(ck-ci))
if ck != ci and ck != cj:
return 0.5 * (r_e ** abs(ck-ci) + r_e ** abs(ck-cj))
def find_genotype_based_on_observed_repeats(self, observed_copy_numbers):
ru_counts = {}
for cn in observed_copy_numbers:
if cn not in ru_counts.keys():
ru_counts[cn] = 0
ru_counts[cn] += 1
if len(ru_counts.keys()) < 2:
priors = 0.5
ru_counts[0] = 1
else:
priors = 1.0 / (len(ru_counts.keys()) * (len(ru_counts.keys())-1) / 2)
import operator
ru_counts = sorted(ru_counts.items(), key=operator.itemgetter(1), reverse=True)
r = 0.03
r_e = r / (2 + r)
prs = {}
for ck, occ in ru_counts:
if ck == 0:
continue
for i in range(len(ru_counts)):
ci = ru_counts[i][0]
for j in range(len(ru_counts)):
if j < i:
continue
if self.is_haploid and i != j:
continue
cj = ru_counts[j][0]
if (ci, cj) not in prs.keys():
prs[(ci, cj)] = []
prs[(ci, cj)].append(self.get_conditional_likelihood(ck, ci, cj, ru_counts, r, r_e) ** occ)
posteriors = {}
import numpy
for key in prs.keys():
prs[key] = numpy.prod(numpy.array(prs[key]))
posteriors[key] = prs[key] * priors
sum_of_probs = sum(posteriors.values())
max_prob = 1e-20
result = None
for key, value in posteriors.items():
if value / sum_of_probs > max_prob:
max_prob = value / sum_of_probs
result = key
logging.info('Maximum probability for genotyping: %s' % max_prob)
return result, max_prob
def get_dominant_copy_numbers_from_spanning_reads(self, spanning_reads):
if len(spanning_reads) < 1:
logging.info('There is no spanning read')
return None
max_length = 0
for read in spanning_reads:
if len(read) - 100 > max_length:
max_length = len(read) - 100
max_copies = int(round(max_length / float(len(self.reference_vntr.pattern))))
# max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments()))
vntr_matcher = self.build_vntr_matcher_hmm(max_copies)
observed_copy_numbers = []
for haplotype in spanning_reads:
logp, vpath = vntr_matcher.viterbi(haplotype)
rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement()))
if logp < rev_logp:
vpath = rev_vpath
observed_copy_numbers.append(get_number_of_repeats_in_vpath(vpath))
logging.info('flanked repeats: %s' % observed_copy_numbers)
return self.find_genotype_based_on_observed_repeats(observed_copy_numbers)
@time_usage
def get_haplotype_copy_numbers_from_spanning_reads(self, spanning_reads):
if len(spanning_reads) < 1:
logging.info('There is no spanning read')
return None
max_length = 0
for read in spanning_reads:
if len(read) - 100 > max_length:
max_length = len(read) - 100
max_copies = int(round(max_length / float(len(self.reference_vntr.pattern))))
max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments()))
vntr_matcher = self.build_vntr_matcher_hmm(max_copies)
haplotyper = PacBioHaplotyper(spanning_reads)
haplotypes = haplotyper.get_error_corrected_haplotypes()
copy_numbers = []
for haplotype in haplotypes:
# print('haplotype: %s' % haplotype)
logp, vpath = vntr_matcher.viterbi(haplotype)
rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement()))
if logp < rev_logp:
vpath = rev_vpath
copy_numbers.append(get_number_of_repeats_in_vpath(vpath))
return copy_numbers
def find_ru_counts_with_naive_approach(self, length_dist, spanning_reads):
haplotyper = PacBioHaplotyper(spanning_reads)
haplotypes = haplotyper.get_error_corrected_haplotypes(1)
flanking_region_lengths = []
new_spanning_reads = []
if len(haplotypes) == 0:
return None
self.check_if_flanking_regions_align_to_str(haplotypes[0].upper(), flanking_region_lengths, new_spanning_reads)
reverse_complement_str = str(Seq(haplotypes[0]).reverse_complement())
self.check_if_flanking_regions_align_to_str(reverse_complement_str.upper(), flanking_region_lengths, new_spanning_reads)
if len(flanking_region_lengths) > 0:
return [round(flanking_region_lengths[0] / len(self.reference_vntr.pattern))] * 2
else:
return None
def find_ru_counts_from_average_flanking_region_distance(self, length_dist):
if len(length_dist):
ru_counts_list = [round(length / len(self.reference_vntr.pattern)) for length in length_dist]
            # most_common() yields (value, frequency) pairs sorted by frequency, which the indexing below expects
            ru_count_frequencies = Counter(ru_counts_list).most_common()
            copy_numbers = [ru_count_frequencies[0][0]]
            if len(ru_count_frequencies) > 1 and ru_count_frequencies[1][1] > ru_count_frequencies[0][1] / 5:
copy_numbers.append(ru_count_frequencies[1][0])
else:
copy_numbers = copy_numbers * 2
else:
copy_numbers = None
return copy_numbers
@time_usage
def find_repeat_count_from_pacbio_alignment_file(self, alignment_file, unmapped_filtered_reads):
logging.debug('finding repeat count from pacbio alignment file for %s' % self.reference_vntr.id)
unaligned_spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads)
mapped_spanning_reads = self.get_spanning_reads_of_aligned_pacbio_reads(alignment_file)
spanning_reads = mapped_spanning_reads + unaligned_spanning_reads
copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads)
return copy_numbers
@time_usage
def find_repeat_count_from_pacbio_reads(self, unmapped_filtered_reads, naive=False):
logging.debug('finding repeat count from pacbio reads file for %s' % self.reference_vntr.id)
spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads)
if naive:
copy_numbers = self.find_ru_counts_with_naive_approach(length_dist, spanning_reads)
else:
copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads)
return copy_numbers
@time_usage
def iteratively_update_model(self, alignment_file, unmapped_filtered_reads, selected_reads, hmm):
updated_selected_reads = selected_reads
fitness = sum([read.logp for read in selected_reads])
read_length = len(selected_reads[0].sequence)
reference_repeats = []
for reference_repeat in self.reference_vntr.get_repeat_segments():
sequence = str(reference_repeat).upper()
logp, vpath = hmm.viterbi(sequence)
reference_repeats.append(SelectedRead(sequence, logp, vpath))
logging.info('initial fitness: %s' % fitness)
flanking_region_size = read_length
left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size]
copies = self.get_copies_for_hmm(read_length)
max_steps = 1000
min_improvement = 1
for i in range(max_steps):
old_fitness = fitness
current_vpaths = [(read.sequence, read.vpath) for read in updated_selected_reads + reference_repeats]
hmm = get_read_matcher_model(left_flanking_region, right_flanking_region, None, copies, current_vpaths)
updated_selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads, False, hmm)
            fitness = sum([read.logp for read in updated_selected_reads])
if fitness - old_fitness < min_improvement:
break
logging.info('final fitness: %s' % fitness)
return updated_selected_reads
@time_usage
def select_illumina_reads(self, alignment_file, unmapped_filtered_reads, update=False, hmm=None):
recruitment_score = None
selected_reads = []
vntr_bp_in_unmapped_reads = Value('d', 0.0)
number_of_reads = 0
read_length = 150
for read_segment in unmapped_filtered_reads:
if number_of_reads == 0:
read_length = len(str(read_segment.seq))
number_of_reads += 1
if not hmm:
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
if not recruitment_score:
recruitment_score = self.get_min_score_to_select_a_read(read_length)
if len(read_segment.seq) < read_length:
continue
self.process_unmapped_read(None, str(read_segment.seq), hmm, recruitment_score, vntr_bp_in_unmapped_reads,
selected_reads)
logging.debug('vntr base pairs in unmapped reads: %s' % vntr_bp_in_unmapped_reads.value)
vntr_bp_in_mapped_reads = 0
vntr_start = self.reference_vntr.start_point
vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:]
for read in samfile.fetch(chromosome, vntr_start, vntr_end):
if not recruitment_score:
read_length = len(read.seq)
recruitment_score = self.get_min_score_to_select_a_read(read_length)
if not hmm:
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
if read.is_unmapped:
continue
if len(read.seq) < int(read_length * 0.9):
logging.debug('Rejecting read for short length: %s' % read.seq)
continue
read_end = read.reference_end if read.reference_end else read.reference_start + len(read.seq)
if vntr_start - read_length < read.reference_start < vntr_end or vntr_start < read_end < vntr_end:
if read.seq.count('N') <= 0:
sequence = str(read.seq).upper()
logp, vpath = hmm.viterbi(sequence)
rev_logp, rev_vpath = hmm.viterbi(str(Seq(read.seq).reverse_complement()).upper())
if logp < rev_logp:
sequence = str(Seq(read.seq).reverse_complement()).upper()
logp = rev_logp
vpath = rev_vpath
length = len(sequence)
if is_low_quality_read(read) and not self.recruit_read(logp, vpath, recruitment_score, length):
logging.debug('Rejected Read: %s' % sequence)
continue
selected_reads.append(SelectedRead(sequence, logp, vpath, read.mapq, read.reference_start))
end = min(read_end, vntr_end)
start = max(read.reference_start, vntr_start)
vntr_bp_in_mapped_reads += end - start
logging.debug('vntr base pairs in mapped reads: %s' % vntr_bp_in_mapped_reads)
if update:
selected_reads = self.iteratively_update_model(alignment_file, unmapped_filtered_reads, selected_reads, hmm)
return selected_reads
@time_usage
def find_frameshift_from_alignment_file(self, alignment_file, unmapped_filtered_reads):
logging.debug('finding frameshift from alignment file for %s' % self.reference_vntr.id)
selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads)
return self.find_frameshift_from_selected_reads(selected_reads)
@time_usage
def get_ru_count_with_coverage_method(self, pattern_occurrences, total_counted_vntr_bp, average_coverage):
haplotypes = 1 if self.is_haploid else 2
estimate = [int(pattern_occurrences / (float(average_coverage) * haplotypes))] * 2
        return estimate
        # NOTE: the coverage-bias correction below is unreachable because of the early return above,
        # and `alignment_file` is not defined in this scope.
        pattern_occurrences = total_counted_vntr_bp / float(len(self.reference_vntr.pattern))
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
bias_detector = CoverageBiasDetector(alignment_file, self.reference_vntr.chromosome, reference)
coverage_corrector = CoverageCorrector(bias_detector.get_gc_content_coverage_map())
logging.info('Sequencing mean coverage: %s' % coverage_corrector.get_sequencing_mean_coverage())
observed_copy_number = pattern_occurrences / coverage_corrector.get_sequencing_mean_coverage()
scaled_copy_number = coverage_corrector.get_scaled_coverage(self.reference_vntr, observed_copy_number)
logging.info('scaled copy number and observed copy number: %s, %s' % (scaled_copy_number, observed_copy_number))
return [scaled_copy_number]
@time_usage
def find_repeat_count_from_alignment_file(self, alignment_file, unmapped_filtered_reads, average_coverage=None,
update=False):
logging.debug('finding repeat count from alignment file for %s' % self.reference_vntr.id)
selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads, update)
covered_repeats = []
flanking_repeats = []
total_counted_vntr_bp = 0
for selected_read in selected_reads:
repeats = get_number_of_repeats_in_vpath(selected_read.vpath)
total_counted_vntr_bp += get_number_of_repeat_bp_matches_in_vpath(selected_read.vpath)
logging.debug('logp of read: %s' % str(selected_read.logp))
            logging.debug('left flanking size: %s' % get_left_flanking_region_size_in_vpath(selected_read.vpath))
logging.debug('right flanking size: %s' % get_right_flanking_region_size_in_vpath(selected_read.vpath))
logging.debug(selected_read.sequence)
visited_states = [state.name for idx, state in selected_read.vpath[1:-1]]
if self.read_flanks_repeats_with_confidence(selected_read.vpath):
logging.debug('spanning read visited states :%s' % visited_states)
logging.debug('repeats: %s' % repeats)
covered_repeats.append(repeats)
else:
flanking_repeats.append(repeats)
flanking_repeats = sorted(flanking_repeats)
logging.info('covered repeats: %s' % covered_repeats)
logging.info('flanking repeats: %s' % flanking_repeats)
min_valid_flanked = max(covered_repeats) if len(covered_repeats) > 0 else 0
max_flanking_repeat = [r for r in flanking_repeats if r == max(flanking_repeats) and r >= min_valid_flanked]
if len(max_flanking_repeat) < 5:
max_flanking_repeat = []
exact_genotype, max_prob = self.find_genotype_based_on_observed_repeats(covered_repeats + max_flanking_repeat)
if exact_genotype is not None:
exact_genotype_log = '/'.join([str(cn) for cn in sorted(exact_genotype)])
else:
exact_genotype_log = 'None'
logging.info('RU count lower bounds: %s' % exact_genotype_log)
if average_coverage is None:
return GenotypeResult(exact_genotype, len(selected_reads), len(covered_repeats), len(flanking_repeats),
max_prob)
pattern_occurrences = sum(flanking_repeats) + sum(covered_repeats)
return self.get_ru_count_with_coverage_method(pattern_occurrences, total_counted_vntr_bp, average_coverage)
def find_repeat_count_from_short_reads(self, short_read_files, working_directory='./'):
"""
Map short read sequencing data to human reference genome (hg19) and call find_repeat_count_from_alignment_file
:param short_read_files: short read sequencing data
:param working_directory: directory for generating the outputs
"""
alignment_file = '' + short_read_files
# TODO: use bowtie2 to map short reads to hg19
return self.find_repeat_count_from_alignment_file(alignment_file, working_directory)
@time_usage
def train_classifier_threshold(self, reference_file, read_length=150):
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
simulated_true_reads = self.simulate_true_reads(read_length)
simulated_false_filtered_reads = self.simulate_false_filtered_reads(reference_file)
processed_true_reads = self.find_hmm_score_of_simulated_reads(hmm, simulated_true_reads)
processed_false_reads = self.find_hmm_score_of_simulated_reads(hmm, simulated_false_filtered_reads)
recruitment_score = self.find_recruitment_score_threshold(processed_true_reads, processed_false_reads)
return recruitment_score / float(read_length)
@time_usage
def find_hmm_score_of_simulated_reads(self, hmm, reads):
initial_recruitment_score = -10000
manager = Manager()
processed_reads = manager.list([])
vntr_bp_in_reads = Value('d', 0.0)
for read_segment in reads:
self.process_unmapped_read(None, read_segment, hmm, initial_recruitment_score, vntr_bp_in_reads, processed_reads, False)
return processed_reads
@time_usage
def simulate_false_filtered_reads(self, reference_file, min_match=3):
alphabet = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
m = 4194301
def get_hash(string):
result = 0
for k in range(len(string)):
result = (result + alphabet[string[k].upper()] * (4 ** (keyword_size - k - 1))) % m
return result
false_filtered_reads = []
MAX_FALSE_READS = 10000
read_size = 150
keyword_size = 11
keywords = self.get_keywords_for_filtering(True, keyword_size)
hashed_keywords = set([get_hash(keyword) for keyword in keywords])
match_positions = []
vntr_start = self.reference_vntr.start_point
vntr_end = vntr_start + self.reference_vntr.get_length()
fasta_sequences = SeqIO.parse(open(reference_file), 'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
if name != self.reference_vntr.chromosome:
continue
window_hash = None
for i in range(0, len(sequence) - keyword_size):
if sequence[i].upper() not in 'ACTG' or sequence[i - 1 + keyword_size].upper() not in 'ACTG':
continue
if window_hash is None or sequence[i - 1].upper() not in 'ACTG':
if 'N' in sequence[i:i + keyword_size].upper():
window_hash = None
continue
window_hash = get_hash(sequence[i:i + keyword_size])
continue
window_hash -= alphabet[sequence[i - 1].upper()] * (4 ** (keyword_size - 1))
window_hash = (window_hash * 4 + alphabet[sequence[i - 1 + keyword_size].upper()]) % m
if window_hash in hashed_keywords:
if name == self.reference_vntr.chromosome and vntr_start - read_size < i < vntr_end:
continue
if sequence[i:i + keyword_size].upper() in keywords:
match_positions.append(i)
if len(match_positions) >= min_match and match_positions[-1] - match_positions[-min_match] < read_size:
for j in range(match_positions[-1] - read_size, match_positions[-min_match], 5):
if 'N' not in sequence[j:j + read_size].upper():
false_filtered_reads.append(sequence[j:j + read_size])
if len(false_filtered_reads) > MAX_FALSE_READS:
break
return false_filtered_reads
def simulate_true_reads(self, read_length):
vntr = ''.join(self.reference_vntr.get_repeat_segments())
right_flank = self.reference_vntr.right_flanking_region
left_flank = self.reference_vntr.left_flanking_region
locus = left_flank[-read_length:] + vntr + right_flank[:read_length]
step_size = 1
alphabet = ['A', 'C', 'G', 'T']
sim_reads = []
for i in range(0, len(locus) - read_length + 1, step_size):
sim_reads.append(locus[i:i+read_length].upper())
# add 4 special reads to sim_read
for copies in range(1, len(self.reference_vntr.get_repeat_segments()) - 1):
vntr_section = ''.join(self.reference_vntr.get_repeat_segments()[:copies])
for i in range(1, 11):
sim_reads.append((left_flank[-i:] + vntr_section + right_flank)[:read_length])
sim_reads.append((left_flank + vntr_section + right_flank[:i])[-read_length:])
min_copies = int(read_length / len(vntr)) + 1
for i in range(1, 21):
# print(len((vntr * min_copies)[i:read_length+i]))
sim_reads.append((vntr * min_copies)[i:read_length+i])
# print(len((vntr * min_copies)[-read_length-i:-i]))
sim_reads.append((vntr * min_copies)[-read_length-i:-i])
simulated_true_reads = []
for sim_read in sim_reads:
from random import randint
for i in range(randint(1, 2)):
temp_read = list(sim_read)
temp_read[randint(0, len(sim_read)-1)] = alphabet[randint(0, 3)]
sim_read = ''.join(temp_read)
simulated_true_reads.append(sim_read)
return simulated_true_reads
@time_usage
def find_recruitment_score_threshold(self, processed_true_reads, processed_false_reads):
from sklearn.linear_model import LogisticRegression
true_scores = [read.logp for read in processed_true_reads]
false_scores = [read.logp for read in processed_false_reads]
if len(false_scores) == 0:
false_scores = [min(true_scores) - 2]
clf = LogisticRegression()
x = [[score] for score in true_scores + false_scores]
y = [1] * len(true_scores) + [0] * len(false_scores)
clf.fit(x, y)
recruitment_score = max(true_scores)
for i in range(-1, -300, -1):
if int(clf.predict([[i]])) == 0:
recruitment_score = i
break
return recruitment_score
``` |
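Among the methods above, identify_frameshift weighs two binomial explanations for the reads carrying an indel at a repeat position: sequencing error at the given error rate versus a genuine frameshift at the expected indel rate, and it calls a frameshift when the error-only explanation is less than 1% as likely. A small, self-contained recomputation of that ratio with made-up numbers (the coverage, indel count and the stand-in for the expected_indels argument are all illustrative):

```python
from scipy.stats import binom

coverage = 20            # reads covering the position
observed_indels = 9      # reads supporting the indel
error_rate = 0.01        # per-base sequencing error rate used above
expected_indels = 0.5    # illustrative stand-in for the expected_indels argument

p_error = binom.pmf(observed_indels, coverage, error_rate)            # indels as sequencing noise
p_frameshift = binom.pmf(observed_indels, coverage, expected_indels)  # indels as a real frameshift
print(p_error / p_frameshift < 0.01)   # True: the noise-only explanation is implausible here
```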
{
"source": "Jong-hun-Park/MutationalSignature",
"score": 3
} |
#### File: MutationalSignature/code/TableExtractor.py
```python
import numpy as np
import pandas as pd
def extract_table(input_file, cancer_type):
Cancer_type = cancer_type
data = pd.read_csv(input_file)
columns = data.columns
selected_col = ['Mutation type', 'Trinucleotide']
for c in columns:
if Cancer_type in c:
selected_col.append(c)
    sub_data = data[selected_col].copy()
    # compute the row statistics over the numeric count columns only (the first two columns are labels)
    sub_data['mean'] = sub_data[selected_col[2:]].mean(axis=1)
    sub_data['percentile'] = np.percentile(sub_data[selected_col[2:]], 20, axis=1)
sub_data['cutoff'] = sub_data[["mean", "percentile"]].max(axis=1)
selected_col = selected_col[2:]
for c in selected_col:
#sub_data[c] = sub_data[c] >= sub_data['mean']
sub_data[c] = sub_data[c] >= sub_data['cutoff']
    sub_data = sub_data.drop('mean', axis=1)
    sub_data = sub_data.drop('percentile', axis=1)
    sub_data = sub_data.drop('cutoff', axis=1)
sub_data[list(sub_data.columns)[2:]] = sub_data[list(sub_data.columns)[2:]].astype(int)
sub_data.to_csv('../data/' + Cancer_type + '_0_1_percentile.csv', index=False)
selected_col = ['Mutation type', 'Trinucleotide']
for c in columns:
if Cancer_type in c:
selected_col.append(c)
    sub_data = data[selected_col].copy()
    sub_data = sub_data.replace(0, np.nan)
    sub_data['mean'] = sub_data[selected_col[2:]].mean(axis=1)
selected_col = selected_col[2:]
for c in selected_col:
sub_data[c] = sub_data[c] >= sub_data['mean']
    sub_data = sub_data.drop('mean', axis=1)
sub_data[list(sub_data.columns)[2:]] = sub_data[list(sub_data.columns)[2:]].astype(int)
sub_data.to_csv('../data/' + Cancer_type + '_ignore_0.csv', index=False)
if __name__ == "__main__":
cancer_type = 'Skin-Melanoma'
    input_file = '../data/WES_TCGA.96.csv'  # renamed to avoid shadowing the built-in input()
    extract_table(input_file, cancer_type)
``` |
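extract_table binarises the per-sample mutation counts: a sample gets a 1 for a mutation context when its count reaches the row's cutoff, the maximum of the row mean and the row's 20th percentile. A toy reproduction of that thresholding on a made-up three-sample table (the column names and counts are illustrative):

```python
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'Mutation type': ['C>A', 'C>T'],
    'Trinucleotide': ['ACA', 'TCG'],
    'S1': [2, 40], 'S2': [8, 10], 'S3': [5, 1],
})
samples = ['S1', 'S2', 'S3']
cutoff = np.maximum(toy[samples].mean(axis=1), np.percentile(toy[samples], 20, axis=1))
binarized = toy[samples].ge(cutoff, axis=0).astype(int)
print(pd.concat([toy[['Mutation type', 'Trinucleotide']], binarized], axis=1))
```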
{
"source": "jonghwanhyeon/hangul-jamo",
"score": 2
} |
#### File: hangul-jamo/tests/test_hangul_jamo.py
```python
import pytest
from hangul_jamo import is_syllable, is_jamo_character, compose_jamo_characters, decompose_syllable, compose, decompose
def test_is_syllable():
assert is_syllable('가')
assert is_syllable('갛')
assert is_syllable('힣')
def test_is_not_syllable():
assert not is_syllable('0')
assert not is_syllable('A')
assert not is_syllable('a')
def test_is_jamo_character():
assert is_jamo_character('ㄱ')
assert is_jamo_character('ㅏ')
assert is_jamo_character('ㄳ')
assert is_jamo_character(None)
def test_is_not_jamo_character():
assert not is_jamo_character('0')
assert not is_jamo_character('A')
assert not is_jamo_character('a')
def test_compose_jamo_characters():
assert compose_jamo_characters('ㄱ', 'ㅏ') == '가'
assert compose_jamo_characters('ㄱ', 'ㅏ', None) == '가'
assert compose_jamo_characters('ㄱ', 'ㅏ', 'ㅎ') == '갛'
def test_decompose_syllable():
assert decompose_syllable('가') == ('ㄱ', 'ㅏ', None)
assert decompose_syllable('갛') == ('ㄱ', 'ㅏ', 'ㅎ')
def test_compose():
assert compose('ㄷㅐㅎㅏㄴㅁㅣㄴㄱㅜㄱㅇㅡㄴ ㅁㅣㄴㅈㅜㄱㅗㅇㅎㅘㄱㅜㄱㅇㅣㄷㅏ.') == '대한민국은 민주공화국이다.'
assert compose('Congress shall make no law respecting an establishment of religion, or prohibiting the free exercise thereof') == 'Congress shall make no law respecting an establishment of religion, or prohibiting the free exercise thereof'
def test_decompose():
assert decompose('대한민국은 민주공화국이다.') == 'ㄷㅐㅎㅏㄴㅁㅣㄴㄱㅜㄱㅇㅡㄴ ㅁㅣㄴㅈㅜㄱㅗㅇㅎㅘㄱㅜㄱㅇㅣㄷㅏ.'
assert decompose('Congress shall make no law respecting an establishment of religion, or prohibiting the free exercise thereof') == 'Congress shall make no law respecting an establishment of religion, or prohibiting the free exercise thereof'
``` |
{
"source": "jonghwanhyeon/product-scanner",
"score": 2
} |
#### File: product-scanner/productscanner/pipelines.py
```python
import logging
import os
import pickle
import re
import requests
import scrapy
from functools import partial
from . import base_path, config, notification
seen_directory = os.path.join(base_path, 'seen')
os.makedirs(seen_directory, exist_ok=True)
seen_filename_template = os.path.join(seen_directory, '{name}.pickle')
without_whitespace = partial(re.sub, r'\s', '')
class SearchKeywordPipeline:
def __init__(self):
self.logger = logging.getLogger('searchkeywordpipeline')
self.logger.setLevel(logging.INFO)
self.keywords = [
(without_whitespace(item), item) if isinstance(item, str)
else (without_whitespace(item[0]), item[1])
for item in config.keywords
]
self.crawled = set()
self.seen = set()
def open_spider(self, spider):
seen_filename = seen_filename_template.format(name=spider.name)
if os.path.exists(seen_filename):
with open(seen_filename, 'rb') as input_file:
self.seen = pickle.load(input_file)
def close_spider(self, spider):
seen_filename = seen_filename_template.format(name=spider.name)
with open(seen_filename, 'wb') as output_file:
pickle.dump(self.crawled, output_file)
def process_item(self, item, spider):
if item['id'] in self.crawled:
raise scrapy.exceptions.DropItem('Duplicated item: {id}'.format(id=item['id']))
self.crawled.add(item['id'])
for keyword_without_space, keyword in self.keywords:
if keyword_without_space in without_whitespace(item['name']):
if item['id'] not in self.seen:
self.logger.info('Product found: {name} ({keyword})'.format(
name=item['name'],
keyword=keyword_without_space
))
self.notify(item, keyword)
break
return item
def notify(self, item, keyword):
notification.send(
title='{keyword} - ₩{price:,} (-{discount_rate:.0f}%)'.format(
keyword=keyword,
price=item['price'],
discount_rate=item['discount_rate'] * 100
),
message=item['name'],
url=item['url'],
image_url=item['image_url']
)
self.logger.info('Notification sent: {message}'.format(message=item['name']))
```
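SearchKeywordPipeline strips all whitespace from both the configured keyword and the crawled product name before testing containment, so spacing differences between listings do not break a match. A tiny illustration of that matching with made-up strings:

```python
import re
from functools import partial

without_whitespace = partial(re.sub, r'\s', '')   # same helper as in the pipeline

keyword = 'MacBook Pro 16'
product_name = 'Apple MacBook Pro 16-inch Space Gray'
print(without_whitespace(keyword) in without_whitespace(product_name))   # True
```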
#### File: productscanner/spiders/__init__.py
```python
import logging
import scrapy
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
class ShopSpider(scrapy.Spider):
name = None
allowed_domains = []
start_url = None
parameters = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger.setLevel(logging.INFO)
def start_requests(self):
parameters = self.load_parameters()
url = self.start_url_of(parameters)
self.logger.info('Starting request: {url}'.format(url=url))
yield scrapy.Request(url, self.parse)
def load_parameters(self):
arguments = {}
for name in self.parameters:
value = getattr(self, name, None)
if value is None:
raise ValueError('no `{name}`'.format(name=name))
arguments[name] = value
return arguments
def start_url_of(self, parameters):
components = list(urlparse(self.start_url))
query = parse_qs(components[4]) # 4: query
merged_query = dict(query, **parameters)
components[4] = urlencode(merged_query, doseq=True) # 4: query
return urlunparse(components)
```
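ShopSpider.start_url_of merges the spider's configured parameters into the query string of start_url rather than appending them blindly, so existing query keys are overridden. A standalone walk-through with a hypothetical shop URL and parameters:

```python
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

start_url = 'https://shop.example.com/search?sort=price&page=1'
parameters = {'keyword': 'backpack', 'page': '2'}

components = list(urlparse(start_url))
query = parse_qs(components[4])                       # index 4 is the query component
components[4] = urlencode(dict(query, **parameters), doseq=True)
print(urlunparse(components))
# https://shop.example.com/search?sort=price&page=2&keyword=backpack
```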
#### File: jonghwanhyeon/product-scanner/run.py
```python
import inspect
import logging
import logging.handlers
import os
from importlib import import_module
from scrapy.crawler import CrawlerProcess
from productscanner import base_path, config
def load_spiders():
for name in config.spiders:
module = import_module('productscanner.spiders.{name}'.format(name=name))
for _, member in inspect.getmembers(module, inspect.isclass):
if member.__module__ != module.__name__:
continue
if member.name == name:
yield member
def main():
logs_directory = os.path.join(base_path, 'logs')
os.makedirs(logs_directory, exist_ok=True)
handler = logging.handlers.TimedRotatingFileHandler(
os.path.join(logs_directory, 'product-scanner.log'),
when='midnight',
backupCount=7,
encoding='utf-8'
)
handler.setFormatter(logging.Formatter('%(asctime)s [%(name)s] %(levelname)s: %(message)s'))
handler.setLevel(logging.DEBUG)
# handler.setLevel(logging.INFO)
logging.getLogger().addHandler(handler)
process = CrawlerProcess({
'USER_AGENT': config.crawler['user_agent'],
'ITEM_PIPELINES': {
'productscanner.pipelines.SearchKeywordPipeline': 500,
},
'EXTENSIONS': {
'productscanner.extensions.NotifyExceptionExtension': 500,
}
}, install_root_handler=False)
for spider in load_spiders():
for parameter in config.spiders.get(spider.name, []):
process.crawl(spider, **parameter)
process.start()
if __name__ == '__main__':
main()
``` |
{
"source": "jonghwanhyeon/PushBank2",
"score": 3
} |
#### File: PushBank2/_pushbank/models.py
```python
import datetime
from peewee import *
db = SqliteDatabase('database.db', threadlocals=True)
class BaseModel(Model):
created_at = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
class Account(BaseModel):
account = CharField(unique=True, max_length=20)
balance = BigIntegerField()
class History(BaseModel):
account = ForeignKeyField(Account, related_name='histories')
date = DateField(index=True)
type = CharField(max_length=16)
depositor = CharField(max_length=20)
pay = IntegerField()
withdraw = IntegerField()
balance = BigIntegerField()
distributor = CharField(max_length=20)
def as_dict(self):
d = {
'account': self.account,
'date': self.date,
'type': self.type,
'depositor': self.depositor,
'pay': self.pay,
'withdraw': self.withdraw,
'balance': self.balance,
'distributor': self.distributor,
}
return d
db.connect()
db.create_tables([Account, History], True)
```
#### File: PushBank2/_pushbank/utils.py
```python
from datetime import datetime, timedelta
class classproperty(property):
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
class dateutils:
@classproperty
    def date(cls):
return datetime.now().date()
``` |
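The classproperty descriptor lets dateutils expose today's date as a plain class attribute, recomputed on every access. A usage sketch, assuming the package layout shown in the file header:

```python
from _pushbank.utils import dateutils

print(dateutils.date)   # today's date, e.g. 2015-06-01, with no instance and no call parentheses
```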
{
"source": "jonghwanhyeon/python-namumark",
"score": 3
} |
#### File: python-namumark/tests/test_blocks.py
```python
from namumark import Parser
from namumark.elements import *
def parse(*lines):
return Parser().parse('\n'.join(lines))
def closed(block):
block.closed = True
return block
def test_heading():
assert parse('= heading 1 =') == Document(
Heading(1, 'heading 1'))
assert parse('== heading 2 ==') == Document(
Heading(2, 'heading 2'))
assert parse('=== heading 3 ===') == Document(
Heading(3, 'heading 3'))
assert parse('==== heading 4 ====') == Document(
Heading(4, 'heading 4'))
assert parse('===== heading 5 =====') == Document(
Heading(5, 'heading 5'))
assert parse('====== heading 6 ======') == Document(
Heading(6, 'heading 6'))
# invalid syntax
assert parse('=heading 1=') == Document(
Paragraph('=heading 1='))
def test_quote():
assert parse('> quote') == Document(
Quote(
Paragraph('quote')))
assert parse(
'> quote 1',
'> quote 1',
) == Document(
Quote(
Paragraph(
'quote 1',
'quote 1')))
assert parse(
'> quote 1',
'>> quote 2',
) == Document(
Quote(
closed(Paragraph('quote 1')),
Quote(
Paragraph('quote 2'))))
assert parse(
'> quote 1',
'>> quote 2',
'>>> quote 3',
) == Document(
Quote(
closed(Paragraph('quote 1')),
Quote(
closed(Paragraph('quote 2')),
Quote(
Paragraph('quote 3')))))
assert parse(
'> quote 1',
'>> quote 2',
'>>>> quote 4',
'> quote 1',
'>>> quote 3',
) == Document(
Quote(
closed(Paragraph('quote 1')),
closed(Quote(
closed(Paragraph('quote 2')),
closed(Quote(
closed(Quote(
closed(Paragraph('quote 4')))))))),
closed(Paragraph('quote 1')),
Quote(
Quote(Paragraph('quote 3')))))
def test_unordered_list():
assert parse(
' * unordered list item 1',
) == Document(
UnorderedList(
ListItem(
Paragraph('unordered list item 1'))))
assert parse(
' * unordered list item 1',
' * unordered list item 1',
) == Document(
UnorderedList(
closed(ListItem(
closed(Paragraph('unordered list item 1')))),
ListItem(
Paragraph('unordered list item 1'))))
assert parse(
' * unordered list item 1',
' * unordered list item 2',
) == Document(
UnorderedList(
ListItem(
closed(Paragraph('unordered list item 1')),
UnorderedList(
ListItem(
Paragraph('unordered list item 2'))))))
assert parse(
' * unordered list item 1',
' * unordered list item 2',
' * unordered list item 3',
) == Document(
UnorderedList(
ListItem(
closed(Paragraph('unordered list item 1')),
UnorderedList(
ListItem(
closed(Paragraph('unordered list item 2')),
UnorderedList(
ListItem(
Paragraph('unordered list item 3'))))))))
assert parse(
' * unordered list item 1',
' unordered list item 1',
' * unordered list item 2',
) == Document(
UnorderedList(
ListItem(
closed(Paragraph(
'unordered list item 1',
'unordered list item 1')),
UnorderedList(
ListItem(
Paragraph('unordered list item 2'))))))
assert parse(
' * unordered list item 1',
' * unordered list item 2',
' unordered list item 2',
' * unordered list item 3',
) == Document(
UnorderedList(
ListItem(
closed(Paragraph('unordered list item 1')),
UnorderedList(
ListItem(
closed(Paragraph(
'unordered list item 2',
'unordered list item 2')),
UnorderedList(
ListItem(
Paragraph('unordered list item 3'))))))))
assert parse(
' * unordered list item 1',
' unordered list item 1',
' * unordered list item 2',
' unordered list item 1',
' * unordered list item 3',
) == Document(
UnorderedList(
ListItem(
closed(Paragraph(
'unordered list item 1',
'unordered list item 1')),
closed(UnorderedList(
closed(ListItem(
closed(Paragraph('unordered list item 2')))))),
closed(Paragraph('unordered list item 1')),
Indentation(
UnorderedList(
ListItem(
Paragraph('unordered list item 3')))))))
def test_ordered_list():
bullets = '1AaIi'
for bullet in bullets:
assert parse(
' {}. ordered list item 1'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 1'))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' {}. ordered list item 1'.format(bullet),
) == Document(
OrderedList(
1, bullet,
closed(ListItem(
closed(Paragraph('ordered list item 1')))),
ListItem(
Paragraph('ordered list item 1'))))
assert parse(
' {}.#10 ordered list item 1'.format(bullet),
' {}. ordered list item 1'.format(bullet),
) == Document(
OrderedList(
10, bullet,
closed(ListItem(
closed(Paragraph('ordered list item 1')))),
ListItem(
Paragraph('ordered list item 1'))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' {}. ordered list item 2'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
closed(Paragraph('ordered list item 1')),
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 2'))))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' {}. ordered list item 2'.format(bullet),
' {}. ordered list item 3'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
closed(Paragraph('ordered list item 1')),
OrderedList(
1, bullet,
ListItem(
closed(Paragraph('ordered list item 2')),
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 3'))))))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' ordered list item 1',
' {}. ordered list item 2'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
closed(Paragraph(
'ordered list item 1',
'ordered list item 1')),
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 2'))))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' {}. ordered list item 2'.format(bullet),
' ordered list item 2',
' {}. ordered list item 3'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
closed(Paragraph('ordered list item 1')),
OrderedList(
1, bullet,
ListItem(
closed(Paragraph(
'ordered list item 2',
'ordered list item 2')),
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 3'))))))))
assert parse(
' {}. ordered list item 1'.format(bullet),
' ordered list item 1',
' {}. ordered list item 2'.format(bullet),
' ordered list item 1',
' {}. ordered list item 3'.format(bullet),
) == Document(
OrderedList(
1, bullet,
ListItem(
closed(Paragraph(
'ordered list item 1',
'ordered list item 1')),
closed(OrderedList(
1, bullet,
closed(ListItem(
closed(Paragraph('ordered list item 2')))))),
closed(Paragraph('ordered list item 1')),
Indentation(
OrderedList(
1, bullet,
ListItem(
Paragraph('ordered list item 3')))))))
def test_indentation():
assert parse(
'indentation 0',
' indentation 1',
) == Document(
closed(Paragraph('indentation 0')),
Indentation(
Paragraph('indentation 1')))
assert parse(
'indentation 0',
' indentation 1',
' indentation 2',
) == Document(
closed(Paragraph('indentation 0')),
Indentation(
closed(Paragraph('indentation 1')),
Indentation(
Paragraph('indentation 2'))))
assert parse(
'indentation 0',
' indentation 1',
' indentation 2',
' indentation 1',
) == Document(
closed(Paragraph('indentation 0')),
Indentation(
closed(Paragraph('indentation 1')),
closed(Indentation(
closed(Paragraph('indentation 2')))),
Paragraph('indentation 1')))
assert parse(
'indentation 0',
' indentation 1',
' indentation 2',
'indentation 0',
' indentation 2',
) == Document(
closed(Paragraph('indentation 0')),
closed(Indentation(
closed(Paragraph('indentation 1')),
closed(Indentation(
closed(Paragraph('indentation 2')))))),
closed(Paragraph('indentation 0')),
Indentation(
Indentation(
Paragraph('indentation 2'))))
def test_thematic_break():
assert parse('----') == Document(
ThematicBreak())
assert parse('-----') == Document(
ThematicBreak())
assert parse('------') == Document(
ThematicBreak())
assert parse('-------') == Document(
ThematicBreak())
assert parse('--------') == Document(
ThematicBreak())
assert parse('---------') == Document(
ThematicBreak())
# invalid syntax
assert parse('---') == Document(
Paragraph('---'))
``` |
{
"source": "jonghwanhyeon/python-notification",
"score": 3
} |
#### File: notification/notifiers/pushover.py
```python
import requests
from . import Notifier
from ..exceptions import NotifierException
pushover_url = 'https://api.pushover.net/1/messages.json'
allowed_parameters = { 'attachment', 'device', 'title', 'url', 'url_title', 'priority', 'sound', 'timestamp' }
class Pushover(Notifier):
def __init__(self, user, token):
self.user = user
self.token = token
def notify(self, message, **kwargs):
for key in kwargs:
if key not in allowed_parameters:
raise ValueError('Invalid parameter: {key}'.format(key=key))
parameters = dict(kwargs, user=self.user, token=self.token, message=message)
files = None
if 'attachment' in parameters:
files = { 'attachment': ('attachment.jpg', parameters['attachment'], 'image/jpeg') }
        response = requests.post(pushover_url, data=parameters, files=files)
        response_as_json = response.json()
        if response.status_code != 200:
            raise NotifierException(response_as_json['errors'][0])
        return response_as_json['request']
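# Illustrative usage (a sketch, not part of the original module; the user key,
# API token, and message below are placeholders):
#
#     notifier = Pushover(user='<user-key>', token='<api-token>')
#     request_id = notifier.notify('Job finished', title='worker', priority=0)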
``` |
{
"source": "jonghwanhyeon/python-stopwatch",
"score": 2
} |
#### File: stopwatch/contextmanagers/profile.py
```python
import atexit
import functools
import math
from typing import Callable, Optional, Union
from termcolor import colored
from ..statistics import Statistics
from ..stopwatch import Stopwatch
from . import Caller, format_elapsed_time, inspect_caller
def make_report(caller: Caller, name: str, statistics: Statistics) -> str:
tag = ''.join([colored(f'[{caller.module}', color='blue', attrs=['bold']),
colored(f'#{name}', color='green', attrs=['bold']),
colored(']', color='blue', attrs=['bold'])])
items = ', '.join([f'hits={len(statistics)}',
f'mean={format_elapsed_time(statistics.mean)}',
f'min={format_elapsed_time(statistics.minimum)}',
f'median={format_elapsed_time(statistics.median)}',
f'max={format_elapsed_time(statistics.maximum)}',
f'dev={format_elapsed_time(math.sqrt(statistics.variance))}'])
return f'{tag} {items}'
def print_report(caller: Caller, name: str, statistics: Statistics):
if len(statistics) > 0:
print(make_report(caller, name, statistics))
def profile(func: Optional[Callable] = None, **kwargs) -> Callable:
caller = inspect_caller()
def decorated(func: Callable):
name = kwargs.get('name', func.__name__)
report_every = kwargs.get('report_every', 1)
should_report = report_every is not None
statistics = Statistics()
atexit.register(print_report, caller, name, statistics)
@functools.wraps(func)
def wrapper(*args, **kwargs):
with Stopwatch() as stopwatch:
result = func(*args, **kwargs)
statistics.add(stopwatch.elapsed)
if should_report and (len(statistics) % report_every) == 0:
print_report(caller, name, statistics)
return result
return wrapper
return decorated(func) if callable(func) else decorated
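# Illustrative usage (a sketch, not part of the original module; the function
# and report interval are arbitrary examples):
#
#     @profile(name='fib', report_every=100)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# Each call is timed with Stopwatch; a report line is printed every
# `report_every` calls and once more at interpreter exit via atexit.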
```
#### File: stopwatch/contextmanagers/stopwatch.py
```python
import sys
from typing import Optional
from termcolor import colored
from ..stopwatch import Stopwatch
from . import Caller, format_elapsed_time, inspect_caller
# pylint: disable=invalid-name
class stopwatch:
def __init__(self, message: Optional[str] = None):
self._message = message
self._caller = inspect_caller()
self._stopwatch = Stopwatch()
def __enter__(self):
self._stopwatch.start()
def __exit__(self, *exception):
self._stopwatch.stop()
print(self._format(self._message, self._caller, self._stopwatch.elapsed), file=sys.stderr)
@staticmethod
def _format(message: Optional[str], caller: Caller, elapsed: float) -> str:
items = [colored(f'[{caller.module}:{caller.function}:{caller.line_number}]',
color='blue', attrs=['bold']),
' ~ ',
colored(format_elapsed_time(elapsed), color='magenta', attrs=['bold'])]
if message is not None:
items += [' - ', message]
return ''.join(items)
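# Illustrative usage (a sketch, not part of the original module; load_data is a
# hypothetical function):
#
#     with stopwatch('loading data'):
#         data = load_data()
#
# On exit, the elapsed time is printed to stderr tagged with the caller's
# module, function, and line number.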
``` |
{
"source": "jonghwanhyeon/python-switchbot",
"score": 3
} |
#### File: python-switchbot/switchbot/client.py
```python
from typing import Any
import humps
import requests
switchbot_host = 'https://api.switch-bot.com/v1.0'
class SwitchBotClient:
def __init__(self, token: str):
self.session = requests.Session()
self.session.headers['Authorization'] = token
def request(self, method: str, path: str, **kwargs) -> Any:
url = f'{switchbot_host}/{path}'
response = self.session.request(method, url, **kwargs)
if response.status_code != 200:
raise RuntimeError(
f'SwitchBot API server returns status {response.status_code}')
response_in_json = humps.decamelize(response.json())
if response_in_json['status_code'] != 100:
raise RuntimeError(
f'An error occurred: {response_in_json["message"]}')
return response_in_json
def get(self, path: str, **kwargs) -> Any:
return self.request('GET', path, **kwargs)
def post(self, path: str, **kwargs) -> Any:
return self.request('POST', path, **kwargs)
def put(self, path: str, **kwargs) -> Any:
return self.request('PUT', path, **kwargs)
def delete(self, path: str, **kwargs) -> Any:
return self.request('DELETE', path, **kwargs)
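# Illustrative usage (a sketch, not part of the original module; the token and
# device id are placeholders, and the command payload follows the v1.0 API that
# this client wraps):
#
#     client = SwitchBotClient(token='<open-token>')
#     devices = client.get('devices')
#     client.post('devices/<device-id>/commands',
#                 json={'command': 'turnOn', 'parameter': 'default',
#                       'commandType': 'command'})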
``` |
{
"source": "JonghwanMun/MCL-KD",
"score": 2
} |
#### File: src/experiment/eval.py
```python
import os
import time
import yaml
import json
import logging
import argparse
import numpy as np
from datetime import datetime
import torch
import torch.utils.data as data
from torch.autograd import Variable
from src.model import building_networks
from src.dataset import clevr_dataset, vqa_dataset
from src.experiment import common_functions as cmf
from src.utils import accumulator, timer, utils, io_utils
""" Get parameters """
def _get_argument_params():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--loader_config_path", default="src/experiment/options/test_loader_config.yml",
help="Do evaluation or getting/saving some values")
parser.add_argument("--mode", default="eval", help="Do evaluation or getting/saving some values")
parser.add_argument("--exp", type=str, required=True, help="Experiment or configuration name")
parser.add_argument("--model_type", default="ensemble", help="Model type among [san | ensemble | saaa].")
parser.add_argument("--dataset", default="clevr", help="dataset to train models [clevr|vqa].")
parser.add_argument("--num_workers", type=int, default=4, help="The number of workers for data loader.")
parser.add_argument("--start_epoch", type=int, default=10, help="Start epoch to evaluate.")
parser.add_argument("--end_epoch", type=int, default=50, help="End epoch to evaluate.")
parser.add_argument("--epoch_stride", type=int, default=5, help="Stride for jumping epoch.")
parser.add_argument("--debug_mode" , action="store_true", default=False,
help="Run the script in debug mode")
params = vars(parser.parse_args())
print (json.dumps(params, indent=4))
return params
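# Illustrative invocation (a sketch; the experiment name and epoch range are
# hypothetical and depend on what was trained; run from the repository root):
#
#     python -m src.experiment.eval --exp my_experiment --model_type ensemble \
#         --dataset clevr --mode eval --start_epoch 10 --end_epoch 50 --epoch_stride 5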
def main(params):
# load configuration of pre-trained models
exp_path = os.path.join("results", params["dataset"],
params["model_type"], params["exp"])
config_path = os.path.join(exp_path, "config.yml")
config = io_utils.load_yaml(config_path)
params["config_path"] = config_path
config = M.override_config_from_params(config, params)
config["exp_path"] = exp_path
cmf.create_save_dirs(config["misc"])
# create logger
logger_path = os.path.join(config["exp_path"], "evaluation.log")
logger = io_utils.get_logger("Evaluate", log_file_path=logger_path)
""" Build data loader """
loader_config = io_utils.load_yaml(params["loader_config_path"])
dset = dataset.DataSet(loader_config)
L = data.DataLoader(dset, batch_size=loader_config["batch_size"], \
num_workers=params["num_workers"], \
shuffle=False, collate_fn=dataset.collate_fn)
config = M.override_config_from_loader(config, dset)
if params["mode"] == "eval":
""" Evaluating networks """
e0 = params["start_epoch"]
e1 = params["end_epoch"]
e_stride = params["epoch_stride"]
sample_data = dset.get_samples(5)
for epoch in range(e0, e1+1, e_stride):
""" Build network """
net = M(config)
net.bring_loader_info(dset)
# ship network to use gpu
if config["model"]["use_gpu"]:
net.gpu_mode()
# load checkpoint
if not (net.classname == "ENSEMBLE" and config["model"]["version"] == "IE"):
ckpt_path = os.path.join(exp_path, "checkpoints",
"checkpoint_epoch_{:03d}.pkl".format(epoch))
assert os.path.exists(ckpt_path), \
"Checkpoint does not exists ({})".format(ckpt_path)
net.load_checkpoint(ckpt_path)
# If checkpoint is already applied with curriculum learning
apply_cc_after = utils.get_value_from_dict(
config["model"], "apply_curriculum_learning_after", -1)
if (apply_cc_after > 0) and (epoch >= apply_cc_after):
net.apply_curriculum_learning()
cmf.evaluate(config, L, net, epoch-1, logger_name="eval",
mode="Evaluation", verbose_every=100)
elif params["mode"] == "selection":
epoch = params["start_epoch"]
""" Build network """
net = M(config)
net.bring_loader_info(dset)
# ship network to use gpu
if config["model"]["use_gpu"]:
net.gpu_mode()
# load checkpoint
ckpt_path = os.path.join(exp_path, "checkpoints", "checkpoint_epoch_{:03d}.pkl".format(epoch))
assert os.path.exists(ckpt_path), "Checkpoint does not exists ({})".format(ckpt_path)
net.load_checkpoint(ckpt_path)
apply_cc_after = utils.get_value_from_dict(
config["model"], "apply_curriculum_learning_after", -1)
# If checkpoint use curriculum learning
if (apply_cc_after > 0) and (epoch >= apply_cc_after):
net.apply_curriculum_learning()
cmf.get_selection_values(config, L, net, epoch-1, logger_name="eval", mode="Evaluation", verbose_every=100)
if __name__ == "__main__":
params = _get_argument_params()
global M, dataset
M = cmf.get_model(params["model_type"])
dataset = cmf.get_dataset(params["dataset"])
main(params)
```
#### File: src/experiment/train.py
```python
import os
import sys
sys.path.append("src/externals/vqa")
import time
import yaml
import json
import logging
import argparse
import numpy as np
from datetime import datetime
import torch
import torch.utils.data as data
from torch.autograd import Variable
from src.model import building_networks
from src.dataset import clevr_dataset, vqa_dataset
from src.experiment import common_functions as cmf
from src.utils import accumulator, timer, utils, io_utils
""" Get parameters """
def _get_argument_params():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--config_path",
default="src/experiment/options/default.yml", help="Path to config file.")
parser.add_argument("--model_type",
default="ensemble", help="Model type among [san | saaa | ensemble].")
parser.add_argument("--dataset",
default="clevr", help="Dataset to train models [clevr | vqa].")
parser.add_argument("--num_workers", type=int,
default=4, help="The number of workers for data loader.")
parser.add_argument("--tensorboard_dir" , type=str, default="./tensorboard",
help="Directory for tensorboard")
parser.add_argument("--debug_mode" , action="store_true", default=False,
help="Train the model in debug mode.")
params = vars(parser.parse_args())
print(json.dumps(params, indent=4))
return params
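# Illustrative invocation (a sketch; the config path is the repository default
# and the remaining flags mirror the argument defaults above; run from the
# repository root):
#
#     python -m src.experiment.train --config_path src/experiment/options/default.yml \
#         --model_type ensemble --dataset clevr --num_workers 4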
""" Training the network """
def train(config):
""" Build data loader """
dsets = {}
dsets["train"] = dataset.DataSet(config["train_loader"])
dsets["test"] = dataset.DataSet(config["test_loader"])
L = {}
L["train"] = data.DataLoader( \
dsets["train"], batch_size=config["train_loader"]["batch_size"], \
num_workers=config["misc"]["num_workers"], \
shuffle=True, collate_fn=dataset.collate_fn)
L["test"] = data.DataLoader( \
dsets["test"], batch_size=config["test_loader"]["batch_size"], \
num_workers=config["misc"]["num_workers"], \
shuffle=True, collate_fn=dataset.collate_fn)
config = M.override_config_from_loader(config, dsets["train"])
""" Build network """
net = M(config)
net.bring_loader_info(dsets)
logger["train"].info(str(net))
apply_cc_after = utils.get_value_from_dict(
config["model"], "apply_curriculum_learning_after", -1)
# load checkpoint if exists
if len(config["model"]["checkpoint_path"]) > 0:
net.load_checkpoint(config["model"]["checkpoint_path"])
start_epoch = int(utils.get_filename_from_path(
config["model"]["checkpoint_path"]).split("_")[-1])
# If checkpoint use curriculum learning
if (apply_cc_after > 0) and (start_epoch >= apply_cc_after):
net.apply_curriculum_learning()
else:
start_epoch = 0
# ship network to use gpu
if config["model"]["use_gpu"]:
net.gpu_mode()
# Prepare tensorboard
net.create_tensorboard_summary(config["misc"]["tensorboard_dir"])
""" Run training network """
ii = 0
tm = timer.Timer() # tm: timer
iter_per_epoch = dsets["train"].get_iter_per_epoch()
min_lr = config["optimize"].get("min_lr", 0.0002)
for epoch in range(start_epoch, config["optimize"]["num_epoch"]):
net.train_mode() # set network as train mode
net.reset_status() # initialize status
for batch in L["train"]:
data_load_duration = tm.get_duration()
# maintain sample data to observe learning status
if ii == 0:
sample_data = dsets["train"].get_samples(5)
""" TODO: get samples from both training/test set
test_sample_data = dsets["test"].get_samples(5))
"""
# Forward and update the network
# Note that the 1st and 2nd item of outputs from forward() should be
# loss and logits. The others would change depending on the network
tm.reset()
lr = utils.adjust_lr(ii+1, iter_per_epoch, config["optimize"], min_lr)
outputs = net.forward_update(batch, lr)
run_duration = tm.get_duration()
# Compute status for current batch: loss, evaluation scores, etc
net.compute_status(outputs[1], batch[0][-1])
# print learning status
if (ii+1) % config["misc"]["print_every"] == 0:
net.print_status(epoch+1, ii+1)
txt = "fetching for {:.3f}s, optimizing for {:.3f}s, lr = {:.5f}"
logger["train"].debug(txt.format( data_load_duration, run_duration, lr))
logger["train"].info("\n")
# visualize results
if (config["misc"]["vis_every"] > 0) \
and ((ii+1) % config["misc"]["vis_every"] == 0):
if config["misc"]["model_type"] == "ensemble":
net.save_results(sample_data, "iteration_{}".format(ii+1), mode="train")
ii += 1
tm.reset()
if config["misc"]["debug"]:
if ii % 100 == 0:
break
# epoch done
# save network every epoch
net.save_checkpoint(epoch+1)
# visualize results
net.save_results(sample_data, "epoch_{:03d}".format(epoch+1), mode="train")
# print status (metric) accumulated over each epoch
net.print_counters_info(epoch+1, logger_name="epoch", mode="Train")
# validate network
if (epoch+1) % config["evaluation"]["every_eval"] == 0:
cmf.evaluate(config, L["test"], net, epoch, logger_name="epoch", mode="Valid")
# curriculum learning
if (apply_cc_after >= 0) and ((epoch+1) == apply_cc_after):
net.apply_curriculum_learning()
# reset reference time to compute duration of loading data
tm.reset()
def main():
# get parameters from cmd
params = _get_argument_params()
global M, dataset
M = cmf.get_model(params["model_type"])
dataset = cmf.get_dataset(params["dataset"])
# loading configuration and setting environment
config = io_utils.load_yaml(params["config_path"])
config = M.override_config_from_params(config, params)
cmf.create_save_dirs(config["misc"])
# create loggers
global logger
logger = cmf.create_logger(config)
# train network
train(config)
if __name__ == "__main__":
main()
``` |
{
"source": "jonghyunharrylee/PCGA",
"score": 2
} |
#### File: examples/adh_Savannah/adh.py
```python
import os
import numpy as np
from shutil import copy2, rmtree
from time import time
from multiprocessing import Pool
import setup_savannah
'''
three operations
1. write inputs
2. run simul
3. read outputs
'''
class Model:
def __init__(self,params = None):
self.idx = 0
self.homedir = os.path.abspath('./')
self.deletedir = True
        from psutil import cpu_count  # physical cpu counts
self.ncores = cpu_count(logical=False)
self.ntsim = 1
##instantiate the class that describes the forward problem geometry, boundary conditions, initial conditions
# inflow discharge and free surface elevation at the boundary
self.Q_b = 6873.5
self.z_f = 97.14
if params is not None:
if 'deletedir' in params:
self.deletedir = params['deletedir']
if 'homedir' in params:
self.homedir = params['homedir']
if 'ncores' in params:
self.ncores = params['ncores']
self.adh_version = params['adh_version']
self.adh_exec = params['adh_exec']
self.pre_adh_exec = params['pre_adh_exec']
self.adh_grid = params['adh_grid']
self.adh_rect = params['adh_rect']
self.adh_mesh = params['adh_mesh']
self.adh_bc = params['adh_bc']
if 'adh_ntsim' in params: self.ntsim = params['adh_ntsim']
# inflow discharge and free surface elevation at the boundary
            # needed for writing initial conditions, potentially
if 'z_f' in params: self.z_f = params['z_f']
if 'Q_b' in params: self.Q_b = params['Q_b']
self.velocity_obs_file = params['velocity_obs_file']
self.elevation_obs_file = params['elevation_obs_file']
self.true_soln_file_h5 = None if 'true_soln_file_h5' not in params else params['true_soln_file_h5']
self.true_soln_meshbase = None if 'true_soln_meshbase' not in params else params['true_soln_meshbase']
self.sim_dir = './simul' if 'sim_dir' not in params else params['sim_dir']
def create_dir(self,idx=None):
if idx is None:
idx = self.idx
mydir = os.path.join(self.sim_dir,"simul{0:04d}".format(idx))
mydir = os.path.abspath(os.path.join(self.homedir, mydir))
if not os.path.exists(mydir):
os.makedirs(mydir)
#sim_prefix= "./sim_files/savannah_gridgen_new_nx501_ny41" #basename of adh mesh and files for simulation
#sim_prefix = './sim_files/savannah_gridgen_new_nx501_ny41.bc'
#if self.adh_version < 5.:
sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41")
#else:
# sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41")
copy2(self.adh_mesh, sim_prefix + '.3dm')
copy2(self.adh_bc, sim_prefix + '.bc')
return mydir, sim_prefix
def run_model(self,bathy,idx=0):
'''run adh
'''
sim_dir, sim_prefix = self.create_dir(idx)
#print(sim_dir)
        ##instantiate the class that describes the forward problem geometry, boundary conditions, initial conditions
forward_prob = setup_savannah.SavannahRiver(grid_file=self.adh_grid,
rect_file=self.adh_rect,
initial_free_surface_elevation=self.z_f)
# mesh_file=self.adh_mesh,
##write out the base mesh, input file, and initial condition file
forward_prob.writeMesh(sim_prefix)
forward_prob.writeBCFile(sim_prefix)
forward_prob.writeHotFile(sim_prefix)
##get the measurement locations
velocity_obs_loc = np.loadtxt(self.velocity_obs_file)
elev_obs_loc = np.loadtxt(self.elevation_obs_file)
##instantiate the inverse problem which controls the forward model simulation
prm = setup_savannah.SavannahRiverProblem(forward_prob.mesh,
forward_prob,
velocity_obs_loc,
elev_obs_loc,
ntsim=self.ntsim,
sim_prefix=sim_prefix,
debug_rigid_lid=False,
pre_adh_path=self.pre_adh_exec,
adh_path=self.adh_exec,
true_soln_file_h5=self.true_soln_file_h5,
true_soln_meshbase=self.true_soln_meshbase,
Q_b=self.Q_b,
z_f=self.z_f)
# AdH_version=self.adh_version,
t0 = 0.
x_true = prm.get_true_solution(t0)
# measurment matrix
H_meas = prm.get_measurement_matrix(t0)
x_dummy = x_true.copy()
#z_in = x_true[:prm.nn]
bathy = bathy.reshape(-1)
x_dummy[:prm.nn] = bathy
x_dummy[prm.nn:] = prm.compute_velocity(bathy, t0)
if self.deletedir:
rmtree(sim_dir, ignore_errors=True)
return H_meas.dot(x_dummy)
def run(self,bathy,par,ncores=None):
if ncores is None:
ncores = self.ncores
method_args = range(bathy.shape[1])
args_map = [(bathy[:, arg:arg + 1], arg) for arg in method_args]
if par:
pool = Pool(processes=ncores)
simul_obs = pool.map(self, args_map)
else:
simul_obs =[]
for item in args_map:
simul_obs.append(self(item))
return np.array(simul_obs).T
#pool.close()
#pool.join()
def __call__(self,args):
return self.run_model(args[0],args[1])
#return args[0](args[1], args[2])
#return self.run_model(self,bathy,idx)
#def run_in_parallel(self,args):
# return args[0].run_model(args[1], args[2])
if __name__ == '__main__':
import adh
import numpy as np
from time import time
params = {'sim_dir':'./simul',
'adh_exec':'./bin/v4/adh',
'pre_adh_exec':'./bin/v4/pre_adh',
'adh_version':4.5,
'adh_grid':'./mesh_files/grid_savannah_river_nx501_ny41',
'adh_rect':'./mesh_files/rect_savannah_river_nx501_ny41',
'adh_mesh':'./sim_files/savannah_gridgen_new_nx501_ny41.3dm',
'adh_bc':'./sim_files/savannah_gridgen_new_nx501_ny41.bc',
'velocity_obs_file':'./observation_files/observation_loc_drogue12345_50ft.dat',
'elevation_obs_file':'./observation_files/observation_loc_none.dat',
'true_soln_file_h5':'./true_files/savannah_gridgen_true_nx501_ny41_p0.h5',
'true_soln_meshbase':'./true_files/savannah_gridgen_true_nx501_ny41'
}
bathy = np.loadtxt("true.txt")
bathy = np.array(bathy).reshape(-1, 1)
par = False # parallelization false
mymodel = adh.Model(params)
print('1) single run')
#simul_obs = mymodel.run(bathy,False)
#simul_obs = mymodel.run_model(bathy)
ncores = 2
nrelzs = 2
print('2) parallel run with ncores = %d' % ncores)
    par = True  # parallelization enabled
bathyrelz = np.zeros((np.size(bathy,0),nrelzs),'d')
for i in range(nrelzs):
bathyrelz[:,i:i+1] = bathy + 0.1*np.random.randn(np.size(bathy,0),1)
simul_obs_all = mymodel.run(bathyrelz,True,ncores)
#
#simul_obs_all = pool.map(run_in_parallel, args_map)
#pool.close()
#pool.join()
#simul_obs_all = mymodel.run(bathyrelz,par,ncores = ncores)
#simul_obs = run_in_parallel(args_map[0])
#print(simul_obs_all)
    # use all the physical cores if ncores is not specified
#print('3) parallel run with all the physical cores')
#simul_obs_all = mymodel.run(bathyrelz,par)
#print(simul_obs_all)
```
#### File: examples/modflow_flopy/example_inv_mf.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from pyPCGA import PCGA
import mf
import math
import datetime as dt
import os
import sys
# model domain and discretization
Lx = 1000.; Ly = 750.; Lz = 1; nlay = 1; nrow = 75; ncol = 100
Q = 25.; Rch = 0.001
ztop = 0.; zbot = -1.
# seems confusing considering flopy notation, remember python array ordering of col, row and lay
N = np.array([ncol, nrow, nlay])
m = np.prod(N)
dx = np.array([10., 10., 1.])
xmin = np.array([0. + dx[0] / 2., 0. + dx[1] / 2., 0. + dx[2] / 2.])
xmax = np.array([Lx - dx[0] / 2., Ly - dx[1] / 2., Lz - dx[2] / 2.])
# parameters
if os.name == 'nt':
mf_exec = 'mf2005.exe'
elif sys.platform == 'darwin':
mf_exec = 'mf2005_mac'
else:
mf_exec = 'mf2005'
input_dir = "./input_files"
sim_dir = './simul'
# location of observations
obs_locmat = np.zeros((nlay, nrow, ncol), dtype=bool)
for i in range(5, 71, 16):
for j in range(9, 96, 16):
obs_locmat[0, i, j] = 1
# Hydraulic tomography - crosswell pumping test setting
Q_locs_idx = np.where(obs_locmat == True)
Q_locs = []
for Q_loc in zip(Q_locs_idx[0], Q_locs_idx[1], Q_locs_idx[2]):
Q_locs.append(Q_loc)
# covariance kernel and scale parameters
prior_std = 1.0
prior_cov_scale = np.array([200., 200., 1.])
def kernel(r): return (prior_std ** 2) * np.exp(-r)
# for plotting
x = np.linspace(0. + dx[0] / 2., Lx - dx[0] / 2., N[0])
y = np.linspace(0. + dx[1] / 2., Ly - dx[1] / 2., N[1])
XX, YY = np.meshgrid(x, y)
pts = np.hstack((XX.ravel()[:, np.newaxis], YY.ravel()[:, np.newaxis]))
# load true value for comparison purpose
s_true = np.loadtxt('true_logK.txt')
s_true = np.array(s_true).reshape(-1, 1) # make it 2D array
obs = np.loadtxt('obs.txt')
mf_params = {'mf_exec': mf_exec, 'input_dir': input_dir,
'sim_dir': sim_dir,
'Lx': Lx, 'Ly': Ly,
'Q': Q, 'Rch': Rch,
'nlay': nlay, 'nrow': nrow, 'ncol': ncol,
'zbot': zbot, 'ztop': ztop,
'obs_locmat': obs_locmat, 'Q_locs': Q_locs}
# prepare interface to run as a function
def forward_model(s, parallelization, ncores=None):
model = mf.Model(mf_params)
if parallelization:
simul_obs = model.run(s, parallelization, ncores)
else:
simul_obs = model.run(s, parallelization)
return simul_obs
params = {'R': (0.5) ** 2, 'n_pc': 50,
'maxiter': 10, 'restol': 0.01,
'matvec': 'FFT', 'xmin': xmin, 'xmax': xmax, 'N': N,
'prior_std': prior_std, 'prior_cov_scale': prior_cov_scale,
'kernel': kernel, 'post_cov': "diag",
'precond': True, 'LM': True, #'LM_smin' : 1.0, 'LM_smax' : 4.0,
'parallel': True, 'linesearch': True,
'forward_model_verbose': False, 'verbose': False,
'iter_save': True}
# params['objeval'] = False, if true, it will compute accurate objective function
# params['ncores'] = 36, with parallel True, it will use the maximum number of physical cores unless specified
s_init = np.ones((m, 1))
# s_init = np.copy(s_true) # you can try with s_true!
# initialize
prob = PCGA(forward_model, s_init, pts, params, s_true, obs)
# prob = PCGA(forward_model, s_init, pts, params, s_true, obs, X = X) #if you want to add your own drift X
# run inversion
s_hat, simul_obs, post_diagv, iter_best = prob.Run()
# plotting results
s_hat3d = s_hat.reshape(nlay, nrow, ncol)
s_hat2d = s_hat3d[0,:,:]
s_true3d = s_true.reshape(nlay, nrow, ncol)
s_true2d = s_true3d[0,:,:]
post_diagv[post_diagv < 0.] = 0. # just in case
post_std = np.sqrt(post_diagv)
post_std3d = post_std.reshape(nlay, nrow, ncol)
post_std2d = post_std3d[0,:,:]
minv = s_true.min()
maxv = s_true.max()
# best result
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
plt.suptitle('prior var.: (%g)^2, n_pc : %d' % (prior_std, params['n_pc']))
im = axes[0].pcolormesh(XX,YY,s_true2d, vmin=minv, vmax=maxv, cmap=plt.get_cmap('jet'))
axes[0].set_title('(a) True', loc='left')
axes[0].set_aspect('equal')
axes[0].set_xlabel('x (m)')
axes[0].set_ylabel('y (m)')
axes[0].axis([XX.min(), XX.max(), YY.min(), YY.max()])
axes[1].pcolormesh(XX, YY, s_hat2d, vmin=minv, vmax=maxv, cmap=plt.get_cmap('jet'))
axes[1].set_title('(b) Estimate', loc='left')
axes[1].set_xlabel('x (m)')
axes[1].set_aspect('equal')
axes[1].axis([XX.min(), XX.max(), YY.min(), YY.max()])
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.savefig('best.png')
plt.close(fig)
# uncertainty
fig = plt.figure()
im = plt.pcolormesh(XX,YY,post_std2d, cmap=plt.get_cmap('jet'))
plt.axis([XX.min(), XX.max(), YY.min(), YY.max()])
plt.title('Uncertainty (std)', loc='left')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.gca().set_aspect('equal', adjustable='box')
fig.colorbar(im)
fig.savefig('std.png')
plt.close(fig)
# observation mismatch
nobs = prob.obs.shape[0]
fig = plt.figure()
plt.title('obs. vs simul.')
plt.plot(prob.obs, simul_obs, '.')
plt.xlabel('observation')
plt.ylabel('simulation')
minobs = np.vstack((prob.obs, simul_obs)).min(0)
maxobs = np.vstack((prob.obs, simul_obs)).max(0)
plt.plot(np.linspace(minobs, maxobs, 20), np.linspace(minobs, maxobs, 20), 'k-')
plt.axis('equal')
axes = plt.gca()
axes.set_xlim([math.floor(minobs), math.ceil(maxobs)])
axes.set_ylim([math.floor(minobs), math.ceil(maxobs)])
fig.savefig('obs.png', dpi=fig.dpi)
# plt.show()
plt.close(fig)
# objective values
fig = plt.figure()
plt.semilogy(range(len(prob.objvals)), prob.objvals, 'r-')
plt.title('obj values over iterations')
plt.axis('tight')
fig.savefig('obj.png', dpi=fig.dpi)
plt.close(fig)
fig, axes = plt.subplots(4, 4, sharex=True, sharey=True)
fig.suptitle('n_pc : %d' % params['n_pc'])
for i in range(4):
for j in range(4):
tmp3d = prob.priorU[:, (i * 4 + j) * 2].reshape(nlay,nrow,ncol)
axes[i, j].pcolormesh(XX,YY,tmp3d[0,:,:])
axes[i, j].set_title('%d-th eigv' % ((i * 4 + j) * 2))
axes[i, j].axis([XX.min(), XX.max(), YY.min(), YY.max()])
fig.savefig('eigv.png', dpi=fig.dpi)
plt.close(fig)
fig = plt.figure()
plt.semilogy(prob.priord, 'o')
fig.savefig('eig.png', dpi=fig.dpi)
# plt.show()
plt.close(fig)
```
#### File: examples/stwave_duck/example_inv_stwave.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import stwave as st
from pyPCGA import PCGA
import math
import datetime as dt
# model domain and discretization
N = np.array([110,83])
m = np.prod(N)
dx = np.array([5.,5.])
xmin = np.array([0. + dx[0]/2., 0. + dx[1]/2.])
xmax = np.array([110.*5. - dx[0]/2., 83.*5. - dx[1]/2.])
# covariance kernel and scale parameters
# following Hojat's paper
prior_std = 1.5
prior_cov_scale = np.array([18.*5., 18.*5.])
def kernel(r): return (prior_std**2)*np.exp(-r**2)
# for plotting
x = np.linspace(0. + dx[0]/2., 110*5 - dx[0]/2., N[0])
y = np.linspace(0. + dx[1]/2., 83*5 - dx[1]/2., N[1])
XX, YY = np.meshgrid(x, y)
pts = np.hstack((XX.ravel()[:,np.newaxis], YY.ravel()[:,np.newaxis]))
s_true = np.loadtxt('true_depth.txt')
obs = np.loadtxt('obs.txt')
# 1st-order polynomial (linear trend)
#X = np.zeros((m,2),'d')
#X[:,0] = 1/np.sqrt(m)
#X[:,1] = pts[:,0]/np.linalg.norm(pts[:,0])
# 2nd-order polynomial
#X = np.zeros((m,3),'d')
#X[:,0] = 1/np.sqrt(m)
#X[:,1] = pts[:,0]/np.linalg.norm(pts[:,0])
#X[:,2] = pts[:,0]**2/np.linalg.norm(pts[:,0]**2)
# sqrt(x) + c
#X = np.zeros((m,2),'d')
#X[:,0] = 1/np.sqrt(m)
#X[:,1] = np.sqrt(110.*5. - pts[:,0])/np.linalg.norm(np.sqrt(110.*5. - pts[:,0]))
nx = 110
ny = 83
Lx = 550
Ly = 415
x0, y0 = (62.0, 568.0)
t1 = dt.datetime(2015, 10, 7, 20, 0)
t2 = dt.datetime(2015, 10, 7, 21, 0)
stwave_params = {'nx': nx, 'ny': ny, 'Lx': Lx, 'Ly': Ly, 'x0': x0, 'y0': y0, 't1': t1, 't2': t2,
'offline_dataloc': "./input_files/8m-array_2015100718_2015100722.nc"}
# prepare interface to run as a function
def forward_model(s,parallelization,ncores = None):
model = st.Model(stwave_params)
if parallelization:
simul_obs = model.run(s,parallelization,ncores)
else:
simul_obs = model.run(s,parallelization)
return simul_obs
params = {'R':(0.1)**2, 'n_pc':50,
'maxiter':10, 'restol':0.01,
'matvec':'FFT','xmin':xmin, 'xmax':xmax, 'N':N,
'prior_std':prior_std,'prior_cov_scale':prior_cov_scale,
'kernel':kernel, 'post_cov':"diag",
'precond':True, 'LM': True,
'parallel':True, 'linesearch' : True,
'forward_model_verbose': False, 'verbose': False,
'iter_save': True}
#params['objeval'] = False, if true, it will compute accurate objective function
#params['ncores'] = 36, with parallel True, it will use the maximum number of physical cores unless specified
s_init = np.mean(s_true)*np.ones((m,1))
#s_init = np.copy(s_true) # you can try with s_true!
# initialize
prob = PCGA(forward_model, s_init, pts, params, s_true, obs)
#prob = PCGA(forward_model, s_init, pts, params, s_true, obs, X = X) #if you want to add your own drift X
# run inversion
s_hat, simul_obs, post_diagv, iter_best = prob.Run()
s_hat2d = s_hat.reshape(N[1],N[0])
s_true2d = s_true.reshape(N[1],N[0])
post_diagv[post_diagv <0.] = 0. # just in case
post_std = np.sqrt(post_diagv)
post_std2d = post_std.reshape(N[1],N[0])
minv = s_true.min()
maxv = s_true.max()
fig, axes = plt.subplots(1,2, figsize=(15,5))
plt.suptitle('prior var.: (%g)^2, n_pc : %d' % (prior_std, params['n_pc']))
im = axes[0].imshow(np.flipud(np.fliplr(-s_true2d)), extent=[0, 110, 0, 83], vmin=-7., vmax=0.,
cmap=plt.get_cmap('jet'))
axes[0].set_title('(a) True', loc='left')
axes[0].set_aspect('equal')
axes[0].set_xlabel('Offshore distance (px)')
axes[0].set_ylabel('Alongshore distance (px)')
axes[1].imshow(np.flipud(np.fliplr(-s_hat2d)), extent=[0, 110, 0, 83], vmin=-7., vmax=0., cmap=plt.get_cmap('jet'))
axes[1].set_title('(b) Estimate', loc='left')
axes[1].set_xlabel('Offshore distance (px)')
axes[1].set_aspect('equal')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.savefig('best.png')
plt.close(fig)
fig = plt.figure()
im = plt.imshow(np.flipud(np.fliplr(post_std2d)), extent=[0, 110, 0, 83], cmap=plt.get_cmap('jet'))
plt.title('Uncertainty (std)', loc='left')
plt.xlabel('Offshore distance (px)')
plt.ylabel('Alongshore distance (px)')
plt.gca().set_aspect('equal', adjustable='box')
fig.colorbar(im)
fig.savefig('std.png')
plt.close(fig)
# estimated deterministic trend
# Xbeta = np.dot(prob.X,prob.beta_best)
# Xbeta2d = Xbeta.reshape(N[1],N[0])
fig, axes = plt.subplots(1, 2)
fig.suptitle('transect with prior var.: (%g)^2, n_pc : %d, lx = %f m, ly = %f m' % (
prior_std, params['n_pc'], prior_cov_scale[0], prior_cov_scale[1]))
linex = np.arange(1, 111) * 5.0
line1_true = s_true2d[83 - 25 + 1, :]
line1 = s_hat2d[83 - 25 + 1, :]
line1_u = s_hat2d[83 - 25 + 1, :] + 1.96 * post_std2d[83 - 25 + 1, :]
line1_l = s_hat2d[83 - 25 + 1, :] - 1.96 * post_std2d[83 - 25 + 1, :]
# line1_X = Xbeta2d[83-25+1,:]
line2_true = s_true2d[83 - 45 + 1, :]
line2 = s_hat2d[83 - 45 + 1, :]
line2_u = s_hat2d[83 - 45 + 1, :] + 1.96 * post_std2d[83 - 45 + 1, :]
line2_l = s_hat2d[83 - 45 + 1, :] - 1.96 * post_std2d[83 - 45 + 1, :]
# line2_X = Xbeta2d[83-45+1,:]
axes[0].plot(linex, np.flipud(-line1_true), 'r-', label='True')
axes[0].plot(linex, np.flipud(-line1), 'k-', label='Estimated')
axes[0].plot(linex, np.flipud(-line1_u), 'k--', label='95% credible interval')
axes[0].plot(linex, np.flipud(-line1_l), 'k--')
# axes[0].plot(linex, np.flipud(-line1_X),'b--', label='Drift/Trend')
axes[0].set_title('(a) 125 m', loc='left')
# axes[0].set_title('(a) 25 px', loc='left')
handles, labels = axes[0].get_legend_handles_labels()
axes[0].legend(handles, labels)
axes[1].plot(linex, np.flipud(-line2_true), 'r-', label='True')
axes[1].plot(linex, np.flipud(-line2), 'k-', label='Estimated')
axes[1].plot(linex, np.flipud(-line2_u), 'k--', label='95% credible interval')
axes[1].plot(linex, np.flipud(-line2_l), 'k--')
# axes[1].plot(linex, np.flipud(-line2_X),'b--', label='Drift/Trend')
axes[1].set_title('(b) 225 m', loc='left')
# axes[1].set_title('(b) 45 px', loc='left')
handles, labels = axes[1].get_legend_handles_labels()
axes[1].legend(handles, labels)
fig.savefig('transect.png')
plt.close(fig)
nobs = prob.obs.shape[0]
fig = plt.figure()
plt.title('obs. vs simul.')
plt.plot(prob.obs, simul_obs, '.')
plt.xlabel('observation')
plt.ylabel('simulation')
minobs = np.vstack((prob.obs, simul_obs)).min(0)
maxobs = np.vstack((prob.obs, simul_obs)).max(0)
plt.plot(np.linspace(minobs, maxobs, 20), np.linspace(minobs, maxobs, 20), 'k-')
plt.axis('equal')
axes = plt.gca()
axes.set_xlim([math.floor(minobs), math.ceil(maxobs)])
axes.set_ylim([math.floor(minobs), math.ceil(maxobs)])
fig.savefig('obs.png', dpi=fig.dpi)
# plt.show()
plt.close(fig)
fig = plt.figure()
plt.semilogy(range(len(prob.objvals)), prob.objvals, 'r-')
plt.title('obj values over iterations')
plt.axis('tight')
fig.savefig('obj.png', dpi=fig.dpi)
plt.close(fig)
fig, axes = plt.subplots(4, 4, sharex=True, sharey=True)
fig.suptitle('n_pc : %d' % params['n_pc'])
for i in range(4):
for j in range(4):
axes[i, j].imshow(prob.priorU[:, (i * 4 + j) * 2].reshape(N[1], N[0]), extent=[0, 110, 0, 83])
axes[i, j].set_title('%d-th eigv' % ((i * 4 + j) * 2))
fig.savefig('eigv.png', dpi=fig.dpi)
plt.close(fig)
fig = plt.figure()
plt.semilogy(prob.priord, 'o')
fig.savefig('eig.png', dpi=fig.dpi)
# plt.show()
plt.close(fig)
```
#### File: examples/tracer_tomography_ade_crunch/crunch.py
```python
import datetime as dt
import os
import sys
from multiprocessing import Pool
import numpy as np
from shutil import copy2, rmtree
import subprocess
#from subprocess import call
from time import time
from IPython.core.debugger import Tracer; debug_here = Tracer()
'''
three operations
1. write inputs
2. run simul
3. read outputs
'''
class Model:
def __init__(self,params = None):
self.idx = 0
self.homedir = os.path.abspath('./')
self.inputdir = os.path.abspath(os.path.join(self.homedir,"./input_files"))
self.deletedir = True
self.outputdir = None
self.parallel = False
self.record_cobs = False
        from psutil import cpu_count  # physical cpu counts
self.ncores = cpu_count(logical=False)
if params is not None:
if 'deletedir' in params:
self.deletedir = params['deletedir']
if 'homedir' in params:
self.homedir = params['homedir']
self.inputdir = os.path.abspath(os.path.join(self.homedir,"./input_files"))
if 'inputdir' in params:
self.inputdir = params['inputdir']
if 'ncores' in params:
self.ncores = params['ncores']
if 'outputdir' in params:
# note that outputdir is not used for now; pyPCGA forces outputdir in ./simul/simul0000
self.outputdir = params['outputdir']
if 'parallel' in params:
self.parallel = params['parallel']
if 'nx' in params:
self.nx = params['nx']
else:
raise NameError('nx is not defined')
if 'ny' in params:
self.ny = params['ny']
else:
raise NameError('ny is not defined')
if 't' in params:
self.t = params['t']
else:
raise NameError('t is not defined')
if 'record_cobs' in params:
self.record_cobs = True
def create_dir(self,idx=None):
mydirbase = "./simul/simul"
if idx is None:
idx = self.idx
mydir = mydirbase + "{0:04d}".format(idx)
mydir = os.path.abspath(os.path.join(self.homedir, mydir))
if not os.path.exists(mydir):
os.makedirs(mydir)
for filename in os.listdir(self.inputdir):
copy2(os.path.join(self.inputdir,filename),mydir)
return mydir
def cleanup(self,outputdir=None):
"""
Removes outputdir if specified. Otherwise removes all output files
in the current working directory.
"""
import shutil
import glob
log = "dummy.log"
if os.path.exists(log):
os.remove(log)
if outputdir is not None and outputdir != os.getcwd():
if os.path.exists(outputdir):
shutil.rmtree(outputdir)
else:
filelist = glob.glob("*.out")
filelist += glob.glob("*.sim")
for file in filelist:
os.remove(file)
def run_model(self,s,idx=0):
sim_dir = self.create_dir(idx)
os.chdir(sim_dir)
nx, ny = self.nx, self.ny
m = nx*ny
t = self.t
nt = t.shape[0]
perm2d = np.exp(s).reshape(ny,nx)
# perm.x
perm2dx = np.zeros((ny,nx+2),'d')
perm2dx[:,1:-1] = perm2d
perm2dx[:,0] = perm2dx[:,1]
perm2dx[:,-1] = perm2dx[:,-2]
np.savetxt("PermField.x",perm2dx.reshape(ny*(nx+2),),fmt='%10.4E')
perm2dy = np.zeros((ny+2,nx),'d')
perm2dy[1:-1,:] = perm2d
        # mirror the interior rows into the ghost rows of the y-direction field
        perm2dy[0,:] = perm2dy[1,:]
        perm2dy[-1,:] = perm2dy[-2,:]
np.savetxt("PermField.y",perm2dy.reshape((ny+2)*nx,),fmt='%10.4E')
subprocess.call(["./CrunchTope","2DCr.in"], stdout=subprocess.PIPE)
# read results
simul_cobs = np.zeros((m,nt),'d')
simul_obs = np.zeros((m,),'d')
for it in range(nt):
tmp = np.loadtxt('totcon%d.tec' % (it+1),skiprows=3)
simul_cobs[:,it] = tmp[:,3]
if self.record_cobs:
self.simul_cobs = simul_cobs
for it in range(m):
simul_obs[it] = np.trapz(t*simul_cobs[it,:],x=t)/np.trapz(simul_cobs[it,:],x=t)
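        # The loop above reduces each breakthrough curve to its mean arrival time,
        # i.e. the first temporal moment: trapz(t * c(t)) / trapz(c(t)) over the
        # output times t, evaluated independently for every grid cell.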
os.chdir(self.homedir)
if self.deletedir:
rmtree(sim_dir, ignore_errors=True)
# self.cleanup(sim_dir)
return simul_obs
def run(self,s,par,ncores=None):
if ncores is None:
ncores = self.ncores
method_args = range(s.shape[1])
args_map = [(s[:, arg:arg + 1], arg) for arg in method_args]
if par:
pool = Pool(processes=ncores)
simul_obs = pool.map(self, args_map)
else:
simul_obs =[]
for item in args_map:
simul_obs.append(self(item))
return np.array(simul_obs).T
#pool.close()
#pool.join()
def __call__(self,args):
return self.run_model(args[0],args[1])
if __name__ == '__main__':
import crunch
import numpy as np
from time import time
s = np.loadtxt("true.txt")
s = s.reshape(-1, 1)
nx = 50
ny = 50
m = nx*ny
t = np.array([1.1574E-05, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,\
0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0])
params = {'nx':nx,'ny':ny, 't': t, 'deletedir':False, 'record_cobs':True}
#s = -30.*np.ones((nx*ny,1),'d')
#s = s.reshape(-1, 1)
par = False # parallelization false
mymodel = crunch.Model(params)
print('(1) single run')
from time import time
stime = time()
simul_obs = mymodel.run(s,par)
print('simulation run: %f sec' % (time() - stime))
#obs = simul_obs + 0.01*np.random.randn(m,1)
#obs[obs < 0] = 0
#np.savetxt('obs.txt',obs)
#np.savetxt('cobs.txt',mymodel.simul_cobs)
#mymodel.simul_cobs = mymodel.simul_cobs
#for it in range(nx*ny):
# simul_obs[it] = np.trapz(t*mymodel.simul_cobs[it,:],x=t)/np.trapz(mymodel.simul_cobs[it,:],x=t)
#savemat('simul.mat',{'simul_obs':simul_obs})
#import sys
#sys.exit(0)
ncores = 2
nrelzs = 2
print('(2) parallel run with ncores = %d' % ncores)
    par = True  # parallelization enabled
srelz = np.zeros((np.size(s,0),nrelzs),'d')
for i in range(nrelzs):
srelz[:,i:i+1] = s + 0.1*np.random.randn(np.size(s,0),1)
simul_obs_all = mymodel.run(srelz,par,ncores = ncores)
print(simul_obs_all)
    # use all the physical cores if ncores is not specified
#print('(3) parallel run with all the physical cores')
#simul_obs_all = mymodel.run(srelz,par)
#print(simul_obs_all)
``` |
{
"source": "Jonghyun-Kim-73/ERC_Project",
"score": 2
} |
#### File: CNS_Platform/AI/AI_EM_SAC.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Categorical
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
# Initialize Policy weights
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class ValueNetwork(nn.Module):
def __init__(self, num_inputs, hidden_dim):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, discrete=False):
super(QNetwork, self).__init__()
self.discrete = discrete
# Q1 architecture
if self.discrete:
self.linear1 = nn.Linear(num_inputs, hidden_dim)
else:
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# Q2 architecture
if self.discrete:
self.linear4 = nn.Linear(num_inputs, hidden_dim)
else:
self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear5 = nn.Linear(hidden_dim, hidden_dim)
self.linear6 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state, action):
if self.discrete:
xu = state
else:
xu = torch.cat([state, action], 1)
x1 = F.relu(self.linear1(xu))
x1 = F.relu(self.linear2(x1))
x1 = self.linear3(x1)
x2 = F.relu(self.linear4(xu))
x2 = F.relu(self.linear5(x2))
x2 = self.linear6(x2)
return x1, x2
class GaussianPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, discrete=False, action_space=None):
super(GaussianPolicy, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.discrete_linear = nn.Linear(hidden_dim, num_actions)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
self.apply(weights_init_)
# discrete action space
self.discrete = True if discrete else False
# action rescaling
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
if self.discrete:
x = self.discrete_linear(x)
return x
else:
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
return mean, log_std
def sample(self, state):
if self.discrete:
action_prob = self.forward(state)
action_prob = F.softmax(action_prob, dim=1)
action_distribution = Categorical(action_prob)
action = action_distribution.sample().view(-1, 1) # pi_theta(s_t)
            # guard against zero probabilities before taking the log
z = (action_prob == 0.0).float() * 1e-8
log_probs = torch.log(action_prob + z) # log(pi_theta(s_t))
"""
            Currently one sampled action with two action probabilities (up, down)
action : [[0]], torch.Tensor
action_prob : [[0.1, 0.9]], torch.Tensor
log_prob : [[0.1, 0.1]], torch.Tensor
"""
return action, action_prob, log_probs
else:
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
"""
action : [[0.1, 0.1]], torch.Tensor
log_prob : [[0.1]], torch.Tensor
mean : [[0.1, 0.1]], torch.Tensor
"""
return action, log_prob, mean
class AIEMSAC:
def __init__(self, input_shape, output_shape, discrete_mode):
self.discrete_mode = discrete_mode
self.policy = GaussianPolicy(input_shape,
output_shape,
hidden_dim=256, discrete=self.discrete_mode)
def agent_select_action(self, state, evaluate=False):
state = torch.FloatTensor(state).unsqueeze(0)
if evaluate is False:
if self.discrete_mode:
_, action_probs, _ = self.policy.sample(state)
action = torch.argmax(action_probs, dim=1, keepdim=True)
else:
action, _, _ = self.policy.sample(state)
"""
action : tensor([[-0.4534, 0.1533]], grad_fn=<AddBackward0>) <class 'torch.Tensor'>
"""
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0] # [ ], numpy[0.80986434 0.7939146 ] <class 'numpy.ndarray'>
def agent_load_model(self, actor_path):
print('Loading models from {}'.format(actor_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
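# Illustrative usage (a sketch, not part of the original module; the state
# dimension, action count, and checkpoint path are placeholders):
#
#     agent = AIEMSAC(input_shape=27, output_shape=2, discrete_mode=True)
#     agent.agent_load_model('path/to/sac_actor.pt')
#     action = agent.agent_select_action(state, evaluate=True)   # state: 1-D array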
``` |
{
"source": "Jonghyun-Kim-73/SAMG_Project",
"score": 2
} |
#### File: Jonghyun-Kim-73/SAMG_Project/CNS_Platform_controller.py
```python
import sys
import multiprocessing
import time
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
#
import CNS_Platform_controller_interface as CNS_controller
from main_window import MainWindow
from TOOL.TOOL_etc import p_
from TOOL.TOOL_MatGP import Trend
class InterfaceFun(multiprocessing.Process):
def __init__(self, shmem):
multiprocessing.Process.__init__(self)
self.shmem = shmem
def run(self):
app = QApplication(sys.argv)
w = MyForm(self.shmem)
sys.exit(app.exec_())
class MyForm(QWidget):
def __init__(self, shmem):
super(MyForm, self).__init__()
# shmem
self.shmem = shmem
        # ---- Load the controller UI
p_(__file__, f'[SHMem:{self.shmem}][Controller UI 호출]')
self.ui = CNS_controller.Ui_Form()
self.ui.setupUi(self)
# ----------- ADD !!! -------------- (20210419 for 효진)
self.setGeometry(0, 0, 269, 620)
self.auto_data_info_list = AutoDataList(parent=self)
self.auto_data_info_list.setGeometry(20, 380, 225, 100)
        # ----------- Value editor ----------------------------
self.call_val_change_editor = QPushButton('Change Val Editor', self)
self.call_val_change_editor.setGeometry(20, 540, 225, 30)
self.call_val_change_editor.clicked.connect(self.go_val_change)
        # ----------- Trend view test ----------------------------
self.call_trend_view = QPushButton('Call Trend View', self)
self.call_trend_view.setGeometry(20, 580, 225, 30)
self.call_trend_view.clicked.connect(self.go_trend_view)
        # ---- Initial UI setup
self.ui.Cu_SP.setText(str(self.shmem.get_logic('Speed')))
self.ui.Se_SP.setText(str(self.shmem.get_logic('Speed')))
        # ---- Call initialization functions
        # ---- Button commands
self.ui.Run.clicked.connect(self.run_cns)
self.ui.Freeze.clicked.connect(self.freeze_cns)
self.ui.Go_mal.clicked.connect(self.go_mal)
self.ui.Initial.clicked.connect(self.go_init)
self.ui.Apply_Sp.clicked.connect(self.go_speed)
self.ui.Go_db.clicked.connect(self.go_save)
# self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowStaysOnTopHint)
self.show()
# Call
self.cns_main_win = MainWindow(parent=self)
self.cns_main_win.show()
def run_cns(self):
if self.shmem.get_logic('Initial_condition'):
p_(__file__, 'CNS 시작')
self.shmem.change_logic_val('Run', True)
else:
p_(__file__, '먼저 초기 조건을 선언')
def freeze_cns(self):
if self.shmem.get_logic('Initial_condition'):
p_(__file__, 'CNS 일시정지')
self.shmem.change_logic_val('Run', False)
else:
p_(__file__, '먼저 초기 조건을 선언')
def go_mal(self):
if self.ui.Mal_nub.text() != '' and self.ui.Mal_type.text() != '' and self.ui.Mal_time.text() != '':
            # 1. Store the entered values in the malfunction list
self.ui.Mal_list.addItem('{}_{}_{}'.format(self.ui.Mal_nub.text(),
self.ui.Mal_type.text(),
self.ui.Mal_time.text()))
            # 2. Store the entered values in the trigger memory
Mal_index = self.ui.Mal_list.count()
Mal_dict = {'Mal_nub': int(self.ui.Mal_nub.text()),
'Mal_opt': int(self.ui.Mal_type.text()),
'Mal_time': int(self.ui.Mal_time.text()) * 5,
'Mal_done': False}
self.shmem.change_mal_val(mal_index=Mal_index, mal_dict=Mal_dict)
            # 3. Clear the input fields
self.ui.Mal_nub.clear()
self.ui.Mal_type.clear()
self.ui.Mal_time.clear()
p_(__file__, 'Malfunction 입력 완료')
else:
p_(__file__, 'Malfunction 입력 실패')
def go_init(self):
p_(__file__, 'CNS 초기 조건 선언')
# 1. Mal list clear
self.ui.Mal_list.clear()
# 2. Mal trig_mem clear
self.shmem.call_init(int(self.ui.Initial_list.currentIndex()) + 1)
# 3. Controller interface update
self.ui.Cu_SP.setText(str(self.shmem.get_logic('Speed')))
self.ui.Se_SP.setText(str(self.shmem.get_logic('Speed')))
        # Reset the main window
def go_save(self):
        # Real-time recording in progress ...
self.shmem.change_logic_val('Run_rc', True)
p_(__file__, 'Ester_Egg_Run_ROD CONTROL TRICK')
def go_speed(self):
p_(__file__, 'CNS 속도 조절')
self.ui.Cu_SP.setText(self.shmem.get_speed(int(self.ui.Se_SP.text())))
def go_val_change(self):
if not self.shmem.get_logic('Run'):
self.val_editor = ValEditor(self)
self.val_editor.show()
def go_trend_view(self):
self.TrendView = Trend(self,
w=500, h=500, para_name='Flow', para_id='KCNTOMS', para_range=[0, 300],
xtitle='Minimum Injection Flowrate (gpm)', ytitle='Time Since Reactor Shutdown (Hours)')
self.TrendView.setGeometry(100, 100, 300, 300)
self.TrendView.show()
def show_main_window(self):
        # Runs together with the controller
pass
def closeEvent(self, QCloseEvent):
p_(__file__, 'Close')
self.shmem.send_close()
sys.exit()
# # Automatic data collection section # #
class AutoDataList(QListWidget):
def __init__(self, parent):
super(AutoDataList, self).__init__(parent=parent)
self.run_tirg = False
# Q Timer ------------------------------------------------------------------------------------------------------
timer = QTimer(self)
for _ in [self._check_list]:
timer.timeout.connect(_)
timer.start(1000)
def contextMenuEvent(self, event) -> None:
""" ChartArea 에 기능 올리기 """
menu = QMenu(self)
add_input1 = menu.addAction("Add input")
add_input2 = menu.addAction("Run")
add_input1.triggered.connect(self._add_input)
add_input2.triggered.connect(self._run_cns)
menu.exec_(event.globalPos())
def _add_input(self):
mal, ok = QInputDialog.getText(self, 'Input Man', 'Mal nub')
#for i in range(10, 21):
# self.addItem(f'12_{mal}{i}_10800')
self.addItem(f'{mal}')
# self.addItem(mal)
def _check_list(self):
if self.__len__() > 0 and self.run_tirg:
local_logic = self.parent().shmem.get_logic_info()
if local_logic['Run']:
pass
else:
get_first_row = self.item(0).text().split('_')
print(get_first_row, 'Start first line mal function')
self.parent().go_init()
time.sleep(5)
self.parent().ui.Mal_nub.setText(get_first_row[0])
self.parent().ui.Mal_type.setText(get_first_row[1])
self.parent().ui.Mal_time.setText(get_first_row[2])
self.parent().go_mal()
time.sleep(5)
self.parent().run_cns()
time.sleep(5)
self.takeItem(0)
else:
self.run_tirg = False
def _run_cns(self):
self.run_tirg = True
class ValEditor(QWidget):
def __init__(self, parent):
super(ValEditor, self).__init__()
self.shmem = parent.shmem
self.setGeometry(0, 0, 300, 50)
h_layer = QHBoxLayout()
v_layer = QVBoxLayout()
name_h_layer = QHBoxLayout()
self.name_label = QLabel('Name')
self.name_label.setFixedWidth(50)
self.name_text = QLineEdit('')
name_h_layer.addWidget(self.name_label)
name_h_layer.addWidget(self.name_text)
val_h_layer = QHBoxLayout()
self.val_label = QLabel('Val')
self.val_label.setFixedWidth(50)
self.val_text = QLineEdit('')
val_h_layer.addWidget(self.val_label)
val_h_layer.addWidget(self.val_text)
v_layer.addLayout(name_h_layer)
v_layer.addLayout(val_h_layer)
self.change_btn = QPushButton('Change')
self.change_btn.clicked.connect(self.change_val)
h_layer.addLayout(v_layer)
h_layer.addWidget(self.change_btn)
self.setLayout(h_layer)
def change_val(self):
if self.shmem.check_para(str(self.name_text.text())):
orgin = self.shmem.get_shmem_val(self.name_text.text())
try:
get_val = int(float(self.val_text.text())) if type(orgin) is int else float(self.val_text.text())
self.shmem.change_shmem_val(self.name_text.text(), get_val)
self.close()
except:
print('잘못된 파라메터 값 입력')
self.val_text.clear()
else:
print('잘못된 파라메터 이름 입력')
self.name_text.clear()
```
#### File: Jonghyun-Kim-73/SAMG_Project/ENVCNS.py
```python
import numpy as np
from TOOL.TOOL_CNS_UDP_FAST import CNS
from TOOL.TOOL_Cool import CoolingRATE
from TOOL import TOOL_PTCurve
import random
class CMem:
def __init__(self, mem):
self.m = mem # Line CNSmem -> getmem
self.CoolingRateSW = 0
self.CoolingRateFixTemp = 0
self.CoolingRateFixTime = 0
self.CoolingRATE = CoolingRATE()
self.StartRL = 0
self.update()
def update(self):
self.CTIME = self.m['KCNTOMS']['Val'] # CNS Time
# Physical
self.SG1Nar = self.m['ZINST78']['Val']
self.SG2Nar = self.m['ZINST77']['Val']
self.SG3Nar = self.m['ZINST76']['Val']
self.SG1Wid = self.m['ZINST72']['Val']
self.SG2Wid = self.m['ZINST71']['Val']
self.SG3Wid = self.m['ZINST70']['Val']
self.SG1Pres = self.m['ZINST75']['Val']
self.SG2Pres = self.m['ZINST74']['Val']
self.SG3Pres = self.m['ZINST73']['Val']
self.SG1Feed = self.m['WFWLN1']['Val']
self.SG2Feed = self.m['WFWLN2']['Val']
self.SG3Feed = self.m['WFWLN3']['Val']
self.Aux1Flow = self.m['WAFWS1']['Val']
self.Aux2Flow = self.m['WAFWS2']['Val']
self.Aux3Flow = self.m['WAFWS3']['Val']
self.SteamLine1 = self.m['BHV108']['Val']
self.SteamLine2 = self.m['BHV208']['Val']
self.SteamLine3 = self.m['BHV308']['Val']
self.AVGTemp = self.m['UAVLEG2']['Val']
self.PZRPres = self.m['ZINST65']['Val']
self.PZRLevel = self.m['ZINST63']['Val']
# Signal
self.Trip = self.m['KLAMPO9']['Val']
self.SIS = self.m['KLAMPO6']['Val']
self.MSI = self.m['KLAMPO3']['Val']
self.NetBRK = self.m['KLAMPO224']['Val']
# Comp
self.RCP1 = self.m['KLAMPO124']['Val']
self.RCP2 = self.m['KLAMPO125']['Val']
self.RCP3 = self.m['KLAMPO126']['Val']
self.TurningGear = self.m['KLAMPO165']['Val']
self.OilSys = self.m['KLAMPO164']['Val']
self.BHV311 = self.m['BHV311']['Val']
self.SteamDumpPos = self.m['ZINST98']['Val']
self.SteamDumpManAuto = self.m['KLAMPO150']['Val']
self.PMSS = self.m['PMSS']['Val']
        # Variables monitored for reinforcement learning
self.PZRSprayManAuto = self.m['KLAMPO119']['Val']
self.PZRSprayPos = self.m['ZINST66']['Val']
self.PZRSprayPosControl = self.m['BPRZSP']['Val']
self.PZRBackHeaterOnOff = self.m['KLAMPO118']['Val']
self.PZRProHeaterManAuto = self.m['KLAMPO117']['Val']
self.PZRProHeaterPos = self.m['QPRZH']['Val']
self.SIValve = self.m['BHV22']['Val']
self.ChargingManAUto = self.m['KLAMPO95']['Val']
self.ChargingValvePos = self.m['BFV122']['Val']
self.ChargingPump2State = self.m['KLAMPO70']['Val']
self.LetdownLV459Pos = self.m['BLV459']['Val']
self.LetdownHV1Pos = self.m['BHV1']['Val']
self.LetdownHV2Pos = self.m['BHV2']['Val']
self.LetdownHV3Pos = self.m['BHV3']['Val']
# Logic
if self.CTIME == 0:
self.CoolingRateSW = 0
self.CoolingRATE.reset_info()
self.StartRL = 0
        if self.CoolingRateSW == 1:  # 2.0] start computing the cooling rate
self.CoolingRATE.save_info(self.AVGTemp, self.CTIME)
self.CoolingRateSW += 1 # 값 2로 바뀜으로써 이 로직은 1번만 동작함.
class ENVCNS(CNS):
def __init__(self, Name, IP, PORT, RIP, RPORT, Max_len):
super(ENVCNS, self).__init__(threrad_name=Name,
CNS_IP=IP, CNS_Port=PORT,
Remote_IP=RIP, Remote_Port=RPORT, Max_len=Max_len)
self.Name = Name # = id
self.ENVStep = 0
self.LoggerPath = 'DB'
self.want_tick = 300 # 1sec
self.Loger_txt = ''
self.CMem = CMem(self.mem)
self.input_info_EM = [
# (para, x_round, x_min, x_max), (x_min=0, x_max=0 is not normalized.)
('ZINST98', 1, 0, 100), # SteamDumpPos
('ZINST87', 1, 0, 50), # Steam Flow 1
('ZINST86', 1, 0, 50), # Steam Flow 2
('ZINST85', 1, 0, 50), # Steam Flow 3
('KLAMPO70', 1, 0, 1), # Charging Pump2 State
('BHV22', 1, 0, 1), # SI Valve State
('ZINST66', 1, 0, 25), # PZRSprayPos
('UAVLEG2', 1, 150, 320), # PTTemp
('ZINST65', 1, 0, 160), # PTPressure
('ZINST78', 1, 0, 70), # SG1Nar
('ZINST77', 1, 0, 70), # SG2Nar
('ZINST76', 1, 0, 70), # SG3Nar
('ZINST75', 1, 0, 80), # SG1Pres
('ZINST74', 1, 0, 80), # SG2Pres
('ZINST73', 1, 0, 80), # SG3Pres
('ZINST72', 1, 0, 100), # SG1Wid
('ZINST71', 1, 0, 100), # SG2Wid
('ZINST70', 1, 0, 100), # SG3Wid
('UUPPPL', 1, 100, 350), # CoreExitTemp
('WFWLN1', 1, 0, 25), # SG1Feed
('WFWLN2', 1, 0, 25), # SG2Feed
('WFWLN3', 1, 0, 25), # SG3Feed
('UCOLEG1', 1, 0, 100), # RCSColdLoop1
('UCOLEG2', 1, 0, 100), # RCSColdLoop2
('UCOLEG3', 1, 0, 100), # RCSColdLoop3
('ZINST65', 1, 0, 160), # RCSPressure
('ZINST63', 1, 0, 100), # PZRLevel
]
# --------------------------------------------------------------------------------------------------------------
def normalize(self, x, x_round, x_min, x_max):
if x_max == 0 and x_min == 0:
# It means X value is not normalized.
x = x / x_round
else:
x = x_max if x >= x_max else x
x = x_min if x <= x_min else x
x = (x - x_min) / (x_max - x_min)
return x
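# Worked example (added for illustration, not in the original source):
#   normalize(75, 1, 0, 160) clips to [0, 160] and then scales: (75 - 0) / (160 - 0) = 0.46875
#   normalize(7, 10, 0, 0)   has x_min == x_max == 0, so it only divides by x_round: 7 / 10 = 0.7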
def get_state(self, input_info):
state = []
for para, x_round, x_min, x_max in input_info:
if para in self.mem.keys():
_ = self.mem[para]['Val']
else:
if para == 'DSetPoint':
_ = 0
else:
_ = None
# ------------------------------------------------------------------------------------------------------
if _ is None:
raise ValueError(f'{para} is not in self.input_info')
# ------------------------------------------------------------------------------------------------------
state.append(self.normalize(_, x_round, x_min, x_max))
return np.array(state), state
def _send_control_save(self, zipParaVal):
super(ENVCNS, self)._send_control_save(para=zipParaVal[0], val=zipParaVal[1])
def _send_act_EM_Module(self, A):
def a_log_f(s=''):
pass
ActOrderBook = {
'StopAllRCP': (['KSWO132', 'KSWO133', 'KSWO134'], [0, 0, 0]),
'StopRCP1': (['KSWO132'], [0]),
'StopRCP2': (['KSWO133'], [0]),
'StopRCP3': (['KSWO134'], [0]),
'NetBRKOpen': (['KSWO244'], [0]),
'OilSysOff': (['KSWO190'], [0]),
'TurningGearOff': (['KSWO191'], [0]),
'CutBHV311': (['BHV311', 'FKAFWPI'], [0, 0]),
'PZRSprayMan': (['KSWO128'], [1]), 'PZRSprayAuto': (['KSWO128'], [0]),
'PZRSprayClose': (['BPRZSP'], [self.mem['BPRZSP']['Val'] + 0.015 * -1]),
'PZRSprayOpen': (['BPRZSP'], [self.mem['BPRZSP']['Val'] + 0.015 * 1]),
'PZRBackHeaterOff': (['KSWO125'], [0]), 'PZRBackHeaterOn': (['KSWO125'], [1]),
'SteamDumpMan': (['KSWO176'], [1]), 'SteamDumpAuto': (['KSWO176'], [0]),
'IFLOGIC_SteamDumpUp': (['PMSS'], [self.CMem.PMSS + 2.0E5 * 3 * 0.2]),
'IFLOGIC_SteamDumpDown': (['PMSS'], [self.CMem.PMSS + 2.0E5 * (-3) * 0.2]),
'DecreaseAux1Flow': (['KSWO142', 'KSWO143'], [1, 0]),
'IncreaseAux1Flow': (['KSWO142', 'KSWO143'], [0, 1]),
'DecreaseAux2Flow': (['KSWO151', 'KSWO152'], [1, 0]),
'IncreaseAux2Flow': (['KSWO151', 'KSWO152'], [0, 1]),
'DecreaseAux3Flow': (['KSWO154', 'KSWO155'], [1, 0]),
'IncreaseAux3Flow': (['KSWO154', 'KSWO155'], [0, 1]),
'SteamLine1Open': (['KSWO148', 'KSWO149'], [1, 0]),
'SteamLine2Open': (['KSWO146', 'KSWO147'], [1, 0]),
'SteamLine3Open': (['KSWO144', 'KSWO145'], [1, 0]),
'ResetSI': (['KSWO7', 'KSWO5'], [1, 1]),
'PZRProHeaterMan': (['KSWO120'], [1]), 'PZRProHeaterAuto': (['KSWO120'], [0]),
'PZRProHeaterDown': (['KSWO121', 'KSWO122'], [1, 0]),
'PZRProHeaterUp': (['KSWO121', 'KSWO122'], [0, 1]),
'RL_IncreaseAux1Flow': (['WAFWS1'], [self.mem['WAFWS1']['Val'] + 0.04 * 1]),
'RL_DecreaseAux1Flow': (['WAFWS1'], [self.mem['WAFWS1']['Val'] + 0.04 * (-1)]),
'RL_IncreaseAux2Flow': (['WAFWS2'], [self.mem['WAFWS2']['Val'] + 0.04 * 1]),
'RL_DecreaseAux2Flow': (['WAFWS2'], [self.mem['WAFWS2']['Val'] + 0.04 * (-1)]),
'RL_IncreaseAux3Flow': (['WAFWS3'], [self.mem['WAFWS3']['Val'] + 0.04 * 1]),
'RL_DecreaseAux3Flow': (['WAFWS3'], [self.mem['WAFWS3']['Val'] + 0.04 * (-1)]),
'ChargingValveMan': (['KSWO100'], [1]), 'ChargingValveAUto': (['KSWO100'], [0]),
'ChargingValveDown': (['KSWO101', 'KSWO102'], [1, 0]),
'ChargingValveUp': (['KSWO101', 'KSWO102'], [0, 1]),
'LetdownLV459Open': (['KSWO114', 'KSWO113'], [1, 0]),
'LetdownLV459Close': (['KSWO114', 'KSWO113'], [0, 1]),
'LetdownHV1Open': (['KSWO104', 'KSWO103'], [1, 0]),
'LetdownHV1Close': (['KSWO104', 'KSWO103'], [0, 1]),
'LetdownHV2Open': (['KSWO106', 'KSWO105'], [1, 0]),
'LetdownHV2Close': (['KSWO106', 'KSWO105'], [0, 1]),
'LetdownHV3Open': (['KSWO108', 'KSWO107'], [1, 0]),
'LetdownHV3Close': (['KSWO108', 'KSWO107'], [0, 1]),
'RunRCP2': (['KSWO130', 'KSWO133'], [1, 1]),
'RunCHP2': (['KSWO70'], [1]), 'StopCHP2': (['KSWO70'], [0]),
'OpenSI': (['KSWO81', 'KSWO82'], [1, 0]), 'CloseSI': (['KSWO81', 'KSWO82'], [0, 1]),
}
AMod = A
print('[EM_Module]', self.CMem.CTIME)
if self.CMem.Trip == 1:
# 1.1] Automatic control actions after reactor trip
# 1.1.1] Automatically stop the RCPs once pressure falls below 97
if self.CMem.RCP1 == 1 and self.CMem.PZRPres < 97 and self.CMem.CTIME < 15 * 60 * 5:
a_log_f(s=f'Pres [{self.CMem.PZRPres}] < 97 RCP 1 stop')
self._send_control_save(ActOrderBook['StopRCP1'])
if self.CMem.RCP2 == 1 and self.CMem.PZRPres < 97 and self.CMem.CTIME < 15 * 60 * 5:
a_log_f(s=f'Pres [{self.CMem.PZRPres}] < 97 RCP 2 stop')
self._send_control_save(ActOrderBook['StopRCP2'])
if self.CMem.RCP3 == 1 and self.CMem.PZRPres < 97 and self.CMem.CTIME < 15 * 60 * 5:
a_log_f(s=f'Pres [{self.CMem.PZRPres}] < 97 RCP 3 stop')
self._send_control_save(ActOrderBook['StopRCP3'])
# 1.1.2] After reactor trip: open the net breaker and stop/isolate the turning gear, oil system, and BHV311
if self.CMem.NetBRK == 1:
a_log_f(s=f'NetBRK [{self.CMem.NetBRK}] Off')
self._send_control_save(ActOrderBook['NetBRKOpen'])
if self.CMem.TurningGear == 1:
a_log_f(s=f'TurningGear [{self.CMem.TurningGear}] Off')
self._send_control_save(ActOrderBook['TurningGearOff'])
if self.CMem.OilSys == 1:
a_log_f(s=f'OilSys [{self.CMem.OilSys}] Off')
self._send_control_save(ActOrderBook['OilSysOff'])
if self.CMem.BHV311 > 0:
a_log_f(s=f'BHV311 [{self.CMem.BHV311}] Cut')
self._send_control_save(ActOrderBook['CutBHV311'])
# 1.2] Set up the steam dump valve set-point based on the current maximum SG pressure
a_log_f(s=f'[Check][{self.CMem.SIS}][{self.CMem.MSI}][Check Main logic 1]')
if self.CMem.SIS != 0 and self.CMem.MSI != 0:
if max(self.CMem.SG1Pres, self.CMem.SG2Pres, self.CMem.SG3Pres) < self.CMem.SteamDumpPos:
a_log_f(s=f'StemDumpPos [{self.CMem.SteamDumpPos}] change')
self._send_control_save(ActOrderBook['IFLOGIC_SteamDumpDown'])
# 1.2] Equalize the Aux feed flows before the SI reset [verified 2020-09-03]
if self.CMem.SG1Feed == self.CMem.SG2Feed == self.CMem.SG3Feed:
a_log_f(s=f'[{self.CMem.SG1Feed:10}, {self.CMem.SG2Feed:10}, {self.CMem.SG3Feed:10}] Feed water avg done')
else:
# 1.2.1] Balance the feedwater flows
# 1.2.1.1] Find the SGs with the largest and smallest feed flows
SGFeedList = [self.CMem.SG1Feed, self.CMem.SG2Feed, self.CMem.SG3Feed]
MaxSGFeed = SGFeedList.index(max(SGFeedList)) # 0, 1, 2
MinSGFeed = SGFeedList.index(min(SGFeedList)) # 0, 1, 2
self._send_control_save(ActOrderBook[f'DecreaseAux{MaxSGFeed + 1}Flow'])
self._send_control_save(ActOrderBook[f'IncreaseAux{MinSGFeed + 1}Flow'])
a_log_f(s=f'[{self.CMem.SG1Feed:10}, {self.CMem.SG2Feed:10}, {self.CMem.SG3Feed:10}] Feed water avg')
# 1.3] Reset SI at the scheduled time (CTIME == 3000 + 18000*5)
if self.CMem.CTIME == 3000 + (18000 * 5):
self._send_control_save(ActOrderBook['ResetSI'])
a_log_f(s=f'ResetSI [{self.CMem.CTIME}]')
# 2] Start the cooldown operation once SI has been reset
if self.CMem.SIS == 0 and self.CMem.MSI == 0 and self.CMem.CTIME > 5 * 60 * 5:
# 2.0] Start computing the cooling rate
if self.CMem.CoolingRateSW == 0:
self.CMem.CoolingRateSW = 1
a_log_f(s=f'CoolingRateSW')
# 2.1] Adjust the pressure set-point toward the current maximum pressure (not working)
if self.CMem.SteamDumpManAuto == 0:
self._send_control_save(ActOrderBook['SteamDumpMan'])
a_log_f(s=f'SteamDumpMan [{self.CMem.SteamDumpManAuto}]')
# 2.2] Steam Line Open
if self.CMem.SteamLine1 == 0:
self._send_control_save(ActOrderBook['SteamLine1Open'])
a_log_f(s=f'SteamLine1 [{self.CMem.SteamLine1}] Open')
if self.CMem.SteamLine2 == 0:
self._send_control_save(ActOrderBook['SteamLine2Open'])
a_log_f(s=f'SteamLine2 [{self.CMem.SteamLine2}] Open')
if self.CMem.SteamLine3 == 0:
self._send_control_save(ActOrderBook['SteamLine3Open'])
a_log_f(s=f'SteamLine3 [{self.CMem.SteamLine3}] Open')
# 2.3] Minimize charging flow
if self.CMem.ChargingManAUto == 0:
self._send_control_save(ActOrderBook['ChargingValveMan'])
a_log_f(s=f'ChargingMode [{self.CMem.ChargingManAUto}] Man')
if self.CMem.ChargingValvePos != 0:
self._send_control_save(ActOrderBook['ChargingValveDown'])
a_log_f(s=f'ChargingPOS [{self.CMem.ChargingValvePos}] Close')
# 2.3] Switch PZR spray to manual [for depressurization]
if self.CMem.PZRSprayManAuto == 0:
self._send_control_save(ActOrderBook['PZRSprayMan'])
a_log_f(s=f'PZRSprayMan [{self.CMem.PZRSprayManAuto}] Man')
# 2.4] Start RCP 2
if self.CMem.RCP2 == 0:
self._send_control_save(ActOrderBook['RunRCP2'])
a_log_f(s=f'RCP2 [{self.CMem.RCP2}] Start')
# 2.5] Turn off the heaters to depressurize the PZR
if self.CMem.PZRProHeaterManAuto == 0:
self._send_control_save(ActOrderBook['PZRProHeaterMan'])
a_log_f(s=f'PZR PRO heater [{self.CMem.PZRProHeaterManAuto}] Man')
if self.CMem.PZRProHeaterPos >= 0:
self._send_control_save(ActOrderBook['PZRProHeaterDown'])
a_log_f(s=f'PZR PRO Pos [{self.CMem.PZRProHeaterPos}] Down')
if self.CMem.PZRBackHeaterOnOff == 1:
self._send_control_save(ActOrderBook['PZRBackHeaterOff'])
a_log_f(s=f'PZRBackHeaterOff [{self.CMem.PZRBackHeaterOnOff}] Off')
# 3.0] Start reinforcement-learning control
if self.CMem.StartRL == 0:
self.CMem.StartRL = 1
a_log_f(s=f'StartRL [{self.CMem.StartRL}]')
else:
# 3.1] Do not isolate letdown while the PZR level recovers from low to high
if self.CMem.PZRLevel > 20:
pass
# if self.CMem.LetdownLV459Pos == 0:
# self._send_control_save(ActOrderBook['LetdownLV459Open'])
# if self.CMem.LetdownHV1Pos == 0:
# self._send_control_save(ActOrderBook['LetdownHV1Open'])
# if self.CMem.LetdownHV2Pos == 0:
# self._send_control_save(ActOrderBook['LetdownHV2Open'])
# 3.1] Spray control
if True:
pos = self.CMem.PZRSprayPosControl + 0.015 * np.clip(AMod[0] * 2, -2, 2)
zip_spray_pos = (['BPRZSP'], [pos])
self._send_control_save(zip_spray_pos)
a_log_f(s=f'Change Spray Pos [{self.CMem.PZRSprayPosControl:10}|{pos:10}]')
# 3.2] Aux Feed
if True:
aux123 = 0
if AMod[1] < -0.3:
# Decrease
aux123 = -1
elif -0.3 <= AMod[1] < 0.3:
# Stay
aux123 = 0
elif 0.3 <= AMod[1]:
# Increase
aux123 = 1
if self.CMem.SG1Wid > 80:
aux123 = -1
pos1 = self.CMem.Aux1Flow + 0.04 * aux123
pos2 = self.CMem.Aux2Flow + 0.04 * aux123
pos3 = self.CMem.Aux3Flow + 0.04 * aux123
zip_aux_pos = (['WAFWS1', 'WAFWS2', 'WAFWS3'], [pos1, pos2, pos3])
self._send_control_save(zip_aux_pos)
a_log_f(s=f'AuxFlow'
f'[{self.CMem.Aux1Flow:10}|{pos1:10}]'
f'[{self.CMem.Aux2Flow:10}|{pos2:10}]'
f'[{self.CMem.Aux3Flow:10}|{pos3:10}]')
# 3.3] SI Supply water
if True:
if AMod[2] < -0.8:
# self._send_control_save(ActOrderBook['CloseSI'])
a_log_f(s=f'CloseSI')
elif -0.8 <= AMod[2] < -0.6:
self._send_control_save(ActOrderBook['StopCHP2'])
a_log_f(s=f'StopCHP2')
elif -0.6 <= AMod[2] < 0.6:
#
pass
elif 0.6 <= AMod[2] < 0.8:
self._send_control_save(ActOrderBook['RunCHP2'])
a_log_f(s=f'RunCHP2')
elif 0.8 <= AMod[2]:
# self._send_control_save(ActOrderBook['OpenSI'])
a_log_f(s=f'OpenSI')
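# Assumed reading of the P-T curve helper below (comment added for clarity): Botdis is the
# margin to the lower limit of the pressure-temperature curve; SI is closed while that margin
# exceeds 12 and reopened once it drops below 5, forming a simple hysteresis band.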
if self.CMem.CTIME > 30000 + (18000 * 5): # TRICK
# SI logic <- used here to depressurize the RCS.
Updis, Botdis = TOOL_PTCurve.PTCureve()._check_distance(self.CMem.AVGTemp, self.CMem.PZRPres)
if Botdis > 12:
self._send_control_save(ActOrderBook['CloseSI'])
elif Botdis < 5:
self._send_control_save(ActOrderBook['OpenSI'])
# 3.4] Steam Dump
if True:
SteamDumpRate = 4
DumpPos = self.CMem.PMSS + 2.0E5 * np.clip(AMod[3] * SteamDumpRate,
- SteamDumpRate, SteamDumpRate) * 0.2
zip_Dump_pos = (['PMSS'], [DumpPos])
self._send_control_save(zip_Dump_pos)
a_log_f(s=f'PMSS [{self.CMem.PMSS:10}|{DumpPos:10}]')
return 0
def send_act(self, A):
"""
Send the control actions corresponding to A; everything else is handled automatically.
e.g.)
self._send_control_save(['KSWO115'], [0])
...
self._send_control_to_cns()
:param A: action, e.g. [0, 0, 0] depending on the action space
:return: AMod: the (possibly modified) action
"""
AMod = A
if isinstance(A, int): # when A is a plain integer (e.g. A=0)
if A == 1:
# 16.0
for _tar, _val in zip(['WAFWS1', 'WAFWS2', 'WAFWS3'], ['KSWO143', 'KSWO152', 'KSWO155']):
if self.mem[_tar]['Val'] < 20:
if self.CMem.CTIME >= self.FixedRad + 1325: self._send_control_save([_val], [1])
# 17.2
if self.CMem.CTIME == self.FixedRad + 1750: self._send_control_save(['KSWO208'], [1])
# 20.4
if self.CMem.CTIME == self.FixedRad + 2000: self._send_control_save(['KSWO115'], [1])
if self.CMem.CTIME == self.FixedRad + 2300: self._send_control_save(['KSWO123'], [1])
# 21.3
if self.CMem.CTIME == self.FixedRad + 2600: self._send_control_save(['KSWO132'], [0])
if self.CMem.CTIME == self.FixedRad + 2650: self._send_control_save(['KSWO133'], [0])
if self.CMem.CTIME == self.FixedRad + 2700: self._send_control_save(['KSWO134'], [0])
pass
elif isinstance(A, dict): # A = { ... } with one entry per AI module
if A['EM'] is not None:
if self.CMem.CoolingRateSW == 0:
if self.CMem.CTIME % 100 == 0:
self._send_act_EM_Module(A['EM'])
else:
if self.CMem.CTIME % 100 == 0:
self._send_act_EM_Module(A['EM'])
else:
print('Error')
# Done Act
self._send_control_to_cns()
return AMod
def step(self, A):
"""
Advance the simulator one step given action A.
:param A: action (dict)
:return: the latest state and reward/done (as documented; the current implementation returns 0)
"""
# Old Data (time t) ---------------------------------------
AMod = self.send_act(A)
# if self.CMem.CoolingRateSW == 0:
# if self.CMem.CTIME >= 800:
# # 5 ticks before reinforcement learning starts
# self.want_tick = int(5)
# else:
# self.want_tick = int(5)
# else:
# # 100 ticks once cooling-rate calculation and RL control begin
# self.want_tick = int(5)
print(self.want_tick, self.CMem.CTIME)
# New Data (time t+1) -------------------------------------
super(ENVCNS, self).step() # run/freeze the whole CNS and refresh its memory
self.CMem.update() # refresh the selected variables
# account for the additionally derived variable
self.mem['cCOOLRATE']['Val'] = self.CMem.CoolingRATE.get_temp(self.CMem.CTIME)
self._append_val_to_list()
self.ENVStep += 1
# next_state, next_state_list = self.get_state(self.input_info) # [s(t+1)] #
# ----------------------------------------------------------
return 0
def reset(self, file_name, initial_nub=1, mal=False, mal_case=1, mal_opt=0, mal_time=5):
# 1] Reset the CNS state and load the initialized values into memory
super(ENVCNS, self).reset(initial_nub=initial_nub, mal=False, mal_case=1, mal_opt=0, mal_time=5, file_name=file_name)
# 2] Append the updated 'Val' to 'List' and reset ENV logging
self._append_val_to_list()
# 3] Reset ENVStep
self.ENVStep = 0
# 5 FIX RADVAL
self.FixedRad = random.randint(0, 20) * 5
return 0
if __name__ == '__main__':
# ENVCNS TEST
# NOTE: __init__ also requires RIP, RPORT, and Max_len; the values below are placeholders.
env = ENVCNS(Name='Env1', IP='192.168.0.103', PORT=7101, RIP='192.168.0.29', RPORT=7201, Max_len=10)
# Run
for _ in range(1, 4):
env.reset(file_name=f'Ep{_}')
for __ in range(500):
A = 0
env.step(A)
```
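For context, `send_act` accepts either a plain integer (the scripted sequence above) or a dict whose `'EM'` entry is a 4-element action vector consumed by `_send_act_EM_Module` (index 0: PZR spray, 1: aux feed, 2: SI/charging, 3: steam dump). Below is a minimal driver sketch under those assumptions; the IP addresses, ports, and `Max_len` are placeholders rather than values taken from the original repository.

```python
import numpy as np

from ENVCNS import ENVCNS

# Placeholder network settings -- replace with the local CNS / remote configuration.
env = ENVCNS(Name='Env1', IP='192.168.0.103', PORT=7101,
             RIP='192.168.0.29', RPORT=7201, Max_len=10)

env.reset(file_name='Ep1')
for _ in range(100):
    # 4-dim EM action in [-1, 1]: [spray, aux feed, SI/charging valve, steam dump]
    a_em = np.random.uniform(-1.0, 1.0, size=4)
    env.step({'EM': a_em})
```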
#### File: Jonghyun-Kim-73/SAMG_Project/main_left.py
```python
import os
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from CustomButton import CustomButton
from Flag import Flag
from arrow import Arrow
from Mitigation_01 import MitigationWindow
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
source1 = resource_path("x_button.png")
class MainLeft(QWidget):
qss = """
QWidget#main {
background: rgb(128, 128, 128);
border: 2px solid rgb(0, 0, 0);
}
QWidget {
background: rgb(128, 128, 128);
border: 0px solid rgb(0, 0, 0);
}
"""
def __init__(self, parent=None):
super(MainLeft, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True) # inherit the parent widget style
self.parent = parent
self.shmem = parent.shmem
self.setStyleSheet(self.qss)
# size constraints
self.setMinimumHeight(900 - 40)
self.setMinimumWidth(int(1920 * (2 / 3)))
# layout setup
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 0, 5, 0)
label2 = FlowChartArea(self)
label2.setObjectName("main")
layout.addWidget(label2)
self.setLayout(layout)
class FlowChartArea(QWidget):  # QThread was listed as a base but never used, so it is dropped here
qss = """
QWidget#scroll {
background: rgb(128, 128, 128);
border: 2px solid rgb(0, 0, 0);
}
QWidget {
background: rgb(128, 128, 128);
border: 0px solid rgb(0, 0, 0);
}
"""
def __init__(self, parent=None):
super(FlowChartArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.scroll = QScrollArea()
self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
flowchart = FlowChart(self)
self.scroll.setWidget(flowchart)
layout = QVBoxLayout()
layout.addWidget(self.scroll)
self.setLayout(layout)
# auto-scroll
def paintEvent(self, e):
if Flag.PAGE1:
vbar = self.scroll.verticalScrollBar()
vbar.setValue((vbar.maximum())*8/20)
Flag.PAGE1 = False
if Flag.PAGE2:
vbar = self.scroll.verticalScrollBar()
vbar.setValue((vbar.maximum()))
Flag.PAGE2 = False
if Flag.PAGE3:
vbar = self.scroll.verticalScrollBar()
vbar.setValue((vbar.minimum()))
Flag.PAGE3 = False
class FlowChart(QWidget):
def __init__(self, parent=None):
super(FlowChart, self).__init__()
self.parent = parent
self.shmem = parent.shmem
self.setGeometry(0, 0, 1210, 2070) # 1900*(3/4) = 1425
# Arrow
self.line1 = Arrow(self, x=270, y=100, x2=270, y2=123, type=1)
self.line1 = Arrow(self, x=270, y=210, x2=270, y2=243, type=1)
self.line1 = Arrow(self, x=270, y=370, x2=270, y2=403, type=1)
self.line1 = Arrow(self, x=270, y=530, x2=270, y2=563, type=1)
self.line1 = Arrow(self, x=270, y=690, x2=270, y2=723, type=1)
self.line1 = Arrow(self, x=270, y=850, x2=270, y2=883, type=1)
self.line1 = Arrow(self, x=270, y=1010, x2=270, y2=1043, type=1)
self.line1 = Arrow(self, x=270, y=1170, x2=270, y2=1203, type=1)
self.line1 = Arrow(self, x=270, y=130, x2=270, y2=1363, type=1)
self.line1 = Arrow(self, x=270, y=1750, x2=270, y2=1893, type=1)
# "아니오" (No) branch arrows
self.line1 = Arrow(self, x=270, y=315, x2=663, y2=315, type=3)
self.line1 = Arrow(self, x=270, y=475, x2=663, y2=475, type=3)
self.line1 = Arrow(self, x=270, y=635, x2=663, y2=635, type=3)
self.line1 = Arrow(self, x=270, y=795, x2=663, y2=795, type=3)
self.line1 = Arrow(self, x=270, y=955, x2=663, y2=955, type=3)
self.line1 = Arrow(self, x=270, y=1115, x2=663, y2=1115, type=3)
self.line1 = Arrow(self, x=270, y=1275, x2=663, y2=1275, type=3)
# arrows returning to the main column
self.line1 = Arrow(self, x=895, y=396, x2=280, y2=396, type=2)
self.line1 = Arrow(self, x=895, y=556, x2=280, y2=556, type=2)
self.line1 = Arrow(self, x=895, y=716, x2=280, y2=716, type=2)
self.line1 = Arrow(self, x=895, y=876, x2=280, y2=876, type=2)
self.line1 = Arrow(self, x=895, y=1036, x2=280, y2=1036, type=2)
self.line1 = Arrow(self, x=895, y=1196, x2=280, y2=1196, type=2)
self.line1 = Arrow(self, x=895, y=1356, x2=280, y2=1356, type=2)
self.line1 = Arrow(self, x=1200, y=233, x2=280, y2=233, type=2)
# CustomButton
self.btn_1 = CustomButton(self, page=1, num=1, x=70, y=30, w=400, h=70, text='TSC “완화” 절차서 사용시작',type=0)
self.btn_2 = CustomButton(self, page=1, num=2, x=70, y=130, w=400, h=90, text='안전 변수<br/>R02, P09, H04 감시 시작', type=0)
self.btn_3 = CustomButton(self, page=1, num=3, x=70, y=250, w=400, h=130, text='발전소 부지 경계 선량<br/>< 5분동안 0.5mSv/h', type=2)
self.btn_4 = CustomButton(self, page=1, num=4, x=70, y=410, w=400, h=130, text='격납건물 압력<br/>< 4.97 psig', type=2)
self.btn_5 = CustomButton(self, page=1, num=5, x=70, y=570, w=400, h=130, text='격납건물 수소농도<br/>< [H02]%', type=2)
self.btn_6 = CustomButton(self, page=1, num=6, x=70, y=730, w=400, h=130, text='노심출구온도<br/>< 371.1°C', type=2)
self.btn_7 = CustomButton(self, page=1, num=7, x=70, y=890, w=400, h=130, text='RCS 압력<br/><28.12kg/cm2', type=2)
self.btn_8 = CustomButton(self, page=1, num=8, x=70, y=1050, w=400, h=130, text='모든 증기발생기 수위<br/>> 74% NR', type=2)
self.btn_9 = CustomButton(self, page=1, num=9, x=70, y=1210, w=400, h=130, text='격납건물 수위<br/>> 27.1%', type=2)
self.btn_10 = CustomButton(self, page=1, num=10, x=20, y=1370, w=500, h=500, text='● 노심출구온도 < 371.1°C<br/><br/>그리고 안정 또는 감소<br/>●발전소부지 경계 선량 <[R01]<br/><br/>그리고 안정 또는 감소<br/>● 격납건물 압력 < [P11]<br/><br/>그리고 안정 또는 감소<br/>●격납건물 수소농도 < [H02]<br/><br/>그리고 안정 또는 감소', type=2)
self.btn_11 = CustomButton(self, page=1, num=11, x=70, y=1900, w=400, h=90, text='종료-01<br/>“중대사고관리 종료“ 수행', type=1)
self.btn_3_1 = CustomButton(self, page=1, num=31, x=670, y=270, w=450, h=90, text='완화-01<br/>“핵분열생성물 방출 제어“ 수행')
self.btn_4_1 = CustomButton(self, page=1, num=41, x=670, y=430, w=450, h=90, text='완화-02<br/>“격납건물 상태제어“ 수행', type=1)
self.btn_5_1 = CustomButton(self, page=1, num=51, x=670, y=590, w=450, h=90, text='완화-03<br/>“격납건물내 수소 제어“ 수행', type=1)
self.btn_6_1 = CustomButton(self, page=1, num=61, x=670, y=750, w=450, h=90, text='완화-04<br/>“원자로 냉각재 계통 냉각수 주입“ 수행', type=1)
self.btn_7_1 = CustomButton(self, page=1, num=71, x=670, y=910, w=450, h=90, text='완화-05<br/>“원자로 냉각재 계통 감압“ 수행', type=1)
self.btn_8_1 = CustomButton(self, page=1, num=81, x=670, y=1070, w=450, h=90, text='완화-06<br/>“증기발생기 급수 주입“ 수행', type=1)
self.btn_9_1 = CustomButton(self, page=1, num=91, x=670, y=1230, w=450, h=90, text='완화-07<br/>“격납건물 냉각수 주입“ 수행', type=1)
def paintEvent(self, event):
p = QPainter(self)
p.setPen(QPen(Qt.black))
p.setFont(QFont('맑은 고딕', 14))
p.drawLine(895, 350, 895, 396)
p.drawText(470, 305, "아니오")
p.drawText(240, 403, "예")
p.drawLine(895, 500, 895, 556)
p.drawText(470, 465, "아니오")
p.drawText(240, 563, "예")
p.drawLine(895, 666, 895, 716)
p.drawText(470, 625, "아니오")
p.drawText(240, 723, "예")
p.drawLine(895, 820, 895, 876)
p.drawText(470, 785, "아니오")
p.drawText(240, 883, "예")
p.drawLine(895, 990, 895, 1036)
p.drawText(470, 945, "아니오")
p.drawText(240, 1043, "예")
p.drawLine(895, 1140, 895, 1196)
p.drawText(470, 1105, "아니오")
p.drawText(240, 1203, "예")
p.drawLine(895, 1300, 895, 1356)
p.drawText(470, 1265, "아니오")
p.drawText(240, 1363, "예")
p.drawLine(270, 1620, 1200, 1620)
p.drawLine(1200, 233, 1200, 1620)
current = 0
# Popup open
if Flag.btn_clicked[1]:
self.popup = CustomPopup(p_number=1, p_title="TSC “완화” 절차서 사용시작", p_content='\nTSC “완화” 절차서를 시작합니다.')
Flag.btn_clicked[1] = False
self.btn_1.color()
Flag.color[1] = 2
Flag.close[1] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(1)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[1] == 0 and Flag.close[1] == 0: # yes # prevent re-opening popups for completed buttons
self.btn_1.complete()
Flag.color[1] = 3
Flag.btn_clicked[2] = True
self.btn_2.color()
Flag.color[2] = 2
if Flag.btn_clicked[2]:
self.popup = CustomPopup(p_number=2, p_title="안전 변수 R02, P09, H04 감시 시작", p_content='\n안전 변수 R02, P09, H04 감시를 시작합니다.')
Flag.btn_clicked[2] = False
self.btn_2.color()
Flag.color[2] = 2
Flag.close[2] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(2)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[2] == 0 and Flag.close[2] == 0: # prevent re-opening popups for completed buttons
self.btn_2.complete()
Flag.color[2] = 3
Flag.btn_clicked[3] = True
self.btn_3.color()
Flag.color[3] = 2
if Flag.btn_clicked[3]:
self.popup = CustomPopup(p_number=3, p_title="발전소 부지 경계 선량 확인", p_content="\n발전소 부지 경계 선량 5분동안 < 0.5mSv/h", p_label1="현재 발전소 부지 경계 선량", p_value1=Flag.value1_1) #단위 추가 필요 "%d"%~
Flag.btn_clicked[3] = False
self.btn_3.color()
Flag.color[3] = 2
Flag.close[3] = 0 # popup-close flag
# 현재 실행중인 버튼 병행처리
current = self.together(3)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[3] == 0 and Flag.close[3] == 0: #yes
self.btn_3.complete()
Flag.color[3] = 3
Flag.btn_clicked[4] = True
self.btn_3_1.complete()
self.btn_4.color()
Flag.color[31] = 3
Flag.color[4] = 2
elif Flag.btn_yes[3] == 1 and Flag.close[3] == 0: #no
self.btn_3.complete()
Flag.color[3] = 3
Flag.btn_clicked_1[31] = True
self.btn_3_1.color()
Flag.color[31] = 2
if Flag.btn_clicked[4]:
self.popup = CustomPopup(p_number=4, p_title="격납건물 압력 확인", p_content="\n격납건물 압력 < 4.97 psig", p_label1="현재 격납건물 압력", p_value1=Flag.value1_2)
Flag.btn_clicked[4] = False
self.btn_4.color()
Flag.color[4] = 2
Flag.close[4] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(4)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[4] == 0 and Flag.close[4] == 0: # yes
self.btn_4.complete()
Flag.color[4] = 3
Flag.btn_clicked[5] = True
self.btn_4_1.complete()
self.btn_5.color()
Flag.color[41] = 3
Flag.color[5] = 2
elif Flag.btn_yes[4] == 1 and Flag.close[4] == 0: # no
self.btn_4.complete()
Flag.color[4] = 3
Flag.btn_clicked_1[41] = True
self.btn_4_1.color()
Flag.color[41] = 2
if Flag.btn_clicked[5]:
self.popup = CustomPopup(p_number=5, p_title="격납건물 수소농도 확인", p_content="\n격납건물 수소농도 < [H02]%", p_label1="현재 격납건물 수소농도", p_value1=Flag.value2_4)
Flag.PAGE1 = True
Flag.btn_clicked[5] = False
self.btn_5.color()
Flag.color[5] = 2
Flag.close[5] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(5)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[5] == 0 and Flag.close[5] == 0: # yes
self.btn_5.complete()
Flag.color[5] = 3
Flag.btn_clicked[6] = True
self.btn_5_1.complete()
self.btn_6.color()
Flag.color[51] = 3
Flag.color[6] = 2
elif Flag.btn_yes[5] == 1 and Flag.close[5] == 0: # no
self.btn_5.complete()
Flag.color[5] = 3
Flag.btn_clicked_1[51] = True
self.btn_5_1.color()
Flag.color[51] = 2
if Flag.btn_clicked[6]:
self.popup = CustomPopup(p_number=6, p_title="노심출구온도 확인", p_content="\n노심출구온도 < 371.1°C", p_label1="현재 노심출구온도", p_value1=Flag.value1_3)
Flag.btn_clicked[6] = False
Flag.PAGE1 = True
self.btn_6.color()
Flag.color[6] = 2
Flag.close[6] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(6)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[6] == 0 and Flag.close[6] == 0: # yes
self.btn_6.complete()
Flag.color[6] = 3
Flag.btn_clicked[7] = True
self.btn_6_1.complete()
self.btn_7.color()
Flag.color[61] = 3
Flag.color[7] = 2
elif Flag.btn_yes[6] == 1 and Flag.close[6] == 0: # no
self.btn_6.complete()
Flag.color[6] = 3
Flag.btn_clicked_1[61] = True
self.btn_6_1.color()
Flag.color[61] = 2
if Flag.btn_clicked[7]:
self.popup = CustomPopup(p_number=7, p_title="RCS 압력 확인", p_content="\nRCS 압력 < 28.12kg/cm^2", p_label1="현재 RCS 압력", p_value1=Flag.value1_4)
Flag.btn_clicked[7] = False
self.btn_7.color()
Flag.color[7] = 2
Flag.close[7] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(7)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[7] == 0 and Flag.close[7] == 0: # yes
self.btn_7.complete()
Flag.color[7] = 3
Flag.btn_clicked[8] = True
self.btn_7_1.complete()
self.btn_8.color()
Flag.color[71] = 3
Flag.color[8] = 2
elif Flag.btn_yes[7] == 1 and Flag.close[7] == 0: # no
self.btn_7.complete()
Flag.color[7] = 3
Flag.btn_clicked_1[71] = True
self.btn_7_1.color()
Flag.color[71] = 2
if Flag.btn_clicked[8]:
self.popup = CustomPopup(p_number=8, p_title="모든 증기발생기 수위 확인", p_content="\n모든 증기발생기 수위 < 74% NR", p_label1="SG 1 Level", p_value1=Flag.value1_5, p_label2="SG 2 Level", p_value2=Flag.value1_6)
Flag.btn_clicked[8] = False
self.btn_8.color()
Flag.color[8] = 2
Flag.close[8] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(8)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[8] == 0 and Flag.close[8] == 0: # yes
self.btn_8.complete()
Flag.color[8] = 3
Flag.btn_clicked[9] = True
self.btn_8_1.complete()
self.btn_9.color()
Flag.color[81] = 3
Flag.color[9] = 2
elif Flag.btn_yes[8] == 1 and Flag.close[8] == 0: # no
self.btn_8.complete()
Flag.color[8] = 3
Flag.btn_clicked_1[81] = True
self.btn_8_1.color()
Flag.color[81] = 2
if Flag.btn_clicked[9]:
self.popup = CustomPopup(p_number=9, p_title="격납건물 수위 확인", p_content="\n격납건물 수위 > 27.1%", p_label1="현재 격납건물 수위", p_value1=Flag.value1_7)
Flag.btn_clicked[9] = False
Flag.PAGE2 = True
self.btn_9.color()
Flag.color[9] = 2
Flag.close[9] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(9)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[9] == 0 and Flag.close[9] == 0: # yes
self.btn_9.complete()
Flag.color[9] = 3
Flag.btn_clicked[10] = True
self.btn_9_1.complete()
self.btn_10.color()
Flag.color[101] = 3
Flag.color[10] = 2
elif Flag.btn_yes[9] == 1 and Flag.close[9] == 0: # no
self.btn_9.complete()
Flag.color[9] = 3
Flag.btn_clicked_1[91] = True
self.btn_9_1.color()
Flag.color[91] = 2
if Flag.btn_clicked[10]:
self.popup = CustomPopup(p_number=10, p_title="TOTAL", p_content="\nTOTAL",
p_label1="노심출구온도 < 371.1°C", p_value1=Flag.value2_1,
p_label2="발전소부지 경계 선량 30분동안 < 0.5mSv/h", p_value2=Flag.value2_2,
p_label3="격납건물 압력 < 4.97 psig", p_value3=Flag.value2_3,
p_label4="격납건물 수소농도 < [H02]", p_value4=Flag.value2_4)
Flag.btn_clicked[10] = False
self.btn_10.color()
Flag.color[10] = 2
Flag.close[10] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(10)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[10] == 0 and Flag.close[10] == 0: # yes
self.btn_10.complete()
Flag.color[10] = 3
Flag.btn_clicked[11] = True
self.btn_11.color()
Flag.color[11] = 2
elif Flag.btn_yes[10] == 1 and Flag.close[10] == 0: # no # go back to safety-variable monitoring (reset the buttons)
Flag.PAGE3 = True
self.btn_1.complete()
self.btn_2.color()
self.btn_3.color_init()
self.btn_4.color_init()
self.btn_5.color_init()
self.btn_6.color_init()
self.btn_7.color_init()
self.btn_8.color_init()
self.btn_9.color_init()
self.btn_10.color_init()
self.btn_11.color_init()
self.btn_3_1.color_init()
self.btn_4_1.color_init()
self.btn_5_1.color_init()
self.btn_6_1.color_init()
self.btn_7_1.color_init()
self.btn_8_1.color_init()
self.btn_9_1.color_init()
for i in range(3, 12):
Flag.btn_yes[i] = -1
Flag.color[i] = 0
Flag.color[i*10+1] = 0
Flag.color[1] = 3
Flag.color[2] = 2
if Flag.btn_clicked[11]:
Flag.btn_clicked[11] = False
self.btn_11.color()
Flag.color[11] = 2
Flag.close[11] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(11)
self.change(current)
if Flag.btn_clicked_1[31]:
self.popup = CustomPopup(p_number=31, p_title="완화-01 “핵분열생성물 방출 제어“ 수행", p_content="\n완화-01 “핵분열생성물 방출 제어“를 수행합니다.")
Flag.btn_clicked_1[31] = False
self.btn_3_1.color()
Flag.color[31] = 2
Flag.close[31] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(31)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[31] == 0 and Flag.close[31] == 0: # yes
self.btn_3_1.complete()
Flag.color[31] = 3
Flag.btn_clicked[4] = True
self.btn_4.color()
Flag.color[4] = 2
if Flag.btn_clicked_1[41]:
self.popup = CustomPopup(p_number=41, p_title="완화-02 “격납건물 상태제어“ 수행", p_content="\n완화-02 “격납건물 상태제어“를 수행합니다.")
Flag.btn_clicked_1[41] = False
self.btn_4_1.color()
Flag.color[41] = 2
Flag.close[41] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(41)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[41] == 0 and Flag.close[41] == 0: # yes
self.btn_4_1.complete()
Flag.color[41] = 3
Flag.btn_clicked[5] = True
self.btn_5.color()
Flag.color[5] = 2
if Flag.btn_clicked_1[51]:
self.popup = CustomPopup(p_number=51, p_title="완화-03 “격납건물내 수소 제어“ 수행", p_content="\n완화-03 “격납건물내 수소 제어“를 수행합니다.")
Flag.btn_clicked_1[51] = False
self.btn_5_1.color()
Flag.color[51] = 2
Flag.close[51] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(51)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[51] == 0 and Flag.close[51] == 0: # yes
self.btn_5_1.complete()
Flag.color[51] = 3
Flag.btn_clicked[6] = True
self.btn_6.color()
Flag.color[6] = 2
if Flag.btn_clicked_1[61]:
self.popup = CustomPopup(p_number=61, p_title="완화-04 “원자로 냉각재 계통 냉각수 주입“ 수행", p_content="\n완화-04 “원자로 냉각재 계통 냉각수 주입“을 수행합니다.")
Flag.btn_clicked_1[61] = False
self.btn_6_1.color()
Flag.color[61] = 2
Flag.close[61] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(61)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[61] == 0 and Flag.close[61] == 0: # yes
self.btn_6_1.complete()
Flag.color[61] = 3
Flag.btn_clicked[7] = True
self.btn_7.color()
Flag.color[7] = 2
if Flag.btn_clicked_1[71]:
self.popup = CustomPopup(p_number=71, p_title="완화-05 “원자로 냉각재 계통 감압“ 수행", p_content="\n완화-05 “원자로 냉각재 계통 감압“을 수행합니다.")
Flag.btn_clicked_1[71] = False
self.btn_7_1.color()
Flag.color[71] = 2
Flag.close[71] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(71)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[71] == 0 and Flag.close[71] == 0: # yes
self.btn_7_1.complete()
Flag.color[71] = 3
Flag.btn_clicked[8] = True
self.btn_8.color()
Flag.color[8] = 2
if Flag.btn_clicked_1[81]:
self.popup = CustomPopup(p_number=81, p_title="완화-06 “증기발생기 급수 주입“ 수행", p_content="\n완화-06 “증기발생기 급수 주입“을 수행합니다.")
Flag.btn_clicked_1[81] = False
self.btn_8_1.color()
Flag.color[81] = 2
Flag.close[81] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(81)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[81] == 0 and Flag.close[81] == 0: # yes
self.btn_8_1.complete()
Flag.color[81] = 3
self.miti_win = MitigationWindow(self) # self 필수
self.miti_win.show()
if Flag.btn_clicked_1[91]:
self.popup = CustomPopup(p_number=91, p_title="완화-07 “격납건물 냉각수 주입“ 수행", p_content="\n완화-07 “격납건물 냉각수 주입“을 수행합니다.")
Flag.PAGE2 = True
Flag.btn_clicked_1[91] = False
self.btn_9_1.color()
Flag.color[91] = 2
Flag.close[91] = 0
# 현재 실행중인 버튼 병행처리
current = self.together(91)
self.change(current)
show = self.popup.showModal()
if Flag.btn_yes[91] == 0 and Flag.close[91] == 0: # yes
self.btn_9_1.complete()
Flag.color[91] = 3
Flag.btn_clicked[10] = True
self.btn_10.color()
Flag.color[10] = 2
def together(self, me):
for i in range(1, 12):
if Flag.color[i] == 2: # find the currently running button, excluding itself
if i == me:
pass
else:
Flag.color[i] = 1 # mark as running in parallel
return i
if Flag.color[i*10+1] == 2:
if me == (i*10+1):
pass
else:
Flag.color[i*10+1] = 1 # mark as running in parallel
return i*10+1
def change(self, find):
if find == 1: self.btn_1.color2()
elif find == 2: self.btn_2.color2()
elif find == 3: self.btn_3.color2()
elif find == 4: self.btn_4.color2()
elif find == 5: self.btn_5.color2()
elif find == 6: self.btn_6.color2()
elif find == 7: self.btn_7.color2()
elif find == 8: self.btn_8.color2()
elif find == 9: self.btn_9.color2()
elif find == 10: self.btn_10.color2()
elif find == 11: self.btn_11.color2()
elif find == 31: self.btn_3_1.color2()
elif find == 41: self.btn_4_1.color2()
elif find == 51: self.btn_5_1.color2()
elif find == 61: self.btn_6_1.color2()
elif find == 71: self.btn_7_1.color2()
elif find == 81: self.btn_8_1.color2()
elif find == 91: self.btn_9_1.color2()
class CustomPopup(QDialog):
qss = """
QWidget{
background : rgb(180, 180, 180)
}
QLabel#title {
font-size: 14pt;
}
QLabel#data {
font-size:12pt;
border: 2px inset rgb(0, 0, 0);
background: rgb(255, 255, 255);
}
QDialog{
border: 2px solid rgb(0, 0, 0);
}
QPushButton {
color: rgb(0, 0, 0);
background-color: white;
border: 2px solid rgb(0, 0, 0);
}
"""
def __init__(self, p_number=None, p_title=None, p_content=None, p_label1=None, p_value1=None, p_label2=None, p_value2=None, p_label3=None, p_value3=None, p_label4=None, p_value4=None):
super().__init__()
self.layout = QVBoxLayout()
# 팝업 정보(메시지)
self.p_number = p_number
self.p_title = p_title
self.p_content = p_content
self.p_label1 = p_label1
self.p_value1 = p_value1
self.p_label2 = p_label2
self.p_value2 = p_value2
self.p_label3 = p_label3
self.p_value3 = p_value3
self.p_label4 = p_label4
self.p_value4 = p_value4
self.layout.addWidget(CustomPopupContent(self, p_number=self.p_number, p_title=self.p_title, p_content=self.p_content, p_label1=self.p_label1, p_value1=self.p_value1, p_label2=self.p_label2, p_value2=self.p_value2, p_label3=self.p_label3, p_value3=self.p_value3, p_label4=self.p_label4, p_value4=self.p_value4))
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.setStyleSheet(self.qss)
self.layout.addStretch(-1)
self.setGeometry(100, 300, 550, 100)
self.setWindowFlags(Qt.FramelessWindowHint)
self.pressing = False
def showModal(self):
return super().exec_()
class CustomPopupContent(QWidget):
qss = """
QWidget{
background : rgb(180, 180, 180)
}
QPushButton{
background : rgb(218,218,218);
border: 1px solid rgb(0, 0, 0);
}
QTableWidget {
gridline-color: rgb(0,0,0);
font-size: 12pt;
}
QPushButton#xbutton {
background-color: none;
border: 2px solid rgb(0, 0, 0);
}
"""
def __init__(self, parent, p_number = None, p_title = None, p_content = None, p_label1=None, p_value1=None,
p_label2=None, p_value2=None, p_label3=None, p_value3=None, p_label4=None, p_value4=None):
super(CustomPopupContent, self).__init__()
self.parent = parent
self.setStyleSheet(self.qss)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setMinimumWidth(400)
self.p_number = p_number
self.p_label1 = p_label1
self.p_value1 = p_value1
self.p_label2 = p_label2
self.p_value2 = p_value2
self.p_label3 = p_label3
self.p_value3 = p_value3
self.p_label4 = p_label4
self.p_value4 = p_value4
self.layout_t = QHBoxLayout()
self.layout_t.setContentsMargins(0, 0, 0, 0)
self.title = QLabel(p_title)
self.title.setAlignment(Qt.AlignCenter)
self.title.setFixedSize(50, 40)
self.title.setStyleSheet(""" background-color: rgb(91,155,213); border: 2px solid rgb(0,0,0); color: white;font-size: 14pt; """)
btn_close = QPushButton()
btn_close.setIcon(QIcon(source1))
btn_close.setStyleSheet("border:0px")
btn_close.clicked.connect(self.close)
btn_close.setIconSize(QSize(25, 25))
btn_close.setFixedSize(40, 30)
btn_close.setObjectName('xbutton')
self.layout_t.addWidget(self.title)
self.layout_t.addWidget(btn_close)
self.layout.addLayout(self.layout_t)
self.label = QLabel(p_content)
self.label.setObjectName("title")
# remove the border
self.label.setStyleSheet("margin : 3px;")
self.label.setAlignment(Qt.AlignCenter)
self.subsub = QHBoxLayout()
self.subLayout = QHBoxLayout()
self.layout.addWidget(self.label)
if self.p_number not in (1, 2, 31, 41, 51, 61, 71, 81, 91):
self.tableWidget = QTableWidget()
self.tableWidget.setStyleSheet("background: rgb(221, 221, 221);"
"border: 0px solid rgb(0, 0, 0);")
self.tableWidget.horizontalHeader().setVisible(False) # hide the table header
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.setContentsMargins(0, 0, 0, 0)
if self.p_number == 8:
self.tableWidget.setRowCount(2)
self.tableWidget.setFixedSize(350, 60)
elif self.p_number == 10:
self.tableWidget.setRowCount(4)
self.tableWidget.setFixedSize(350, 120)
else:
self.tableWidget.setRowCount(1)
self.tableWidget.setFixedSize(350, 30)
self.tableWidget.setColumnCount(2)
self.tableWidget.setColumnWidth(0,250)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
# disable editing
self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tableWidget.setFocusPolicy(Qt.NoFocus)
self.tableWidget.setSelectionMode(QAbstractItemView.NoSelection)
if self.p_number == 8:
item1_ = QTableWidgetItem(p_label1)
item2_ = QTableWidgetItem(p_value1)
if float(p_value1) <= 45:
item1_.setBackground(QColor(252, 227, 112))
item2_.setBackground(QColor(252, 227, 112))
item3_ = QTableWidgetItem(p_label2)
item4_ = QTableWidgetItem(p_value2)
if float(p_value2) <= 45:
item3_.setBackground(QColor(252, 227, 112))
item4_.setBackground(QColor(252, 227, 112))
self.tableWidget.setItem(0, 0, item1_)
self.tableWidget.setItem(0, 1, item2_)
self.tableWidget.setItem(1, 0, item3_)
self.tableWidget.setItem(1, 1, item4_)
else:
self.tableWidget.setItem(0, 0, QTableWidgetItem(p_label1))
self.tableWidget.setItem(0, 1, QTableWidgetItem(p_value1))
self.tableWidget.setItem(1, 0, QTableWidgetItem(p_label2))
self.tableWidget.setItem(1, 1, QTableWidgetItem(p_value2))
self.tableWidget.setItem(2, 0, QTableWidgetItem(p_label3))
self.tableWidget.setItem(2, 1, QTableWidgetItem(p_value3))
self.tableWidget.setItem(3, 0, QTableWidgetItem(p_label4))
self.tableWidget.setItem(3, 1, QTableWidgetItem(p_value4))
self.tableWidget.setGeometry(30, 30, 30, 30)
# table alignment
delegate = AlignDelegate()
self.tableWidget.setItemDelegate(delegate)
fnt = self.font()
fnt.setPointSize(12)
self.tableWidget.setFont(fnt)
self.subsub.addWidget(self.tableWidget)
self.layout.addLayout(self.subsub)
self.btnOK = QPushButton("예")
self.btnOK.setFixedSize(100, 35)
self.btnOK.setCursor(QCursor(Qt.PointingHandCursor))
self.btnOK.clicked.connect(self.onOKButtonClicked)
self.btnCancel = QPushButton("아니오")
self.btnCancel.setFixedSize(100, 35)
self.btnCancel.setCursor(QCursor(Qt.PointingHandCursor))
self.btnCancel.clicked.connect(self.onCancelButtonClicked)
self.subLayout.setContentsMargins(50, 30, 50, 30)
self.subLayout.addWidget(self.btnOK)
self.subLayout.addWidget(self.btnCancel)
if self.p_number in (1, 2, 31, 41, 51, 61, 71, 81, 91):
self.btnCancel.hide()
else:
self.btnCancel.show()
self.layout.addLayout(self.subLayout)
self.layout.addStretch(1)
self.setLayout(self.layout)
#Popup move
self.start = QPoint(0, 0)
self.pressing = False
# closing with the X button leaves the button in the "parallel" color
def close(self):
Flag.close[self.p_number] = 1
self.setDisabled(True)
self.parent.close()
def onOKButtonClicked(self):
Flag.btn_yes[self.p_number] = 0
self.setDisabled(True)
self.parent.close()
def onCancelButtonClicked(self):
Flag.btn_yes[self.p_number] = 1
self.parent.close()
def showModal(self):
return super().exec_()
def resizeEvent(self, QResizeEvent):
super(CustomPopupContent, self).resizeEvent(QResizeEvent)
self.title.setFixedWidth(self.parent.width())
def mousePressEvent(self, event):
self.start = self.mapToGlobal(event.pos())
self.pressing = True
def mouseMoveEvent(self, event):
if self.pressing:
self.end = self.mapToGlobal(event.pos())
self.movement = self.end - self.start
self.parent.setGeometry(self.mapToGlobal(self.movement).x(),
self.mapToGlobal(self.movement).y(),
self.parent.width(),
self.parent.height())
self.start = self.end
def mouseReleaseEvent(self, QMouseEvent):
self.pressing = False
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainLeft()
window.show()
flow = FlowChart()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_()
```
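The flow chart above is driven entirely by module-level state in `Flag.py`, which is not reproduced in this excerpt. Below is a minimal sketch of the interface the code appears to assume; the container sizes and defaults are guesses, and the real `Flag.py` in the repository may declare these differently.

```python
# Sketch of the Flag module assumed by main_left.py / main_right.py (illustrative only).
class Flag:
    # Indexed by button number (1..11) and by the "no"-branch buttons (31, 41, ..., 91),
    # so the containers are sized generously.
    btn_clicked = [False] * 120    # main-column button pressed
    btn_clicked_1 = [False] * 120  # "no"-branch (완화-XX) button pressed
    btn_yes = [-1] * 120           # -1: unanswered, 0: "예" clicked, 1: "아니오" clicked
    close = [0] * 120              # 1 while the popup was dismissed with the X button
    color = [0] * 120              # 0: idle, 1: parallel, 2: running, 3: complete

    # Auto-scroll requests consumed by FlowChartArea.paintEvent
    PAGE1 = False
    PAGE2 = False
    PAGE3 = False

    # Latest plant values pushed in by main_right.py (stored as strings)
    value1_1 = value1_2 = value1_3 = value1_4 = '0'
    value1_5 = value1_6 = value1_7 = '0'
    value2_1 = value2_2 = value2_3 = value2_4 = '0'
```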
#### File: Jonghyun-Kim-73/SAMG_Project/main_right.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from Flag import Flag
from TOOL_MatGP2 import Trend
class MainRight(QWidget):
qss = """
QWidget {
background: rgb(128, 128, 128);
border: 2px solid rgb(0, 0, 0);
font-size: 14pt;
}
QLabel {
background: rgb(131, 131, 131);
border-radius: 6px;
color: rgb(255, 255, 255);
}
QTableWidget {
background: rgb(221, 221, 221);
border: 1px solid rgb(0, 0, 0);
}
QPushButton{
background: rgb(221, 221, 221)
}
QWidget#label {
background: rgb(128, 128, 128);
border: 0px solid rgb(0, 0, 0);
}
"""
def __init__(self, parent=None):
super(MainRight, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.setStyleSheet(self.qss)
# # basic attributes
self.setMinimumHeight(750)
#
# # layout
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
#
label1 = MainParaArea(self)
label2 = EndCondArea(self)
label1.setObjectName("label")
label2.setObjectName("label")
layout.addWidget(label1)
layout.addWidget(label2)
self.setLayout(layout)
class MainParaArea(QWidget):
def __init__(self, parent=None):
super(MainParaArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.setFixedHeight(559)
# layout
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 0)
para_table = ParaTable(self)
layout.addWidget(para_table)
self.setLayout(layout)
class ParaTable(QTableWidget):
def __init__(self, parent=None):
super(ParaTable, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.setObjectName('ParaTable')
# define the table frame appearance
self.horizontalHeader().setVisible(False)
self.verticalHeader().setVisible(False) # hide row numbers
# table setup
col_info = [('주요 발전소 변수', 360), ('현재 발전소 변수', 253)] # 475
self.setColumnCount(2)
self.setRowCount(8)
self.horizontalHeader().setFixedHeight(69)
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setFocusPolicy(Qt.NoFocus)
self.setSelectionMode(QAbstractItemView.NoSelection)
# adjust table row heights
for each in range(self.rowCount()):
self.setRowHeight(each, 69)
col_names = []
for i, (l, w) in enumerate(col_info):
self.setColumnWidth(i, w)
col_names.append(l)
# table header
self.setHorizontalHeaderLabels(col_names)
self.horizontalHeader().setStyleSheet("::section {background: rgb(221, 221, 221);font-size:14pt;font-weight: bold;}")
self.horizontalHeader().sectionPressed.disconnect()
item = [0] * 8
self.item2 = [0] * 8
item[0] = QTableWidgetItem('주요 발전소 변수')
item[1] = QTableWidgetItem('발전소 부지 경계 선량')
item[2] = QTableWidgetItem('격납건물 압력')
item[3] = QTableWidgetItem('노심출구 온도')
item[4] = QTableWidgetItem('RCS 압력')
item[5] = QTableWidgetItem('SG 1 수위 NR')
item[6] = QTableWidgetItem('SG 2 수위 NR')
item[7] = QTableWidgetItem('격납건물 수위')
for i in range(8):
item[i].setFlags(Qt.NoItemFlags)
item[i].setForeground(QBrush(QColor(0, 0, 0)))
self.setItem(i, 0, item[i])
self.item2[0] = QTableWidgetItem('현재 발전소 변수')
self.item2[1] = QTableWidgetItem('0 mSv')
self.item2[2] = QTableWidgetItem('0 psig')
self.item2[3] = QTableWidgetItem('0 °C')
self.item2[4] = QTableWidgetItem('0 kg/cm²')
self.item2[5] = QTableWidgetItem('0 %')
self.item2[6] = QTableWidgetItem('0 %')
self.item2[7] = QTableWidgetItem('0 %')
for i in range(8):
self.setItem(i, 1, self.item2[i])
self.doubleClicked.connect(self.popup)
# center-align table cell contents
delegate = AlignDelegate()
self.setItemDelegateForColumn(0, delegate)
self.setItemDelegateForColumn(1, delegate)
fnt = self.font()
fnt.setBold(True)
fnt.setPointSize(14)
item[0].setFont(fnt)
self.item2[0].setFont(fnt)
""" QTimer interval 간격으로 item 디스플레이 업데이트 21.09.16 """ # Flag 추가 수정 21.10.03 소진
# Q Timer ------------------------------------------------------------------------------------------------------
timer = QTimer(self)
timer.setInterval(500) # 500 ms run = 0.5 sec
timer.timeout.connect(self.local_loop)
timer.start()
def popup(self):
r = self.currentItem().row()
r_db = {
0: {'Para': '', 'N': '', 'X': 'Time[Min]', 'Y': '', 'Yr': [0, 1]},
1: {'Para': '', 'N': '', 'X': 'Time[Min]', 'Y': '', 'Yr': [0, 1]},
2: {'Para': 'PCTMT', 'N': 'CTMT Pressure', 'X': 'Time[Min]', 'Y': 'PA', 'Yr': [0, 30000]},
3: {'Para': '', 'N': '', 'X': 'Time[Min]', 'Y': '', 'Yr': [0, 1]},
4: {'Para': 'ZINST58', 'N': 'PZR Pressure', 'X': 'Time[Min]', 'Y': 'Kg/cm^2', 'Yr': [0, 200]},
5: {'Para': 'ZINST78', 'N': 'S/G 1 Level', 'X': 'Time[Min]', 'Y': '%', 'Yr': [0, 100]},
6: {'Para': 'ZINST77', 'N': 'S/G 2 Level', 'X': 'Time[Min]', 'Y': '%', 'Yr': [0, 100]},
7: {'Para': 'ZSUMP', 'N': 'CTMP Sump Level', 'X': 'Time[Min]', 'Y': 'M', 'Yr': [0, 100]}
}
get_selected_para = r_db[r]['Para']
if get_selected_para != '':
self.popup_W = Trend(self, w=500, h=500,
para_name=r_db[r]['N'], para_id=r_db[r]['Para'],
para_range=r_db[r]['Yr'],
xtitle=r_db[r]['X'],
ytitle=r_db[r]['Y'])
self.popup_W.show()
def local_loop(self):
if self.parent is not None:
get_db = self.shmem.get_shmem_db()
"""
get_db is a dictionary with the following structure:
get_db = {
'para_id': {'Sig': sig, 'Val': 0, 'Num': idx, 'List': deque(maxlen=max_len)},
'para_id': {'Sig': sig, 'Val': 0, 'Num': idx, 'List': deque(maxlen=max_len)},
...
}
"""
self.item2[1].setText(f'{get_db["DCTMT"]["Val"]:.2f} mSv')
self.item2[2].setText(f'{get_db["PCTMT"]["Val"]:.2f} psig')
self.item2[3].setText(f'{get_db["UUPPPL"]["Val"]:.2f} °C')
self.item2[4].setText(f'{get_db["ZINST58"]["Val"]:.2f} psig')
self.item2[5].setText(f'{get_db["ZINST78"]["Val"]:.2f} %')
if get_db['ZINST78']['Val'] <= 45:
self.item2[5].setBackground(QColor(252, 227, 112))
else:
self.item2[5].setBackground(QColor(221, 221, 221))
self.item2[6].setText(f'{get_db["ZINST77"]["Val"]:.2f} %')
if get_db['ZINST77']['Val'] <= 45:
self.item2[6].setBackground(QColor(252, 227, 112))
else:
self.item2[6].setBackground(QColor(221, 221, 221))
self.item2[7].setText(f'{get_db["ZSUMP"]["Val"]:.2f} %')
Flag.value1_1 = str(get_db["DCTMT"]['Val'])
Flag.value1_2 = str(get_db["PCTMT"]['Val'])
Flag.value1_3 = str(get_db["UUPPPL"]['Val'])
Flag.value1_4 = str(get_db["ZINST58"]['Val'])
Flag.value1_5 = str(get_db["ZINST78"]['Val'])
Flag.value1_6 = str(get_db["ZINST77"]['Val'])
Flag.value1_7 = str(get_db["ZSUMP"]['Val'])
# ======================================================================================================================
class EndCondArea(QWidget):
def __init__(self, parent=None):
super(EndCondArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.setFixedHeight(357)
# 레이아웃
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
label2 = EndCondTable(self)
layout.addWidget(label2)
self.setLayout(layout)
class EndCondTable(QTableWidget):
def __init__(self, parent):
super(EndCondTable, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.shmem = parent.shmem
self.setObjectName('EndCondTable')
# 테이블 프레임 모양 정의
self.horizontalHeader().setVisible(False)
self.verticalHeader().setVisible(False) # Row 넘버 숨기기
# 편집 불가
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setFocusPolicy(Qt.NoFocus)
self.setSelectionMode(QAbstractItemView.NoSelection)
# 테이블 셋업
col_info = [('종료조건', 360), ('현재 상태', 253)] # 475
self.setColumnCount(len(col_info))
self.setRowCount(5)
col_names = []
for i, (l, w) in enumerate(col_info):
self.setColumnWidth(i, w)
col_names.append(l)
# 테이블 헤더
self.setHorizontalHeaderLabels(col_names)
self.horizontalHeader().setFixedHeight(69)
self.horizontalHeader().setStyleSheet("::section {background: rgb(221, 221, 221);font-size:14pt;font-weight: bold;}")
self.horizontalHeader().sectionPressed.disconnect()
# 테이블 행 높이 조절
for each in range(self.rowCount()):
self.setRowHeight(each, 69)
item = [0 * i for i in range(5)]
self.item2 = [0 * i for i in range(5)]
item[0] = QTableWidgetItem('종료조건')
item[1] = QTableWidgetItem('노심출구온도 < [T01]')
item[2] = QTableWidgetItem('발전소부지 경계 선량 < [R01]')
item[3] = QTableWidgetItem('격납건물 압력 < [P11]')
item[4] = QTableWidgetItem('격납건물 수소농도 < [H02]')
for i in range(5):
item[i].setFlags(Qt.NoItemFlags)
item[i].setForeground(QBrush(QColor(0, 0, 0)))
self.setItem(i, 0, item[i])
self.item2[0] = QTableWidgetItem('현재 발전소 변수')
self.item2[1] = QTableWidgetItem('0 °C')
self.item2[2] = QTableWidgetItem('0 mSv')
self.item2[3] = QTableWidgetItem('0 psig')
self.item2[4] = QTableWidgetItem('0 %')
for i in range(5):
self.setItem(i, 1, self.item2[i])
self.doubleClicked.connect(self.popup)
# 테이블 셀 내용 가운데 정렬
delegate = AlignDelegate()
self.setItemDelegateForColumn(0, delegate)
self.setItemDelegateForColumn(1, delegate)
fnt = self.font()
fnt.setPointSize(14)
fnt.setBold(True)
item[0].setFont(fnt)
self.item2[0].setFont(fnt)
""" QTimer interval 간격으로 item 디스플레이 업데이트 21.09.16 """# value 받아옴 - 21.10.03 소진
# Q Timer ------------------------------------------------------------------------------------------------------
timer = QTimer(self)
timer.setInterval(500) # 500 ms run = 0.5 sec
timer.timeout.connect(self.local_loop)
timer.start()
def popup(self):
r = self.currentItem().row()
r_db = {
0: {'Para':'', 'N':'', 'X':'Time[Min]', 'Y':'', 'Yr': [0, 1]},
1: {'Para':'', 'N':'', 'X':'Time[Min]', 'Y':'', 'Yr': [0, 1]},
2: {'Para':'PCTMT', 'N':'CTMT Pressure', 'X':'Time[Min]', 'Y':'PA', 'Yr': [0, 30000]},
3: {'Para':'', 'N':'', 'X':'Time[Min]', 'Y':'', 'Yr': [0, 1]},
}
get_selected_para = r_db[r]['Para']
if get_selected_para != '':
self.popup_W = Trend(self, w=500, h=500,
para_name=r_db[r]['N'], para_id=r_db[r]['Para'],
para_range=r_db[r]['Yr'],
xtitle=r_db[r]['X'],
ytitle=r_db[r]['Y'])
self.popup_W.show()
def local_loop(self):
if self.parent is not None:
get_db = self.shmem.get_shmem_db()
"""
get_db 의 구조는 딕셔너리로 아래와 같음.
get_db = {
'para_id': {'Sig': sig, 'Val': 0, 'Num': idx, 'List': deque(maxlen=max_len)},
'para_id': {'Sig': sig, 'Val': 0, 'Num': idx, 'List': deque(maxlen=max_len)},
...
}
"""
self.item2[1].setText(f'{get_db["UUPPPL"]["Val"]:.2f} °C')
self.item2[2].setText(f'{get_db["DCTMT"]["Val"]:.2f} mSv')
self.item2[3].setText(f'{get_db["PCTMT"]["Val"]:.2f} psig')
self.item2[4].setText(f'{get_db["H2CONC"]["Val"]:.2f} %')
Flag.value2_1 = str(get_db["UUPPPL"]['Val'])
Flag.value2_2 = str(get_db["DCTMT"]['Val'])
Flag.value2_3 = str(get_db["PCTMT"]['Val'])
Flag.value2_4 = str(get_db["H2CONC"]['Val'])
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainRight()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
window.show()
app.exec_()
```
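Both tables poll `self.shmem.get_shmem_db()` every 500 ms and only read the keys listed in their `local_loop` methods, so the right-hand panel can be exercised without the plant simulator by handing it a stand-in object. The sketch below is based on the docstring shape shown above; the real shared-memory wrapper in the repository exposes more than this.

```python
from collections import deque

class FakeShmem:
    """Minimal stand-in for the shared-memory wrapper (offline UI testing only)."""
    def __init__(self):
        keys = ['DCTMT', 'PCTMT', 'UUPPPL', 'ZINST58',
                'ZINST78', 'ZINST77', 'ZSUMP', 'H2CONC']
        self.db = {k: {'Sig': 0, 'Val': 0.0, 'Num': i, 'List': deque(maxlen=10)}
                   for i, k in enumerate(keys)}

    def get_shmem_db(self):
        return self.db
```

A parent widget (or a simple object) exposing `.shmem = FakeShmem()` could then be passed as `parent` to `MainRight`, which would drive the tables with static zeros.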
#### File: Jonghyun-Kim-73/SAMG_Project/Mitigation_Middle.py
```python
import os
import sys
import pandas as pd
from datetime import datetime
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
#from PyQt5.QtChart import *
# from button2 import custom_button
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
class MitigationMiddleArea(QWidget):
""" 중간 디스플레이 위젯 """
qss = """
QWidget {
background: rgb(10, 10, 10);
}
QLabel {
background: rgb(0, 0, 0);
border-radius: 6px;
color: rgb(255, 255, 255);
}
QTableWidget {
background: rgb(131, 131, 131)
}
"""
def __init__(self, parent=None):
super(MitigationMiddleArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.setStyleSheet(self.qss)
# size constraints
self.setMinimumHeight(860)
self.setMinimumWidth(int(1900/2))
# layout setup
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
label1 = M_MiddleArea()
layout.addWidget(label1)
self.setLayout(layout)
class M_MiddleArea(QWidget):
qss = """
QWidget {
background: rgb(255, 255, 255);
border-radius: 6px;
}
"""
def __init__(self):
super(M_MiddleArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.setStyleSheet(self.qss)
scroll = QScrollArea()
scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
condition = ConditionArea()
scroll.setWidget(condition)
layout = QVBoxLayout()
layout.addWidget(scroll)
self.setLayout(layout)
class ConditionArea(QWidget):
qss = """
QWidget {
background: rgb(255, 255, 255);
border-radius: 6px;
}
"""
def __init__(self):
super(ConditionArea, self).__init__()
self.setGeometry(0, 0, int(1900/2), 2000)
self.setStyleSheet(self.qss)
purpose = QLabel('● 목적\n'
' - Reactor Coolant System (RCS) 열 제거\n'
' - RCS를 감압하여 RCS 내로 냉각재 공급을 가능하게 함\n'
' - 증기발생기 튜브의 크립 파손 방지\n'
' - 증기발생기로 방출된 핵분열생성물의 세정(Scrubbing)\n'
' - 증기발생기 튜브 파손시 파손부를 통하여 RCS 내에 냉각재 공급\n'
'\n'
'\n'
'● 수행 조건\n'
' - 발전소 상태가 A 또는 B이고 어느 하나라도 증기발생기 수위가 협역수위 지시기로 68% 이하일 때\n'
' - 발전소 상태가 C이고 핵분열생성물의 방출이 있는 증기발생기 수위가 협역수위 지시기로68% 이하일 때'
'\n'
'\n'
'\n'
'● 예상 발전소 거동\n'
' - 증기발생기 수위 상승\n'
' - 증기발생기 증기 유량 증가\n'
' - 노심 출구와 고온관의 온도 감소\n'
' - 증기발생기 튜브 상부에 수소 축적\n'
'\n'
'\n'
'● 비상운전절차서와의 관계\n'
' - 상충성: 비상운전절차서에는 급수가 있는 증기발생기가 있을 경우에 급수가 없는 증기발생기에는\n'
' 급수를 주입하지 않고 있으나,중대사고관리지침서에서는 증기발생기 튜브의 크립 파손을 방지하기 위하여\n'
' 급수가 없는 증기발생기에도 급수 주입을 고려한다.\n'
' - 일치성: 이 전략은 기능회복절차서 회복-06, “노심 및 RCS 열 제거”와 일치한다.\n'
' 증기발생기의 급속한 감압으로 발생할 수 있는 증기발생기에서의 불필요한 열응력 증가를 피하기 위하여\n'
' 세심한 주의가 필요하며 정상적인 증기발생기 운전 절차도 이 지침에 적용될 수 있다.'
'\n'
'\n'
'● 계산보조도구\n'
' - 계산표-01, 장기 붕괴열 제거를 위한 냉각수 주입률'
)
purpose.setMinimumHeight(30)
purpose.setStyleSheet("Color : black; font-size: 14pt; font-weight: bold")
condition = QLabel('\n')
condition.setFixedHeight(1300)
condition.setStyleSheet("Color : black; font-size: 14pt; font-weight: bold")
# layout setup
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
layout.addWidget(purpose)
layout.addWidget(condition)
self.setLayout(layout)
if __name__ == '__main__':
print('test')
app = QApplication(sys.argv)
window = MitigationMiddleArea()
window.show()
app.exec_()
```
#### File: Jonghyun-Kim-73/SAMG_Project/Mitigation_top.py
```python
import os
import sys
from datetime import datetime
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class MitigationTopArea(QWidget):  # QWidget already derives from QObject, so the extra base is dropped
signal = pyqtSignal(name="s2")
# signal2 = pyqtSignal(int)
# signal3 = pyqtSignal(int)
# signal4 = pyqtSignal(int)
# signal5 = pyqtSignal(int)
# signal6 = pyqtSignal(int)
# signal7 = pyqtSignal(int)
# signal8 = pyqtSignal(int)
""" 왼쪽 디스플레이 위젯 """
qss = """
QWidget {
background: rgb(128, 128, 128);
border: 2px solid rgb(0, 0, 0);
color:rgb(0,0,0);
}
QPushButton {
color: rgb(0,0,0);
background: rgb(221, 221, 221);
font: bold 14px;
padding: 4px 4px;
}
"""
def __init__(self, parent=None):
super(MitigationTopArea, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.parent = parent
self.setStyleSheet(self.qss)
        # Size adjustment
self.setMinimumHeight(70)
# self.setFixedWidth(400)
        # Layout setup
layout = QHBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
self.change_color = False
self.btn_top_1 = QPushButton('Ⅰ.목적, 수행 조건 등')
self.btn_top_1.setMaximumHeight(60)
self.btn_top_1.setStyleSheet("color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_1.clicked.connect(self.click1)
self.btn_top_2 = QPushButton('Ⅱ.이용가능수단확인')
self.btn_top_2.setMaximumHeight(60)
self.btn_top_2.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_2.clicked.connect(self.start)
# self.btn_top_2.clicked.connect(self.click2)
self.btn_top_3 = QPushButton('Ⅲ.전략수행여부결정')
self.btn_top_3.setMaximumHeight(60)
self.btn_top_3.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_3.clicked.connect(self.send(1))
self.btn_top_4 = QPushButton('Ⅳ.전략수행방법결정')
self.btn_top_4.setMaximumHeight(60)
self.btn_top_4.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_4.clicked.connect(self.signal.emit)
self.btn_top_5 = QPushButton('Ⅴ.전략수행')
self.btn_top_5.setMaximumHeight(60)
self.btn_top_5.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_5.clicked.connect(self.signal.emit)
self.btn_top_6 = QPushButton('Ⅵ.전략종결')
self.btn_top_6.setMaximumHeight(60)
self.btn_top_6.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_6.clicked.connect(self.signal.emit)
self.btn_top_7 = QPushButton('제어-01로 이동')
self.btn_top_7.setMaximumHeight(60)
self.btn_top_7.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_7.clicked.connect(self.signal.emit)
self.btn_top_8 = QPushButton('기능-기반 디스플레이')
self.btn_top_8.setMaximumHeight(60)
self.btn_top_8.setStyleSheet("Color : black; font-size: 16pt; font-weight: bold")
# self.btn_top_8.clicked.connect(self.signal.emit)
layout.addWidget(self.btn_top_1)
layout.addWidget(self.btn_top_2)
layout.addWidget(self.btn_top_3)
layout.addWidget(self.btn_top_4)
layout.addWidget(self.btn_top_5)
layout.addWidget(self.btn_top_6)
layout.addWidget(self.btn_top_7)
layout.addWidget(self.btn_top_8)
self.setLayout(layout)
def click1(self):
# if self.change_color:
# self.change_color = False
# self.btn_top_1.setStyleSheet("QPushButton {color: rgb(0,0,0);background: rgb(221, 221, 221);font-size: 16pt; font-weight: bold}")
# else:
# self.change_color = True
# self.btn_top_1.setStyleSheet("QPushButton {color: rgb(0,0,0);background: rgb(0, 176, 218);font-size: 16pt; font-weight: bold}")
print("1클릭")
def click2(self):
if self.change_color:
self.change_color = False
self.btn_top_2.setStyleSheet(
"QPushButton {color: rgb(0,0,0);background: rgb(221, 221, 221);font-size: 16pt; font-weight: bold}")
else:
self.change_color = True
self.btn_top_2.setStyleSheet(
"QPushButton {color: rgb(0,0,0);background: rgb(0, 176, 218);font-size: 16pt; font-weight: bold}")
print("2클릭")
#
# @pyqtSlot()
# def start(self):
# def count(self, num):
# return num
#
# def run(self):
# self.signal.emit()
if __name__ == '__main__':
print('test')
app = QApplication(sys.argv)
window = MitigationTopArea()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_()
```
#### File: Jonghyun-Kim-73/SAMG_Project/Table_3_6.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class table_3_6(QWidget):
qss = """
QWidget {
background: rgb(221, 221, 221);
border : 0px solid;
}
QPushButton{
background-color: rgb(221,221,221);
border: 1px solid rgb(0,0,0);
font-size: 14pt;
font-weight: bold
}
QCheckBox::indicator {
width: 38px;
height: 60px;
}
QCheckBox::indicator::unchecked {
width: 38px;
height: 60px;
border : 0px solid;
}
QCheckBox::indicator::checked {
image : url(./check.png);
height:30px;
width:38px;
}
QTextEdit{
font-size: 18pt;
Color : black;
border : 0px solid
}
QTextEdit#button{
font-size: 12pt;
font-weight:bold;
Color : black;
border : 0px solid
}
QTableView {
gridline-color : black;
}
QHeaderView::section {
background: black;
}
"""
def __init__(self, parent=None):
super(table_3_6, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.setContentsMargins(0, 0, 0, 0)
self.setStyleSheet(self.qss)
        # Basic properties
layout = QVBoxLayout(self)
label = QTextEdit("5. 증기발생기 급수 주입 실시 여부를 결정한다.")
label.setStyleSheet("font-size: 18pt;font-weight: bold")
label.setContentsMargins(10, 10, 10, 20)
label.setDisabled(True)
        label.setFixedHeight(80)  # must be fixed because this is a QTextEdit (addStretch has no effect)
label1 = QTextEdit("가. 증기발생기 급수 주입을 실시하지 않았을 때의 결과를 평가한다.")
label1.setStyleSheet("font-size: 18pt;font-weight: bold")
label1.setContentsMargins(10, 10, 10, 20)
label1.setDisabled(True)
        label1.setFixedHeight(80)  # must be fixed because this is a QTextEdit (addStretch has no effect)
label2 = QTextEdit("<p style=\"line-height:130%\">나. 증기발생기 급수 주입을 실시하지 않았을 때 결과와 증기발생기 급수<p>"
"<p style=\"line-height:130%\">주입을 실시하였을 떄의 부정적 영향을 비교한다.<p>")
label2.setStyleSheet("font-size: 18pt;font-weight: bold")
label2.setContentsMargins(10, 10, 10, 20)
label2.setDisabled(True)
        label2.setFixedHeight(160)  # must be fixed because this is a QTextEdit (addStretch has no effect)
label3 = QTextEdit("<p style=\"line-height:130%\">다. 증기발생기 급수 주입을 실시하지 않기로 결정되었다면 전략수행<p>"
"<p style=\"line-height:130%\">제어도 또는 이 전략 수행 직전에 주행중이든 전략으로 되돌아간다.<p>")
label3.setStyleSheet("font-size: 18pt;font-weight: bold")
label3.setContentsMargins(10, 10, 10, 20)
label3.setDisabled(True)
        label3.setFixedHeight(160)  # must be fixed because this is a QTextEdit (addStretch has no effect)
self.setLayout(layout)
para_table = ParaTable(self)
layout.addWidget(label)
layout.addWidget(label1)
layout.addWidget(para_table)
layout.addWidget(label2)
layout.addWidget(label3)
layout.addStretch(1)
class ParaTable(QTableWidget):
def __init__(self, parent):
super(ParaTable, self).__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground, True)
self.horizontalHeader().setFixedHeight(1)
self.verticalHeader().setFixedWidth(1)
self.setContentsMargins(0, 0, 0, 0)
self.setFixedHeight(200)
self.setColumnCount(2)
self.setRowCount(4)
        # Disable editing
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setFocusPolicy(Qt.NoFocus)
self.setSelectionMode(QAbstractItemView.NoSelection)
        # Adjust table column widths
self.setColumnWidth(0, 798)
self.setColumnWidth(1, 38)
        for i in range(0, self.rowCount()):  # the table has 4 rows; range(0, 5) in the original indexed one row too many
self.setRowHeight(i, 40)
self.setItem(0, 0, QTableWidgetItem(" 증기발생기가 RCS의 열제거원 역할을 할 수 없음"))
self.setItem(1, 0, QTableWidgetItem(" 증기발생기 튜브의 건전성이 위협받을 수 있음"))
self.setItem(2, 0, QTableWidgetItem(" RCS를 감압하는 데 증기발생기를 사용할 수 없음"))
self.setItem(3, 0, QTableWidgetItem(" 증기발생기 튜브 파손부로 부터 누출된 핵분열 생성물을 세정할 수 없음"))
        # Checkboxes
for i in range(0, self.rowCount()):
self.checkbox = QCheckBox(self)
self.setCellWidget(i, 1, self.checkbox)
fnt = self.font()
fnt.setBold(True)
fnt.setPointSize(12)
self.setFont(fnt)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle("fusion")
window = table_3_6()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_()
```
#### File: Jonghyun-Kim-73/SAMG_Project/Table_5_5.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class table_5_5(QWidget):
""" 중간 디스플레이 위젯 """
qss = """
QWidget {
background: rgb(221, 221, 221);
border:0px solid;
}
QPushButton{
background-color: rgb(221,221,221);
border: 1px solid rgb(0,0,0);
font-size: 14pt;
font-weight: bold
}
QTableView {
gridline-color : black;
}
QHeaderView::section {
background: black;
}
QTextEdit{
font-size: 16pt;
Color : black;
border : 0px solid
}
"""
def __init__(self, parent=None):
super(table_5_5, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.setStyleSheet(self.qss)
self.setContentsMargins(0, 0, 0, 0)
        # Basic properties
layout = QVBoxLayout(self)
label = QTextEdit("11. 추가적인 증기발생기 급수 주입이 필요한지를 결정한다.")
label.setStyleSheet("font-size: 18pt;font-weight: bold")
label.setContentsMargins(10, 10, 10, 20)
label.setDisabled(True)
        label.setFixedHeight(40)  # must be fixed because this is a QTextEdit (addStretch has no effect)
label1 = QTextEdit("<p style=\"line-height:130%\">가. 현재의 급수 주입율이 적절한가 평가한다.<p>"
"<p style=\"line-height:130%\"> 1) 계산표-05, “장기붕괴열 제거를 위한 냉각재 주입율” 참조<p>"
"<p style=\"line-height:130%\"> 2) 발전소 반응 감시<p>"
"<p style=\"line-height:130%\"> • 증기발생기 수위 점검 – 안정적이거나 증가함.<p>"
"<p style=\"line-height:130%\"> • 증기발생기 압력 점검 – 안정적이거나 증기발생기로 급수하는 펌프의<p>"
"<p style=\"line-height:130%\"> 체결수두 이하임.<p>")
label1.setStyleSheet("font-size: 14pt;font-weight: bold")
label1.setContentsMargins(10, 10, 10, 20)
label1.setDisabled(True)
        label1.setFixedHeight(300)  # must be fixed because this is a QTextEdit (addStretch has no effect)
label2 = QTextEdit("나. 현재의 급수주입 경로가 적절하지 않고 추가적인 급수 주입 경로가 있다면\n단계 3으로 돌아간다.")
label2.setStyleSheet("font-size: 14pt;font-weight: bold")
label2.setContentsMargins(10, 10, 10, 20)
label2.setDisabled(True)
        label2.setFixedHeight(80)  # must be fixed because this is a QTextEdit (addStretch has no effect)
self.setLayout(layout)
para_table = ParaTable(self)
layout.addWidget(label)
layout.addWidget(label1)
layout.addWidget(para_table)
layout.addWidget(label2)
layout.addStretch()
class ParaTable(QTableWidget):
def __init__(self, parent):
super(ParaTable, self).__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground, True)
self.horizontalHeader().setFixedHeight(1)
self.verticalHeader().setFixedWidth(1)
self.setContentsMargins(0, 0, 0, 0)
self.setFixedHeight(300)
self.setColumnCount(2)
self.setRowCount(6)
        # Disable editing
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setFocusPolicy(Qt.NoFocus)
self.setSelectionMode(QAbstractItemView.NoSelection)
        # Adjust table column widths
self.setColumnWidth(0, 150)
self.setColumnWidth(1, 686)
for i in range(0, 6):
self.setRowHeight(i, 40)
self.setSpan(0, 0, 1, 2)
self.setItem(0, 0, QTableWidgetItem("증기발생기 현재 값"))
self.setItem(1, 0, QTableWidgetItem("증기발생기"))
self.setItem(1, 1, QTableWidgetItem("변수값"))
self.setItem(2, 0, QTableWidgetItem("증기발생기 1 수위"))
self.setItem(3, 0, QTableWidgetItem("증기발생기 2 수위"))
self.setItem(4, 0, QTableWidgetItem("증기발생기 1 압력"))
self.setItem(5, 0, QTableWidgetItem("증기발생기 2 압력"))
        # Center-align table items
delegate = AlignDelegate()
self.setItemDelegate(delegate)
fnt = self.font()
fnt.setBold(True)
fnt.setPointSize(12)
self.setFont(fnt)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle("fusion")
window = table_5_5()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_()
```
#### File: Jonghyun-Kim-73/SAMG_Project/Table_6_6.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class table_6_6(QWidget):
""" 중간 디스플레이 위젯 """
qss = """
QWidget {
background: rgb(221, 221, 221);
}
QTextEdit{
font-size: 18pt;
Color : black;
border : 0px solid;
}
QPushButton{
background-color: rgb(221,221,221);
border: 1px solid rgb(0,0,0);
font-size: 14pt;
font-weight: bold
}
QCheckBox {
margin-top:0px;
font-size:15px;
}
QCheckBox::indicator {
width: 40px;
height: 40px;
}
QCheckBox::indicator::unchecked {
width: 40px;
height: 40px;
border : 1px solid;
}
QCheckBox::indicator::checked {
image : url(./check.png);
height:40px;
width:40px;
}
"""
def __init__(self, parent=None):
super(table_6_6, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.setStyleSheet(self.qss)
self.setContentsMargins(0, 0, 0, 0)
        # Basic properties
layout = QVBoxLayout(self)
label = QTextEdit("12. 증기발생기 급수 주입으로 인한 장기관심사항을 확인한다.")
label.setStyleSheet("font-size: 18pt;font-weight: bold")
label.setContentsMargins(10, 10, 10, 20)
label.setDisabled(True)
        label.setFixedHeight(40)  # must be fixed because this is a QTextEdit (addStretch has no effect)
layout.addWidget(label)
self.s1 = []
self.checkbox = []
self.s1.append(QTextEdit('라. 가능한 회복 조치를 평가한다.'))
self.s1.append(QTextEdit('1) 권고된 범위를 벗어난 변수들에 대한 가능성 회복조치를 파악한다.'))
self.s1.append(QTextEdit('2) 적절한 회복 조치를 선정한다.'))
for i in range(3):
self.s1[i].setDisabled(True)
            self.s1[i].setFixedHeight(50)  # must be fixed because this is a QTextEdit (addStretch has no effect)
self.s1[0].setStyleSheet("font-size: 14pt;font-weight: bold")
self.layout1 = []
for i in range(3):
self.layout1.append(QHBoxLayout())
self.layout1[i].setContentsMargins(20, 5, 0, 25)
self.layout1[i].addWidget(self.s1[i])
if not i == 0:
self.layout1[i].addWidget(QCheckBox(self))
layout.addLayout(self.layout1[i])
self.layout1[1].setContentsMargins(20, 5, 0, 30)
layout.addStretch(1)
layout.setSizeConstraint(QLayout.SetMinimumSize)
self.setLayout(layout)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle("fusion")
window = table_6_6()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_()
```
#### File: SAMG_Project/TOOL/TOOL_CNS_UDP_FAST.py
```python
import socket
import logging
from struct import unpack, pack
from time import sleep
from numpy import shape
from collections import deque
class CNS:
    def __init__(self, thread_name, CNS_IP, CNS_Port, Remote_IP, Remote_Port, Max_len=10):
# thread name
self.th_name = threrad_name
# Ip, Port
self.Remote_ip, self.Remote_port = Remote_IP, Remote_Port
self.CNS_ip, self.CNS_port = CNS_IP, CNS_Port
# Read Socket
self.resv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.resv_sock.bind((self.Remote_ip, self.Remote_port))
# Send Socket
self.send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# SIZE BUFFER
self.size_buffer_mem = 46008
# SEND TICK
self.want_tick = 5
# memory
self.max_len = Max_len
self.mem = self._make_mem_structure(max_len=self.max_len)
# logger path
self.LoggerPath = ''
self.file_name = 0
# Control
self.SaveControlPara = []
self.SaveControlVal = []
def _make_mem_initial(self):
for pid_ in self.mem.keys():
self.mem[pid_]['Val'] = 0
def _make_mem_structure(self, max_len):
        # Declare the initial structure of shared_mem.
idx = 0
shared_mem = {}
for file_name in ['./db.txt', './db_add.txt']:
with open(file_name, 'r') as f:
while True:
temp_ = f.readline().split('\t')
if temp_[0] == '': # if empty space -> break
break
if temp_[0] == '#': # Pass this value. We don't require this value.
pass # These values are normally static values in SMABRES Code.
else:
sig = 0 if temp_[1] == 'INTEGER' else 1
shared_mem[temp_[0]] = {'Sig': sig, 'Val': 0, 'Num': idx, 'List': deque(maxlen=max_len)}
idx += 1
        # After this process, shared_mem maps each PID to { Sig, Val, Num, List }.
return shared_mem
def _update_mem(self):
data, _ = self.resv_sock.recvfrom(self.size_buffer_mem)
data = data[8:]
        # print(len(data))  # size of the buffer after the first 8 bytes are removed
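        # Layout note (inferred from the unpack formats below): each record is
        # 20 bytes -- a 12-byte NUL-padded parameter ID, a 4-byte value (int or
        # float depending on the 'sig' short peeked at offset 16), and two
        # 2-byte shorts for the type flag and the parameter index.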
for i in range(0, len(data), 20):
sig = unpack('h', data[16 + i: 18 + i])[0]
para = '12sihh' if sig == 0 else '12sfhh'
pid, val, sig, idx = unpack(para, data[i:20 + i])
pid = pid.decode().rstrip('\x00') # remove '\x00'
if pid != '':
self.mem[pid]['Val'] = val
# Alarm reprocessing
self._AlarmReprocessing()
def _append_val_to_list(self):
[self.mem[pid]['List'].append(self.mem[pid]['Val']) for pid in self.mem.keys()]
return 0
# -------
def _send_control_initial(self):
self.SaveControlPara = []
self.SaveControlVal = []
def _send_control_signal(self, para, val):
'''
        Send a control signal for the given parameters and values (no extra handling required).
:param para:
:param val:
:return:
'''
for i in range(shape(para)[0]):
self.mem[para[i]]['Val'] = val[i]
UDP_header = b'\x00\x00\x00\x10\xa8\x0f'
buffer = b'\x00' * 4008
temp_data = b''
# make temp_data to send CNS #
for i in range(shape(para)[0]):
pid_temp = b'\x00' * 12
pid_temp = bytes(para[i], 'ascii') + pid_temp[len(para[i]):] # pid + \x00 ..
para_sw = '12sihh' if self.mem[para[i]]['Sig'] == 0 else '12sfhh'
            # Skip custom parameters (names starting with 'c') that are not part of the CNS DB
if para[i][0] != 'c':
temp_data += pack(para_sw,
pid_temp,
self.mem[para[i]]['Val'],
self.mem[para[i]]['Sig'],
self.mem[para[i]]['Num'])
buffer = UDP_header + pack('h', shape(para)[0]) + temp_data + buffer[len(temp_data):]
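        # Datagram layout (as assembled above): 6-byte UDP_header, a 2-byte
        # record count, the packed 20-byte records, then zero padding so the
        # total length matches the fixed-size buffer the CNS expects.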
self.send_sock.sendto(buffer, (self.CNS_ip, self.CNS_port))
def _send_control_save(self, para, val):
"""
        Receive para and val and buffer them until _send_control_to_cns is called.
:param para: [a, b, c]
:param val: [1, 2, 3]
:return: -
"""
for _ in range(len(para)):
self.SaveControlPara.append(para[_])
self.SaveControlVal.append(val[_])
def _send_control_to_cns(self):
"""
Close send function
ex.
_send_control_save(['Para', 'Para'],[1, 1])
_send_control_to_cns()
:return: 0 or 1
"""
if self.SaveControlPara != []:
self._send_control_signal(self.SaveControlPara, self.SaveControlVal)
self._send_control_initial()
return 0 # Send function Success
else:
return 1 # Send function Fail due to no value in self.SaveControlPara
def _send_malfunction_signal(self, Mal_nub, Mal_opt, Mal_time):
'''
        Works with the CNS_04_18.tar version.
        :param Mal_nub: malfunction number
        :param Mal_opt: malfunction option
        :param Mal_time: time at which the malfunction is triggered
:return:
'''
if Mal_time == 0:
Mal_time = 5
else:
            Mal_time = Mal_time  # enter the value allowing for 1 second = 5 ticks
return self._send_control_signal(['KFZRUN', 'KSWO280', 'KSWO279', 'KSWO278'],
[10, Mal_nub, Mal_opt, Mal_time])
def _AlarmReprocessing(self):
"""
        If the current parameter is an alarm-related variable, overwrite its value with the result computed here.
# ----------- Left panel : KLARML(3,II) -----------------------------------------------------------------------
#
# ---------------------------------------------------
# | II = 1 | 9 | 17 | 25 | 33 | 41 |
# ---------------------------------------------------
# | 2 | 10 | 18 | 26 | 34 | 42|
# ---------------------------------------------------
# | 3 | 11 | 19 | 27 | 35 | 43 |
# ---------------------------------------------------
# | 4 | 12 | 20 | 28 | 36 | 44 |
# ---------------------------------------------------
# | 5 | 13 | 21 | 29 | 37 | 45 |
# ---------------------------------------------------
# | 6 | 14 | 22 | 30 | 38 | 46 |
# ---------------------------------------------------
# | 7 | 15 | 23 | 31 | 39 | 47 |
# ---------------------------------------------------
# | 8 | 16 | 24 | 32 | 40 | 48 |
# ---------------------------------------------------
#
# ==============================================================================================================
# ----------- Right panel : KLARMR(3,IJ)
#
# -----------------------------------------------------------------
# | IJ=1 | 7 | 13 | 18 | 21 | 26 | 32 | 38 |
# -----------------------------------------------------------------
# | 2 | 8 | 14 | 19 | 22 | 27 | 33 | 39 |
# -----------------------------------------------------------------
# | 3 | 9 | 15 | 20 | | 28 | 34 | 40 |
# -----------------------------------------------------------------
# | 4 | 10 | 16 | | 23 | 29 | 35 | 41 |
# -----------------------------------------------------------------
# | 5 | 11 | | | 24 | 30 | 36 | |
# -----------------------------------------------------------------
# | 6 | 12 | 17 | | 25 | 31 | 37 | 42 |
# -----------------------------------------------------------------
#
# ==============================================================================================================
"""
#
# Left panel
#
# --------- L1 Intermediate range high flux rod stop(20% of FP)
if self.mem['XPIRM']['Val'] > self.mem['CIRFH']['Val']:
self.mem['KLAMPO251']['Val'] = 1
else:
self.mem['KLAMPO251']['Val'] = 0
# --------- L2 Power range overpower rod stop(103% of FP)
if self.mem['QPROREL']['Val'] > self.mem['CPRFH']['Val']:
self.mem['KLAMPO252']['Val'] = 1
else:
self.mem['KLAMPO252']['Val'] = 0
# --------- L3 Control bank D full rod withdrawl(220 steps)
if self.mem['KZBANK4']['Val'] > 220:
self.mem['KLAMPO253']['Val'] = 1
else:
self.mem['KLAMPO253']['Val'] = 0
# --------- L4 Control bank lo-lo limit
# ******* Insertion limit(Reference : KNU 5&6 PLS)
#
RDTEMP = (self.mem['UMAXDT']['Val']/self.mem['CDT100']['Val']) * 100.0
if RDTEMP >= 100.0: RDTEMP = 100.
if RDTEMP <= 0.0: RDTEMP = 0.
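        # Rod insertion limits (sketch of the logic below): each control bank's
        # lower step limit is a linear function of RDTEMP (percent of the
        # full-power delta-T) built from the CRIL coefficients and clamped to
        # the bank's step range; the limit of the currently selected bank
        # (KBNKSEL) is kept in KRILM.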
if True:
CRIL = {1: 1.818, 2: 1.824, 3: 1.818, 4: 208.0,
5: 93.0, 6: -22.0, 7: 12.0}
# Control A
KRIL1 = 228
# Control B
KRIL2 = int(CRIL[1] * RDTEMP + CRIL[4])
if KRIL2 >= 228: KRIL2 = 228
# Control C
KRIL3 = int(CRIL[2]*RDTEMP + CRIL[5])
if KRIL3 >= 228: KRIL3 = 228
# Control D
if RDTEMP >= CRIL[7]:
KRIL4 = int(CRIL[3] * RDTEMP + CRIL[6])
if KRIL4 >= 160: KRIL4 = 160
if KRIL4 <= 0: KRIL4 = 0
else:
KRIL4 = 0
if self.mem['KBNKSEL']['Val'] == 1:
KRILM = KRIL1
elif self.mem['KBNKSEL']['Val'] == 2:
KRILM = KRIL2
elif self.mem['KBNKSEL']['Val'] == 3:
KRILM = KRIL3
elif self.mem['KBNKSEL']['Val'] == 4:
KRILM = KRIL4
else:
KRILM = 0
if ((self.mem['KZBANK1']['Val'] < KRIL1) or (self.mem['KZBANK2']['Val'] < KRIL2)
or (self.mem['KZBANK3']['Val'] < KRIL3) or (self.mem['KZBANK4']['Val'] < KRIL4)):
self.mem['KLAMPO254']['Val'] = 1
else:
self.mem['KLAMPO254']['Val'] = 0
# --------- L5 Two or more rod at bottom(ref:A-II-8 p.113 & KAERI87-39)
IROD = 0
for _ in range(1, 53):
if self.mem[f'KZROD{_}']['Val'] < 0.0:
IROD += 1
if IROD > 2:
self.mem['KLAMPO255']['Val'] = 1
else:
self.mem['KLAMPO255']['Val'] = 0
# --------- L6 Axial power distribution limit(3% ~ -12%)
if (self.mem['CAXOFF']['Val'] >= self.mem['CAXOFDL']['Val']) or \
(self.mem['CAXOFF']['Val'] <= (self.mem['CAXOFDL']['Val'] - 0.75)):
self.mem['KLAMPO256']['Val'] = 1
else:
self.mem['KLAMPO256']['Val'] = 0
# --------- L7 CCWS outlet temp hi(49.0 deg C)
if self.mem['UCCWIN']['Val'] >= self.mem['CUCCWH']['Val']:
self.mem['KLAMPO257']['Val'] = 1
else:
self.mem['KLAMPO257']['Val'] = 0
# --------- L8 Instrument air press lo(6.3 kg/cm2)
if self.mem['PINSTA']['Val'] <= (self.mem['CINSTP']['Val'] - 1.5):
self.mem['KLAMPO258']['Val'] = 1
else:
self.mem['KLAMPO258']['Val'] = 0
# --------- L9 RWST level lo-lo(5%)
if self.mem['ZRWST']['Val'] <= self.mem['CZRWSLL']['Val']:
self.mem['KLAMPO259']['Val'] = 1
else:
self.mem['KLAMPO259']['Val'] = 0
# --------- L10 L/D HX outlet flow lo(15 m3/hr)
if self.mem['WNETLD']['Val'] < self.mem['CWLHXL']['Val']:
self.mem['KLAMPO260']['Val'] = 1
else:
self.mem['KLAMPO260']['Val'] = 0
# --------- L11 L/D HX outlet temp hi(58 deg C)
if self.mem['UNRHXUT']['Val'] > self.mem['CULDHX']['Val']:
self.mem['KLAMPO261']['Val'] = 1
else:
self.mem['KLAMPO261']['Val'] = 0
# --------- L12 RHX L/D outlet temp hi(202 deg C)
if self.mem['URHXUT']['Val'] > self.mem['CURHX']['Val']:
self.mem['KLAMPO262']['Val'] = 1
else:
self.mem['KLAMPO262']['Val'] = 0
# --------- L13 VCT level lo(20 %)
if self.mem['ZVCT']['Val'] < self.mem['CZVCT2']['Val']:
self.mem['KLAMPO263']['Val'] = 1
else:
self.mem['KLAMPO263']['Val'] = 0
# --------- L14 VCT press lo(0.7 kg/cm2)
if self.mem['PVCT']['Val'] < self.mem['CPVCTL']['Val']:
self.mem['KLAMPO264']['Val'] = 1
else:
self.mem['KLAMPO264']['Val'] = 0
# --------- L15 RCP seal inj wtr flow lo(1.4 m3/hr)
        # Third check assumed to be the loop-3 seal injection flow (WRCPSI3);
        # the original repeated WRCPSI2, which looks like a copy-paste slip.
        if (self.mem['WRCPSI1']['Val'] < self.mem['CWRCPS']['Val'] or
                self.mem['WRCPSI2']['Val'] < self.mem['CWRCPS']['Val'] or
                self.mem['WRCPSI3']['Val'] < self.mem['CWRCPS']['Val']):
self.mem['KLAMPO265']['Val'] = 1
else:
self.mem['KLAMPO265']['Val'] = 0
# --------- L16 Charging flow cont flow lo(5 m3/hr)
if self.mem['WCHGNO']['Val'] < self.mem['CWCHGL']['Val']:
self.mem['KLAMPO266']['Val'] = 1
else:
self.mem['KLAMPO266']['Val'] = 0
# --------- R17 Not used
self.mem['KLAMPO267']['Val'] = 0
# --------- L18 L/D HX outlet flow hi (30 m3/hr)
if self.mem['WNETLD']['Val'] > self.mem['CWLHXH']['Val']:
self.mem['KLAMPO268']['Val'] = 1
else:
self.mem['KLAMPO268']['Val'] = 0
# --------- L19 PRZ press lo SI
CSAFEI = {1: 124.e5, 2: 40.3e5}
if (self.mem['PPRZN']['Val'] < CSAFEI[1]) and (self.mem['KSAFEI']['Val'] == 1):
self.mem['KLAMPO269']['Val'] = 1
else:
self.mem['KLAMPO269']['Val'] = 0
# --------- L20 CTMT spray actuated
if self.mem['KCTMTSP']['Val'] == 1:
self.mem['KLAMPO270']['Val'] = 1
else:
self.mem['KLAMPO270']['Val'] = 0
# --------- L21 VCT level hi(80 %)
if self.mem['ZVCT']['Val'] > self.mem['CZVCT6']['Val']:
self.mem['KLAMPO271']['Val'] = 1
else:
self.mem['KLAMPO271']['Val'] = 0
# --------- L22 VCT press hi (4.5 kg/cm2)
if self.mem['PVCT']['Val'] > self.mem['CPVCTH']['Val']:
self.mem['KLAMPO272']['Val'] = 1
else:
self.mem['KLAMPO272']['Val'] = 0
# --------- L23 CTMT phase B iso actuated
if self.mem['KCISOB']['Val'] == 1:
self.mem['KLAMPO273']['Val'] = 1
else:
self.mem['KLAMPO273']['Val'] = 0
# --------- L24 Charging flow cont flow hi(27 m3/hr)
if self.mem['WCHGNO']['Val'] > self.mem['CWCHGH']['Val']:
self.mem['KLAMPO274']['Val'] = 1
else:
self.mem['KLAMPO274']['Val'] = 0
# ---------
# --------- R43 Not used
self.mem['KLAMPO293']['Val'] = 0
# --------- R44 Not used
self.mem['KLAMPO294']['Val'] = 0
# --------- L45 CTMT sump level hi
CZSMPH = {1: 2.492, 2: 2.9238}
if self.mem['ZSUMP']['Val'] > CZSMPH[1]:
self.mem['KLAMPO295']['Val'] = 1
else:
self.mem['KLAMPO295']['Val'] = 0
# --------- L46 CTMT sump level hi-hi
if self.mem['ZSUMP']['Val'] > CZSMPH[2]:
self.mem['KLAMPO296']['Val'] = 1
else:
self.mem['KLAMPO296']['Val'] = 0
# --------- L47 CTMT air temp hi(48.89 deg C)
if self.mem['UCTMT']['Val'] > self.mem['CUCTMT']['Val']:
self.mem['KLAMPO297']['Val'] = 1
else:
self.mem['KLAMPO297']['Val'] = 0
# --------- L48 CTMT moisture hi(70% of R.H.)
if self.mem['HUCTMT']['Val'] > self.mem['CHCTMT']['Val']:
self.mem['KLAMPO298']['Val'] = 1
else:
self.mem['KLAMPO298']['Val'] = 0
#
# Right panel
#
# --------- R1 Rad hi alarm
if (self.mem['DCTMT']['Val'] > self.mem['CRADHI']['Val']) or \
(self.mem['DSECON']['Val'] >= 3.9E-3):
self.mem['KLAMPO301']['Val'] = 1
else:
self.mem['KLAMPO301']['Val'] = 0
# --------- R2 CTMT press hi 1 alert
CPCMTH = {1: 0.3515, 2: 1.02, 3: 1.62}
if self.mem['PCTMT']['Val'] * self.mem['PAKGCM']['Val'] > CPCMTH[1]:
self.mem['KLAMPO302']['Val'] = 1
else:
self.mem['KLAMPO302']['Val'] = 0
# --------- R3 CTMT press hi 2 alert
if self.mem['PCTMT']['Val'] * self.mem['PAKGCM']['Val'] > CPCMTH[2]:
self.mem['KLAMPO303']['Val'] = 1
else:
self.mem['KLAMPO303']['Val'] = 0
# --------- R4 CTMT press hi 3 alert
if self.mem['PCTMT']['Val'] * self.mem['PAKGCM']['Val'] > CPCMTH[3]:
self.mem['KLAMPO304']['Val'] = 1
else:
self.mem['KLAMPO304']['Val'] = 0
# --------- R5 Accum. Tk press lo (43.4 kg/cm2)
if self.mem['PACCTK']['Val'] < self.mem['CPACCL']['Val']:
self.mem['KLAMPO305']['Val'] = 1
else:
self.mem['KLAMPO305']['Val'] = 0
# --------- R6 Accum. Tk press hi ( /43.4 kg/cm2)
if self.mem['PACCTK']['Val'] > self.mem['CPACCH']['Val']:
self.mem['KLAMPO306']['Val'] = 1
else:
self.mem['KLAMPO306']['Val'] = 0
# --------- R7 PRZ press hi alert(162.4 kg/cm2)
if self.mem['PPRZ']['Val'] > self.mem['CPPRZH']['Val']:
self.mem['KLAMPO307']['Val'] = 1
else:
self.mem['KLAMPO307']['Val'] = 0
# --------- R8 PRZ press lo alert(153.6 kg/cm2)
if self.mem['PPRZ']['Val'] < self.mem['CPPRZL']['Val']:
self.mem['KLAMPO308']['Val'] = 1
else:
self.mem['KLAMPO308']['Val'] = 0
# --------- R9 PRZ PORV opening(164.2 kg/cm2)
if self.mem['BPORV']['Val'] > 0.01:
self.mem['KLAMPO309']['Val'] = 1
else:
self.mem['KLAMPO309']['Val'] = 0
# --------- R10 PRZ cont level hi heater on(over 5%) !%deail....
DEPRZ = self.mem['ZINST63']['Val'] / 100
if (DEPRZ > (self.mem['ZPRZSP']['Val'] + self.mem['CZPRZH']['Val'])) and (self.mem['QPRZB']['Val'] > self.mem['CQPRZP']['Val']):
self.mem['KLAMPO310']['Val'] = 1
else:
self.mem['KLAMPO310']['Val'] = 0
# --------- R11 PRZ cont level lo heater off(17%) !%deail....
if (DEPRZ < self.mem['CZPRZL']['Val']) and (self.mem['QPRZ']['Val'] >= self.mem['CQPRZP']['Val']):
self.mem['KLAMPO311']['Val'] = 1
else:
self.mem['KLAMPO311']['Val'] = 0
# --------- R12 PRZ press lo back-up heater on(153.6 kg/cm2)
if (self.mem['PPRZN']['Val'] < self.mem['CQPRZB']['Val']) and (self.mem['KBHON']['Val'] == 1):
self.mem['KLAMPO312']['Val'] = 1
else:
self.mem['KLAMPO312']['Val'] = 0
# --------- R13 Tref/Auct. Tavg Deviation(1.67 deg C)
if ((self.mem['UAVLEGS']['Val'] - self.mem['UAVLEGM']['Val']) > self.mem['CUTDEV']['Val']) or\
((self.mem['UAVLEGM']['Val'] - self.mem['UAVLEGS']['Val']) > self.mem['CUTDEV']['Val']):
self.mem['KLAMPO313']['Val'] = 1
else:
self.mem['KLAMPO313']['Val'] = 0
# --------- R14 RCS 1,2,3 Tavg hi(312.78 deg C)
if self.mem['UAVLEGM']['Val'] > self.mem['CUTAVG']['Val']:
self.mem['KLAMPO314']['Val'] = 1
else:
self.mem['KLAMPO314']['Val'] = 0
# --------- R15 RCS 1,2,3 Tavg/auct Tavg hi/lo(1.1 deg C)
RUAVMX = max(self.mem['UAVLEG1']['Val'], self.mem['UAVLEG2']['Val'],
self.mem['UAVLEG3']['Val'])
RAVGT = {1: abs((self.mem['UAVLEG1']['Val']) - RUAVMX),
2: abs((self.mem['UAVLEG2']['Val']) - RUAVMX),
3: abs((self.mem['UAVLEG3']['Val']) - RUAVMX)}
if max(RAVGT[1], RAVGT[2], RAVGT[3]) > self.mem['CUAUCT']['Val']:
self.mem['KLAMPO315']['Val'] = 1
else:
self.mem['KLAMPO315']['Val'] = 0
# --------- R16 RCS 1,2,3 lo flow alert(92% from KAERI87-37)
CWSGRL = {1: 4232.0, 2: 0.0}
        # Loops 2 and 3 assumed below (WSGRCP2/3, KRCP2/3); the original
        # compared loop 1 three times, which looks like a copy-paste slip.
        if ((self.mem['WSGRCP1']['Val'] < CWSGRL[1] and self.mem['KRCP1']['Val'] == 1) or
                (self.mem['WSGRCP2']['Val'] < CWSGRL[1] and self.mem['KRCP2']['Val'] == 1) or
                (self.mem['WSGRCP3']['Val'] < CWSGRL[1] and self.mem['KRCP3']['Val'] == 1)):
self.mem['KLAMPO316']['Val'] = 1
else:
self.mem['KLAMPO316']['Val'] = 0
# --------- R17 PRT temp hi(45deg C )
if self.mem['UPRT']['Val'] > self.mem['CUPRT']['Val']:
self.mem['KLAMPO317']['Val'] = 1
else:
self.mem['KLAMPO317']['Val'] = 0
# --------- R18 PRT press hi( 0.6kg/cm2)
if (self.mem['PPRT']['Val'] - 0.98E5) > self.mem['CPPRT']['Val']:
self.mem['KLAMPO318']['Val'] = 1
else:
self.mem['KLAMPO318']['Val'] = 0
# --------- R19 SG 1,2,3 level lo(25% of span)
if (self.mem['ZINST78']['Val']*0.01 < self.mem['CZSGW']['Val']) \
or (self.mem['ZINST77']['Val']*0.01 < self.mem['CZSGW']['Val']) \
or (self.mem['ZINST76']['Val']*0.01 < self.mem['CZSGW']['Val']):
self.mem['KLAMPO319']['Val'] = 1
else:
self.mem['KLAMPO319']['Val'] = 0
# --------- R20 SG 1,2,3 stm/FW flow deviation(10% of loop flow)
RSTFWD = {1: self.mem['WSTM1']['Val'] * 0.1,
2: self.mem['WSTM2']['Val'] * 0.1,
3: self.mem['WSTM3']['Val'] * 0.1}
if (((self.mem['WSTM1']['Val'] - self.mem['WFWLN1']['Val']) > RSTFWD[1]) or
((self.mem['WSTM2']['Val'] - self.mem['WFWLN2']['Val']) > RSTFWD[2]) or
((self.mem['WSTM3']['Val'] - self.mem['WFWLN3']['Val']) > RSTFWD[3])):
self.mem['KLAMPO320']['Val'] = 1
else:
self.mem['KLAMPO320']['Val'] = 0
# --------- R21 RCP 1,2,3 trip
if self.mem['KRCP1']['Val'] + self.mem['KRCP2']['Val'] + self.mem['KRCP3']['Val'] != 3:
self.mem['KLAMPO321']['Val'] = 1
else:
self.mem['KLAMPO321']['Val'] = 0
# --------- R22 Condensate stor Tk level lo
CZCTKL = {1: 8.55, 2: 7.57}
if self.mem['ZCNDTK']['Val'] < CZCTKL[1]:
self.mem['KLAMPO322']['Val'] = 1
else:
self.mem['KLAMPO322']['Val'] = 0
# --------- R23 Condensate stor Tk level lo-lo
if self.mem['ZCNDTK']['Val'] < CZCTKL[2]:
self.mem['KLAMPO323']['Val'] = 1
else:
self.mem['KLAMPO323']['Val'] = 0
# --------- R24 Condensate stor Tk level hi
if self.mem['ZCNDTK']['Val'] > self.mem['CZCTKH']['Val']:
self.mem['KLAMPO324']['Val'] = 1
else:
self.mem['KLAMPO324']['Val'] = 0
# --------- R25 MSIV tripped
if self.mem['BHV108']['Val'] == 0 or self.mem['BHV208']['Val'] == 0 or self.mem['BHV308']['Val'] == 0:
self.mem['KLAMPO325']['Val'] = 1
else:
self.mem['KLAMPO325']['Val'] = 0
# --------- R26 MSL press rate hi steam iso
if len(self.mem['KLAMPO325']['List']) >= 3:
PSGLP = {1: self.mem['PSG1']['List'][-2],
2: self.mem['PSG2']['List'][-2],
3: self.mem['PSG3']['List'][-2]}
RMSLPR = {1: abs((PSGLP[1] - self.mem['PSG1']['List'][-1]) * 5.0),
2: abs((PSGLP[2] - self.mem['PSG2']['List'][-1]) * 5.0),
3: abs((PSGLP[3] - self.mem['PSG3']['List'][-1]) * 5.0)}
if (((RMSLPR[1] >= self.mem['CMSLH']['Val']) or
(RMSLPR[2] >= self.mem['CMSLH']['Val']) or
(RMSLPR[3] >= self.mem['CMSLH']['Val'])) and (self.mem['KMSISO']['Val'] == 1)):
self.mem['KLAMPO326']['Val'] = 1
else:
self.mem['KLAMPO326']['Val'] = 0
# --------- RK27 MSL 1,2,3 press rate hi(-7.03 kg/cm*2/sec = 6.89E5 Pa/sec)
if ((RMSLPR[1] >= self.mem['CMSLH']['Val']) or
(RMSLPR[2] >= self.mem['CMSLH']['Val']) or
(RMSLPR[3] >= self.mem['CMSLH']['Val'])):
self.mem['KLAMPO327']['Val'] = 1
else:
self.mem['KLAMPO327']['Val'] = 0
# --------- R28 MSL 1,2,3 press low(41.1 kg/cm*2 = 0.403E7 pas)
if ((self.mem['PSG1']['Val'] < self.mem['CPSTML']['Val']) or
(self.mem['PSG2']['Val'] < self.mem['CPSTML']['Val']) or
(self.mem['PSG3']['Val'] < self.mem['CPSTML']['Val'])):
self.mem['KLAMPO328']['Val'] = 1
else:
self.mem['KLAMPO328']['Val'] = 0
# --------- R29 AFW(MD) actuated
if (self.mem['KAFWP1']['Val'] == 1) or (self.mem['KAFWP3']['Val'] == 1):
self.mem['KLAMPO329']['Val'] = 1
else:
self.mem['KLAMPO329']['Val'] = 0
# --------- R30 Condenser level lo(27")
if self.mem['ZCOND']['Val'] < self.mem['CZCNDL']['Val']:
self.mem['KLAMPO330']['Val'] = 1
else:
self.mem['KLAMPO330']['Val'] = 0
# --------- R31 FW pump discharge header press hi
if self.mem['PFWPOUT']['Val'] > self.mem['CPFWOH']['Val']:
self.mem['KLAMPO331']['Val'] = 1
else:
self.mem['KLAMPO331']['Val'] = 0
# --------- R32 FW pump trip
if (self.mem['KFWP1']['Val'] + self.mem['KFWP2']['Val'] + self.mem['KFWP3']['Val']) == 0:
self.mem['KLAMPO332']['Val'] = 1
else:
self.mem['KLAMPO332']['Val'] = 0
# --------- R33 FW temp hi(231.1 deg C)
if self.mem['UFDW']['Val'] > self.mem['CUFWH']['Val']:
self.mem['KLAMPO333']['Val'] = 1
else:
self.mem['KLAMPO333']['Val'] = 0
# --------- R34 Condensate pump flow lo(1400 gpm=88.324 kg/s)
if self.mem['WCDPO']['Val'] * 0.047 > self.mem['CWCDPO']['Val']:
self.mem['KLAMPO334']['Val'] = 1
else:
self.mem['KLAMPO334']['Val'] = 0
# --------- R35 Condenser abs press hi(633. mmmHg)
if self.mem['PVAC']['Val'] < self.mem['CPVACH']['Val']:
self.mem['KLAMPO335']['Val'] = 1
else:
self.mem['KLAMPO335']['Val'] = 0
# --------- R36 Condenser level hi (45" )
if self.mem['ZCOND']['Val'] > self.mem['CZCNDH']['Val']:
self.mem['KLAMPO336']['Val'] = 1
else:
self.mem['KLAMPO336']['Val'] = 0
# --------- R37 TBN trip P-4
if (self.mem['KTBTRIP']['Val'] == 1) and (self.mem['KRXTRIP']['Val'] == 1):
self.mem['KLAMPO337']['Val'] = 1
else:
self.mem['KLAMPO337']['Val'] = 0
# --------- R38 SG 1,2,3 wtr level hi-hi TBN trip
CPERMS8 = 0.78
if (self.mem['ZSGNOR1']['Val'] > CPERMS8) or \
(self.mem['ZSGNOR2']['Val'] > CPERMS8) or \
(self.mem['ZSGNOR3']['Val'] > CPERMS8):
self.mem['KLAMPO338']['Val'] = 1
else:
self.mem['KLAMPO338']['Val'] = 0
# --------- R39 Condenser vacuum lo TBN trip
if (self.mem['PVAC']['Val'] < 620.0) and (self.mem['KTBTRIP']['Val'] == 1):
self.mem['KLAMPO339']['Val'] = 1
else:
self.mem['KLAMPO339']['Val'] = 0
# --------- R40 TBN overspeed hi TBN trip
if (self.mem['FTURB']['Val'] > 1980.0) and (self.mem['KTBTRIP']['Val'] == 1):
self.mem['KLAMPO340']['Val'] = 1
else:
self.mem['KLAMPO340']['Val'] = 0
# --------- R42 Gen. brk open
if self.mem['KGENB']['Val'] == 0:
self.mem['KLAMPO341']['Val'] = 1
else:
self.mem['KLAMPO341']['Val'] = 0
return 0
# -------
def run_cns(self):
para = []
sig = []
# if self.mem['QPROREL']['Val'] >= 0.04 and self.mem['KBCDO17']['Val'] <= 1800:
# if self.mem['KBCDO17']['Val'] < 1780: # 1780 -> 1872
# para.append('KSWO213')
# sig.append(1)
# elif self.mem['KBCDO17']['Val'] >= 1780:
# para.append('KSWO213')
# sig.append(0)
# if self.mem['KBCDO19']['Val'] >= 1780 and self.mem['KLAMPO224']['Val'] == 0: # and self.mem['QPROREL']['Val'] >= 0.15:
# para.append('KSWO244')
# sig.append(1)
para.append('KFZRUN')
# sig.append(3)
sig.append(self.want_tick+100) # 400 - 100 -> 300 tick 20 sec
return self._send_control_signal(para, sig)
def init_cns(self, initial_nub):
        # Refresh the data accumulated over the UDP connection
self._send_control_signal(['KFZRUN', 'KSWO277'], [5, initial_nub])
while True:
self._update_mem()
if self.mem['KFZRUN']['Val'] == 6:
                # When initialization completes the value becomes 6, so break
break
elif self.mem['KFZRUN']['Val'] == 5:
                # Initialization not finished yet
pass
else:
                # Value 4 means the previous episode ended and the state is still 4, so request initialization again
self._send_control_signal(['KFZRUN'], [5])
pass
def run_freeze_CNS(self):
old_cont = self.mem['KCNTOMS']['Val'] + self.want_tick
self.run_cns()
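        # Poll the simulator until it has advanced exactly want_tick ticks
        # (KCNTOMS equals old_cont) and reports KFZRUN == 4 (run finished);
        # only then is the shared memory consistent enough to log a line.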
while True:
self._update_mem()
new_cont = self.mem['KCNTOMS']['Val']
if old_cont == new_cont:
if self.mem['KFZRUN']['Val'] == 4:
                    # Changes to 4 when one run completes
                    # The data is now up to date, so append each Val to its List
                    # At this point every Val has been updated; appending and data logging are performed here as well.
self.mem['cMALA']['Val'] = 1 if self.mem['cMALT']['Val'] <= self.mem['KCNTOMS']['Val'] else 0
self.mem['cMALCA']['Val'] = self.mem['cMALC']['Val'] if self.mem['cMALT']['Val'] <= self.mem['KCNTOMS']['Val'] else 0
self.save_line()
break
else:
pass
else:
pass
def get_CNS_time(self):
return self.mem['KCNTOMS']['Val']
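    # Note: KCNTOMS is the CNS tick counter; per the comment in
    # _send_malfunction_signal above, roughly 5 ticks correspond to 1 second.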
# logger
def init_line(self):
with open(f"./{self.LoggerPath}/{self.file_name}.txt", 'w') as f:
DIS = ''
for para_name in self.mem.keys():
DIS += f'{para_name},'
f.write(f'{DIS}\n')
def save_line(self):
with open(f"./{self.LoggerPath}/{self.file_name}.txt", 'a') as f:
DIS = ''
for para_name in self.mem.keys():
DIS += f"{self.mem[para_name]['Val']},"
f.write(f'{DIS}\n')
    # Methods intended for actual use
def reset(self, initial_nub=1, mal=False, mal_case=0, mal_opt=0, mal_time=0, file_name=0):
self.file_name = file_name # Update ep number
self.init_line()
# mem reset
# self.mem = self._make_mem_structure(max_len=self.max_len)
self._make_mem_initial()
self.mem['cINIT']['Val'] = initial_nub
self.mem['cMAL']['Val'] = 1 if mal is True else 0
self.mem['cMALA']['Val'] = 0
self.mem['cMALC']['Val'] = mal_case
self.mem['cMALO']['Val'] = mal_opt
self.mem['cMALT']['Val'] = mal_time
self.init_cns(initial_nub=initial_nub)
sleep(1)
if mal:
self._send_malfunction_signal(Mal_nub=mal_case, Mal_opt=mal_opt, Mal_time=mal_time)
sleep(2)
# if mal_case2 != 0:
# self._send_malfunction_signal(Mal_nub=mal_case2, Mal_opt=mal_opt2, Mal_time=mal_time2)
# sleep(2)
def step(self):
self.run_freeze_CNS()
if __name__ == '__main__':
module = CNS('Main', '192.168.0.103', 7101, '192.168.0.29', 7101)
module.init_cns(1)
print(module.mem['KFZRUN']['Val'], module.mem['KCNTOMS']['Val'])
module._send_malfunction_signal(12, 100100, 10)
sleep(1)
print(module.mem['KFZRUN']['Val'], module.mem['KCNTOMS']['Val'])
for _ in range(20):
module.run_freeze_CNS()
print(module.mem['KFZRUN']['Val'], module.mem['KCNTOMS']['Val'])
module.init_cns(2)
print(module.mem['KFZRUN']['Val'], module.mem['KCNTOMS']['Val'])
for _ in range(5):
module.run_freeze_CNS()
print(module.mem['KFZRUN']['Val'], module.mem['KCNTOMS']['Val'])
```
#### File: SAMG_Project/TOOL/TOOL_CSF.py
```python
class CSFTree:
@staticmethod
def CSF1(TRIP, PR, IR, SR):
"""
        Subcriticality status tree (satisfied / not satisfied).
        :param TRIP: 1: reactor tripped, 0: operating
        :param PR: Power Range [100 ~ 0]
        :param IR: Intermediate Range [-3 ~ .. ]
        :param SR: Source Range [0.0 ~ ..]
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
if not PR < 5: # 5%
return {'L': 3, 'N': 0, 'P': 'S1'} # GOTO 회복 S.1
else:
if IR <= 0:
if IR < 1E-9:
                        if SR <= 2:  # originally 1
return {'L': 0, 'N': 1, 'P': 'Ok'} # OK!
else:
return {'L': 1, 'N': 2, 'P': 'S2'} # GOTO 회복 S.2
else:
if IR < -0.2:
return {'L': 0, 'N': 3, 'P': 'Ok'} # OK!
else:
return {'L': 1, 'N': 4, 'P': 'S2'} # GOTO 회복 S.2
else:
return {'L': 2, 'N': 5, 'P': 'S1'} # GOTO 회복 S.1
else:
return {'L': 0, 'N': 6, 'P': 'Ok'} # Ok!
@staticmethod
def CSF2(TRIP, CET, PT):
"""
        Core cooling status tree.
        :param TRIP: 1: reactor tripped, 0: operating
        :param CET: CoreExitTemp [ .. ~ 326 ]
        :param PT: PTCurve [ 0 satisfied, 1 violated ]
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
if CET < 649:
if PT == 0:
return {'L': 0, 'N': 0, 'P': 'Ok'} # OK!
else:
if CET < 371:
return {'L': 1, 'N': 1, 'P': 'C3'} # GOTO 회복 C.3
else:
return {'L': 2, 'N': 2, 'P': 'C2'} # GOTO 회복 C.2
else:
return {'L': 3, 'N': 3, 'P': 'C1'} # GOTO 회복 C.1
else:
return {'L': 0, 'N': 4, 'P': 'Ok'} # Ok!
@staticmethod
def CSF3(TRIP, SG1N, SG2N, SG3N, SG1P, SG2P, SG3P, SG1F, SG2F, SG3F):
"""
        Heat sink status tree.
        :param TRIP: 1: reactor tripped, 0: operating
:param SG1N: SG 1 Narrow Level [0 ~ 50]
:param SG2N: SG 2 Narrow Level [0 ~ 50]
:param SG3N: SG 3 Narrow Level [0 ~ 50]
        :param SG1P: SG 1 Pressure [ 0 ~ 100 ]
        :param SG2P: SG 2 Pressure [ 0 ~ 100 ]
        :param SG3P: SG 3 Pressure [ 0 ~ 100 ]
:param SG1F: SG 1 Feedwater [ 0 ~ 25 ] in emergency
:param SG2F: SG 2 Feedwater [ 0 ~ 25 ] in emergency
:param SG3F: SG 3 Feedwater [ 0 ~ 25 ] in emergency
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
if SG1N >= 6 or SG2N >= 6 or SG3N >= 6:
pass
else:
if SG1F + SG2F + SG3F >= 33:
pass
else:
return {'L': 3, 'N': 1, 'P': 'H1'} # GOTO 회복 H.1
# --
if not SG1P < 88.6 and not SG2P < 88.6 and not SG3P < 88.6:
return {'L': 1, 'N': 2, 'P': 'H2'} # GOTO 회복 H.2
else:
if not SG1N < 78 and not SG2N < 78 and not SG3N < 78:
return {'L': 1, 'N': 3, 'P': 'H3'} # GOTO 회복 H.3
else:
if not SG1P < 83.3 and not SG2P < 83.3 and not SG3P < 83.3:
return {'L': 1, 'N': 4, 'P': 'H4'} # GOTO 회복 H.4
else:
if not SG1N > 6 and not SG2N > 6 and not SG3N > 6:
return {'L': 1, 'N': 5, 'P': 'H5'} # GOTO 회복 H.5
else:
return {'L': 0, 'N': 6, 'P': 'Ok'} # OK!
else:
return {'L': 0, 'N': 7, 'P': 'Ok'} # Ok!
@staticmethod
def CSF4(TRIP, RC1, RC2, RC3, RP, PT, TIME):
"""
        RCS integrity status tree.
        :param TRIP: 1: reactor tripped, 0: operating
        :param RC1: RCS Cool LOOP 1 [List] [270 ..]
        :param RC2: RCS Cool LOOP 2 [List] [270 ..]
        :param RC3: RCS Cool LOOP 3 [List] [270 ..]
        :param RP: RCS pressure [160 ~ ..]
        :param PT: PTCurve [ 0 satisfied, 1 violated ]
        :param TIME: CNS TIME [List] [5 tick ~ ..]
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
RC1AVG = sum(list(RC1)[:-1]) / len(list(RC1)[:-1])
RC2AVG = sum(list(RC2)[:-1]) / len(list(RC2)[:-1])
RC3AVG = sum(list(RC3)[:-1]) / len(list(RC3)[:-1])
if not RC1[-1] < RC1AVG and not RC2[-1] < RC2AVG and not RC3[-1] < RC3AVG:
if not PT == 0:
return {'L': 3, 'N': 0, 'P': 'P1'} # GOTO 회복 P.1
else:
if not RC1[-1] > 106 and not RC2[-1] > 106 and not RC3[-1] > 106:
return {'L': 2, 'N': 1, 'P': 'P1'} # GOTO 회복 P.1
else:
if not RC1[-1] > 136 and not RC2[-1] > 136 and not RC3[-1] > 136:
return {'L': 1, 'N': 2, 'P': 'P2'} # GOTO 회복 P.2
else:
return {'L': 0, 'N': 3, 'P': 'Ok'} # Ok!
else:
if not RC1[-1] > 177 and not RC2[-1] > 177 and not RC3[-1] > 177:
if not PT == 0:
if not RC1[-1] > 106 and not RC2[-1] > 106 and not RC3[-1] > 106:
return {'L': 2, 'N': 4, 'P': 'P1'} # GOTO 회복 P.1
else:
return {'L': 1, 'N': 5, 'P': 'P2'} # GOTO 회복 P.2
else:
return {'L': 0, 'N': 6, 'P': 'Ok'} # Ok!
else:
return {'L': 0, 'N': 7, 'P': 'Ok'} # Ok!
else:
return {'L': 0, 'N': 8, 'P': 'Ok'} # Ok!
@staticmethod
def CSF5(TRIP, CTP, CTS, CTR):
"""
        Containment integrity status tree.
        :param TRIP: 1: reactor tripped, 0: operating
        :param CTP: CTMT pressure [... ~ 0.2]
        :param CTS: CTMT sump level [0 ~ ... ]
        :param CTR: CTMT radiation [2.0 ~ ... ]
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
if not CTP < 4.2:
return {'L': 3, 'N': 0, 'P': 'Z1'} # GOTO 회복 Z.1
else:
if not CTP < 1.55:
return {'L': 2, 'N': 1, 'P': 'Z1'} # GOTO 회복 Z.1
else:
if not CTS < 0.345:
return {'L': 2, 'N': 2, 'P': 'Z2'} # GOTO 회복 Z.2
else:
if not CTR < 1E4:
return {'L': 1, 'N': 3, 'P': 'Z3'} # GOTO 회복 Z.3
else:
return {'L': 0, 'N': 4, 'P': 'Ok'} # Ok!
else:
return {'L': 0, 'N': 5, 'P': 'Ok'} # Ok!
@staticmethod
def CSF6(TRIP, PZRL):
"""
        RCS inventory status tree.
        :param TRIP: 1: reactor tripped, 0: operating
        :param PZRL: PZR level
        :return: {'L': 0 satisfied, 1: yellow, 2: orange, 3: red, 'N': exit step, 'P': procedure}
"""
if TRIP == 1:
            if not PZRL < 101:  # <----------------------- originally 92
return {'L': 1, 'N': 0, 'P': 'I1'} # GOTO 회복 I.1
else:
if not PZRL > 17:
return {'L': 1, 'N': 1, 'P': 'I2'} # GOTO 회복 I.2
else:
if not 17 <= PZRL <= 92:
return {'L': 1, 'N': 2, 'P': 'I2'} # GOTO 회복 I.2
else:
return {'L': 0, 'N': 3, 'P': 'Ok'} # Ok!
else:
return {'L': 0, 'N': 4, 'P': 'Ok'} # Ok.
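# Illustrative usage sketch (hypothetical sample values, shown only to
# demonstrate how the status trees are queried):
if __name__ == '__main__':
    # Tripped reactor, 1% power, negative IR, SR at 1.5 -> subcriticality satisfied.
    print(CSFTree.CSF1(TRIP=1, PR=1, IR=-0.5, SR=1.5))
    # Core exit temperature 400 degC with the P-T curve violated -> orange, recovery C2.
    print(CSFTree.CSF2(TRIP=1, CET=400, PT=1))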
```
#### File: SAMG_Project/TOOL/TOOL_MatGP3.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backend_bases import MouseEvent
import matplotlib.pyplot as plt
class Trend(QWidget):
def __init__(self, parent, w, h, para_name: str, para_id: str,
para_range: list = [None, None], xtitle:str = 'None', ytitle:str = 'None'):
"""
        Trend graph widget.
        :param parent: parent widget, used to fetch values updated in the simulator's shared memory
        :param w: widget width
        :param h: widget height
        :param para_name: title of the graph
        :param para_id: name of the simulator variable to plot ** the name must be registered in the shared memory's save_mem
        :param para_range: min / max range of the variable's y axis
        :param xtitle, ytitle: labels of the x and y axes
"""
super(Trend, self).__init__()
self.parent = parent
if parent is not None:
self.shmem = parent.shmem
# QWidget info
self.setGeometry(10, 10, w, h)
self.set_main_frame()
        self.start = QPoint(0, 0)  # used for moving the window by mouse drag
self.pressing = False # ..
# para info
self.para_name = para_name
self.para_id = para_id
self.para_range = para_range
self.max_time_leg = 24 # hour
# figure
self.fig = plt.Figure(tight_layout=True, facecolor=[240/255, 232/255, 208/255]) #
# self.fig.subplots_adjust(left=0.1, right=0.98, top=0.95, bottom=0.05)
self.canvas = FigureCanvas(self.fig)
self.canvas.mpl_connect('motion_notify_event', self.mouse_move_in_trend)
self.canvas.mpl_connect('button_press_event', self.mouse_press_in_trend)
# ax
self.ax = self.fig.add_subplot(111)
self.ax.set_title(f'{self.para_name}')
self.ax.set_xlim(0, self.max_time_leg)
self.ax.set_ylim(0, 5)
self.ax.set_xlabel(xtitle)
self.ax.set_ylabel(ytitle)
self.ax.grid()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.canvas)
self.setLayout(layout)
# Line
self.line1, = self.ax.plot([1.092683,1.7170732,2.7707317,4.097561,5.8146343,7.6878047,9.873171,12.52683,
14.868293],
[249.46808,219.14894,194.68085,173.40425,157.97873,145.74467,138.29787,129.78723,
123.93617], color=[39/255, 39/255, 141/255], linewidth=1, label='Flow line')
self.SG1, = self.ax.plot([], [], color=[197/255, 224/255, 180/255], linewidth=0, marker='o', label='SG1 Flow')
self.SG2, = self.ax.plot([], [], color=[255/255, 121/255, 121/255], linewidth=0, marker='o', label='SG2 Flow')
#Q Timer ------------------------------------------------------------------------------------------------------
timer = QTimer(self)
timer.setInterval(500) # 500 ms run = 0.5 sec
timer.timeout.connect(self.local_loop)
timer.start()
self.vallist = [0] # For test
def set_main_frame(self):
""" 라운드 테두리 """
path = QPainterPath()
path.addRoundedRect(QRectF(self.rect()), 10, 10)
mask = QRegion(path.toFillPolygon().toPolygon())
self.setMask(mask)
def resizeEvent(self, a0: QResizeEvent) -> None:
""" 그래프 크기 변경시 """
self.set_main_frame()
def mouse_move_in_trend(self, event: MouseEvent):
""" 그래프 클릭 시 이동 """
if event.button == 1:
self.end = self.mapToGlobal(QPoint(event.x, abs(event.y - self.height())))
self.movement = self.end - self.start
self.setGeometry(self.mapToGlobal(self.movement).x(),
self.mapToGlobal(self.movement).y(),
self.width(),
self.height())
self.start = self.end
def mouse_press_in_trend(self, event: MouseEvent):
""" 클릭 한 마우스 global pos 저장 """
if event.button == 3: # 오른쪽 클릭
self.close()
self.start = self.mapToGlobal(QPoint(event.x, abs(event.y - self.height())))
def local_loop(self):
""" 그래프 QTimer interval 간격으로 업데이트 """
if self.parent is not None:
saved_db = self.shmem.get_shmem_save_db()
self._update_trend(saved_db)
else:
self.vallist.append(self.vallist[-1] + 1)
self._update_trend(self.vallist)
def _update_trend(self, val_list):
""" 그래프 업데이트 """
        # if len(val_list) > 2:  # start drawing once at least two points are stored
# if self.parent is not None:
# time
# x_, ax1_, ax2_ = [], [], []
# for x, ax1, ax2 in zip(val_list['KCNTOMS'], val_list['WAFWS1'], val_list['WAFWS2']):
# if x % 3000 == 0:
# x_.append(x)
# ax1_.append(ax1)
# ax2_.append(ax2)
#
# self.SG1.set_data(x_, ax1_)
# self.SG2.set_data(x_, ax2_)
#
        # draw the x axis
        # draw the y axis
self.ax.set_ylim(0, 300)
self.ax.set_xlim(0, 24)
tick_ = [i for i in range(0, 26, 2)]
self.ax.set_xticks(tick_)
self.ax.set_xticklabels([f'{i}' for i in tick_])
self.canvas.draw()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Trend(None, w=500, h=500, para_name='Flow', para_id='KCNTOMS', para_range=[0, 300],
xtitle='Time Since Reactor Shutdown (Hours)', ytitle='Minimum Injection Flowrate (gpm)')
window.show()
sys.exit(app.exec_())
```
#### File: SAMG_Project/TOOL/TOOL_PTCurve.py
```python
import math
class PTCureve:
"""
    0: satisfied, 1: violated
PTCureve().Check(Temp=110, Pres=0)
"""
def __init__(self):
self.UpTemp = [0, 37.000000, 65.500000, 93.000000, 104.400000, 110.000000,
115.500000, 121.000000, 148.800000, 176.500000, 186.500000, 350.0]
self.UpPres = [29.5, 29.500000, 30.500000, 36.500000, 42.000000, 45.600000,
49.000000, 54.200000, 105.000000, 176.000000, 200.000000, 592]
self.BotTemp = [0, 37.000000, 149.000000, 159.000000, 169.000000, 179.000000,
204.000000, 232.000000, 260.000000, 287.700000, 350.000000]
self.BotPres = [17.0, 17.000000, 17.000000, 17.300000, 17.600000, 20.000000,
31.600000, 44.300000, 58.000000, 71.000000, 100.000000]
self.UpLineFunc = []
self.BotLineFunc = []
        # Normal-form line equations, used to compute the distance from the current point to the PT curve
self.UpLineOrtFunc = []
self.BotLineOrtFunc = []
self._make_bound_UpLine()
self._make_bound_BotLine()
def _make_bound_func(self, Temp, Pres):
"""
        Return a linear function through the two given points.
:param Temp: [a1, a2] == x
:param Pres: [b1, b2] == y
:return: func
"""
# y1 = ax1 + b
# y2 = ax2 + b
# a = (y1-y2)/(x1-x2)
# b = y1 - {(y1-y2)/(x1-x2) * x1}
get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1])
get_b = Pres[0] - get_a * Temp[0]
return lambda temp: get_a * temp + get_b
def _make_bound_orthogonal_func(self, Temp, Pres):
"""
        Return the coefficients of ax + by + c = 0 through the two given points.
:param Temp: [a1, a2] == x
:param Pres: [b1, b2] == y
:return: [a, b, c] List
"""
# y1 = ax1 + b
# y2 = ax2 + b
# a = (y1-y2)/(x1-x2)
# b = y1 - {(y1-y2)/(x1-x2) * x1}
        get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1])  # slope
get_b = Pres[0] - get_a * Temp[0]
# y = get_a * x + get_b ==> ax + by + c = 0
a = - get_a
b = 1
c = - get_b
return [a, b, c]
def _make_bound_UpLine(self):
for i in range(len(self.UpTemp) - 1):
self.UpLineFunc.append(self._make_bound_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))
self.UpLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))
def _make_bound_BotLine(self):
for i in range(len(self.BotTemp) - 1):
self.BotLineFunc.append(self._make_bound_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))
self.BotLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))
def _call_fun(self, Temp):
UpF, BotF = 0, 0
for i in range(len(self.UpTemp) - 1):
if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:
UpF = self.UpLineFunc[i]
for i in range(len(self.BotTemp) - 1):
if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:
BotF = self.BotLineFunc[i]
return UpF, BotF
def _call_ort_fun(self, Temp):
UpOrtF, BotOrtF = 0, 0
for i in range(len(self.UpTemp) - 1):
if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:
UpOrtF = self.UpLineOrtFunc[i]
for i in range(len(self.BotTemp) - 1):
if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:
BotOrtF = self.BotLineOrtFunc[i]
return UpOrtF, BotOrtF
def _get_pres(self, Temp):
"""
        Given a temperature, return the upper and lower pressure limits.
:param Temp: [0~..]
:return: [Up_pres, Bot_pres]
"""
UpF, BotF = self._call_fun(Temp=Temp)
Up_pres, Bot_pres = UpF(Temp), BotF(Temp)
return Up_pres, Bot_pres
def _check_up_or_under(self, fun, Temp, Pres):
Get_Pres = fun(Temp)
        if Get_Pres > Pres:
            return 0  # the given Pres lies below the curve
        elif Get_Pres == Pres:
            return 1  # the given Pres lies on the curve
        else:
            return 2  # the given Pres lies above the curve
def _check_in_or_out(self, Temp, Pres):
UpF, BotF = self._call_fun(Temp=Temp)
Upcond = self._check_up_or_under(UpF, Temp, Pres)
Botcond = self._check_up_or_under(BotF, Temp, Pres)
Reason = 0
        if Upcond == 2: Reason = 1  # exceeded the upper bound
        if Botcond == 0: Reason = 2  # fell below the lower bound
        if Upcond == 2 or Botcond == 0:
            return [1, Reason]  # outside the PT curve
        else:
            return [0, Reason]  # operating within the PT curve
def _check_distance(self, Temp, Pres):
"""
        Compute the distances from the current temperature/pressure point to the upper and lower lines.
        :param Temp: current temperature
        :param Pres: current pressure
:return: UpDis, BotDis
"""
d = 0
UpOrtF, BotOrtF = self._call_ort_fun(Temp=Temp) # [a,b,c]
# d = abs(a*x_1 + b*y_1 + c) / (math.sqrt(math.pow(a, 2) + math.pow(b, 2)))
# x_1 = Temp
# y_1 = Pres
UpDis = abs(UpOrtF[0] * Temp + UpOrtF[1] * Pres + UpOrtF[2]) / \
(math.sqrt(math.pow(UpOrtF[0], 2) + math.pow(UpOrtF[1], 2)))
BotDis = abs(BotOrtF[0] * Temp + BotOrtF[1] * Pres + BotOrtF[2]) / \
(math.sqrt(math.pow(BotOrtF[0], 2) + math.pow(BotOrtF[1], 2)))
return UpDis, BotDis
def Check(self, Temp, Pres):
"""
        Check whether the plant is operating within the PT curve.
        :param Temp: current temperature
        :param Pres: current pressure
        :return: 0 satisfied, 1 violated
"""
return self._check_in_or_out(Temp, Pres)[0]
def Check_Dis(self, Temp, Pres):
"""
        If the current temperature/pressure point is outside the PT curve, return how far outside it is.
        :param Temp: current temperature
        :param Pres: current pressure
        :return: distance outside the curve (0 if inside)
"""
        Satisfy, Reason = self._check_in_or_out(Temp, Pres)
        Updis, Botdis = self._check_distance(Temp, Pres)
        if Satisfy == 0:
return 0
else:
            # the shortest distance
return Updis if Updis < Botdis else Botdis
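# Illustrative usage sketch (hypothetical sample values):
if __name__ == '__main__':
    pt = PTCureve()
    # A mid-range operating point satisfies the curve (0); 110 degC at zero
    # pressure falls outside it (1), and Check_Dis reports how far outside.
    print(pt.Check(Temp=150, Pres=30))
    print(pt.Check(Temp=110, Pres=0), pt.Check_Dis(Temp=110, Pres=0))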
``` |
{
"source": "jongiddy/dcos-e2e",
"score": 2
} |
#### File: backends/_docker/_containers.py
```python
import configparser
import io
import shlex
from pathlib import Path
from typing import Dict, List, Optional
import docker
from dcos_e2e.docker_storage_drivers import DockerStorageDriver
from dcos_e2e.docker_versions import DockerVersion
def _docker_service_file(
storage_driver: DockerStorageDriver,
docker_version: DockerVersion,
) -> str:
"""
Return the contents of a systemd unit file for a Docker service.
Args:
storage_driver: The Docker storage driver to use.
docker_version: The version of Docker to start.
"""
storage_driver_name = {
DockerStorageDriver.AUFS: 'aufs',
DockerStorageDriver.OVERLAY: 'overlay',
DockerStorageDriver.OVERLAY_2: 'overlay2',
}[storage_driver]
daemon = {
DockerVersion.v1_11_2: '/usr/bin/docker daemon',
DockerVersion.v1_13_1: '/usr/bin/docker daemon',
DockerVersion.v17_12_1_ce: '/usr/bin/dockerd',
DockerVersion.v18_06_3_ce: '/usr/bin/dockerd',
}[docker_version]
docker_cmd = (
'{daemon} '
'-D '
'-s {storage_driver_name} '
'--exec-opt=native.cgroupdriver=cgroupfs '
'--cgroup-parent=${{CGROUP_PARENT}}'
).format(
storage_driver_name=storage_driver_name,
daemon=daemon,
)
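    # For example, with DockerStorageDriver.OVERLAY_2 and DockerVersion.v18_06_3_ce
    # this evaluates to:
    #   /usr/bin/dockerd -D -s overlay2 --exec-opt=native.cgroupdriver=cgroupfs
    #   --cgroup-parent=${CGROUP_PARENT}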
docker_service_contents = {
'Unit': {
'Description': 'Docker Application Container Engine',
'Documentation': 'https://docs.docker.com',
'After': 'dbus.service',
},
'Service': {
'EnvironmentFile': '/etc/docker/env',
'ExecStart': docker_cmd,
'LimitNOFILE': '1048576',
'LimitNPROC': '1048576',
'LimitCORE': 'infinity',
'Delegate': 'yes',
'TimeoutStartSec': '0',
},
'Install': {
'WantedBy': 'default.target',
},
}
config = configparser.ConfigParser()
# Ignore erroneous error https://github.com/python/typeshed/issues/1857.
config.optionxform = str # type: ignore
config.read_dict(docker_service_contents)
config_string = io.StringIO()
config.write(config_string)
config_string.seek(0)
return config_string.read()
def start_dcos_container(
container_base_name: str,
container_number: int,
mounts: List[docker.types.Mount],
tmpfs: Dict[str, str],
docker_image: str,
labels: Dict[str, str],
public_key_path: Path,
docker_storage_driver: DockerStorageDriver,
docker_version: DockerVersion,
network: Optional[docker.models.networks.Network] = None,
ports: Optional[Dict[str, int]] = None,
) -> None:
"""
Start a master, agent or public agent container.
In this container, start Docker and `sshd`.
Run Mesos without `systemd` support. This is not supported by DC/OS.
See https://jira.mesosphere.com/browse/DCOS_OSS-1131.
Args:
container_base_name: The start of the container name.
container_number: The end of the container name.
mounts: See `mounts` on
http://docker-py.readthedocs.io/en/latest/containers.html.
tmpfs: See `tmpfs` on
http://docker-py.readthedocs.io/en/latest/containers.html.
docker_image: The name of the Docker image to use.
labels: Docker labels to add to the cluster node containers. Akin to
the dictionary option in
http://docker-py.readthedocs.io/en/stable/containers.html.
public_key_path: The path to an SSH public key to put on the node.
docker_version: The Docker version to use on the node.
docker_storage_driver: The storage driver to use for Docker on the
node.
network: The network to connect the container to other than the default
``docker0`` bridge network.
ports: The ports to expose on the host.
"""
hostname = container_base_name + str(container_number)
environment = {'container': hostname}
client = docker.from_env(version='auto')
container = client.containers.create(
name=hostname,
privileged=True,
detach=True,
tty=True,
environment=environment,
hostname=hostname,
image=docker_image,
mounts=mounts,
tmpfs=tmpfs,
labels=labels,
stop_signal='SIGRTMIN+3',
command=['/sbin/init'],
ports=ports or {},
)
if network:
network.connect(container)
container.start()
disable_systemd_support_cmd = (
"echo 'MESOS_SYSTEMD_ENABLE_SUPPORT=false' >> "
'/var/lib/dcos/mesos-slave-common'
)
setup_mesos_cgroup_root = (
'MESOS_CGROUPS_ROOT=`grep memory /proc/1/cgroup | cut -d: -f3`/mesos; '
'MESOS_CGROUPS_ROOT=${MESOS_CGROUPS_ROOT:1}; '
'echo "MESOS_CGROUPS_ROOT=$MESOS_CGROUPS_ROOT" >> '
'/var/lib/dcos/mesos-slave-common'
)
docker_service_name = 'docker.service'
docker_service_text = _docker_service_file(
storage_driver=docker_storage_driver,
docker_version=docker_version,
)
docker_service_dst = '/lib/systemd/system/' + docker_service_name
echo_docker = [
'echo',
'-e',
shlex.quote(docker_service_text),
'>',
docker_service_dst,
]
docker_env_setup = (
'CGROUP=`grep memory /proc/1/cgroup | cut -d: -f3`; '
'echo "CGROUP_PARENT=$CGROUP/docker" >> '
'/etc/docker/env'
)
public_key = public_key_path.read_text()
echo_key = ['echo', public_key, '>>', '/root/.ssh/authorized_keys']
for cmd in [
['mkdir', '-p', '/var/lib/dcos'],
['/bin/bash', '-c', docker_env_setup],
['mkdir', '-p', '/lib/systemd/system'],
'/bin/bash -c "{cmd}"'.format(cmd=' '.join(echo_docker)),
['systemctl', 'enable', docker_service_name],
['systemctl', 'start', docker_service_name],
['/bin/bash', '-c', disable_systemd_support_cmd],
['/bin/bash', '-c', setup_mesos_cgroup_root],
['mkdir', '--parents', '/root/.ssh'],
'/bin/bash -c "{cmd}"'.format(cmd=' '.join(echo_key)),
['rm', '-f', '/run/nologin', '||', 'true'],
['systemctl', 'start', 'sshd'],
# Work around https://jira.mesosphere.com/browse/DCOS_OSS-1361.
['systemd-tmpfiles', '--create', '--prefix', '/var/log/journal'],
['systemd-tmpfiles', '--create', '--prefix', '/run/log/journal'],
]:
exit_code, output = container.exec_run(cmd=cmd)
assert exit_code == 0, ' '.join(cmd) + ': ' + output.decode()
```
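The two helpers above are easier to follow with a small usage sketch. The snippet below is a hedged illustration, assuming it runs inside the same module so the private `_docker_service_file` function and the `DockerVersion` / `DockerStorageDriver` enums it references are already in scope; it only renders the systemd unit text and does not start any containers.
```python
# Minimal sketch: render the docker.service unit for one version/driver pair.
# Assumes this runs in the module above, where _docker_service_file,
# DockerVersion and DockerStorageDriver are already defined/imported.
unit_text = _docker_service_file(
    storage_driver=DockerStorageDriver.OVERLAY_2,
    docker_version=DockerVersion.v18_06_3_ce,
)
# The result is INI-style text with [Unit], [Service] and [Install] sections,
# whose ExecStart line carries the dockerd command assembled above.
print(unit_text)
```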
#### File: dcos_e2e_cli/common/commands.py
```python
from pathlib import Path
import click
import requests
from tqdm import tqdm
@click.command('download-installer')
@click.option(
'--dcos-version',
type=str,
default='stable',
show_default=True,
help=(
'The DC/OS Open Source installer version to download. '
'This can be in one of the following formats: '
'``stable``, '
'``testing/master``, '
'``testing/<DC/OS MAJOR RELEASE>``, '
'``stable/<DC/OS MINOR RELEASE>``, '
'``testing/pull/<GITHUB-PR-NUMBER>``.\n'
'See https://dcos.io/releases/ for available releases.'
'\n'
'If an HTTP or HTTPS URL is given, that is downloaded.'
),
)
@click.option(
'--download-path',
type=str,
default='./dcos_generate_config.sh',
show_default=True,
help='The path to download an installer to.',
)
@click.pass_context
def download_installer(
ctx: click.core.Context,
dcos_version: str,
download_path: str,
) -> None:
"""
Download a DC/OS Open Source installer.
For DC/OS Enterprise installers, contact your sales representative.
"""
path = Path(download_path)
path.parent.mkdir(exist_ok=True, parents=True)
path = path.parent.resolve() / path.name
click.echo('Downloading to {path}.'.format(path=path))
if dcos_version.startswith('http'):
url = dcos_version
else:
base_url = 'https://downloads.dcos.io/dcos/'
url = base_url + dcos_version + '/dcos_generate_config.sh'
head_resp = requests.head(url)
if not head_resp.ok:
message = 'Cannot download installer from {url}.'.format(url=url)
ctx.fail(message=message)
if path.is_dir():
path = path / 'dcos_generate_config.sh'
if not path.exists():
path.parent.mkdir(parents=True, exist_ok=True)
# See
# https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
stream = requests.get(url, stream=True)
assert stream.ok
content_length = int(stream.headers['Content-Length'])
total_written = 0
chunk_size = 1024
# See http://click.pocoo.org/7/arguments/#file-args for parameter
# information.
content_iter = stream.iter_content(chunk_size=chunk_size)
progress_bar = tqdm(
iterable=content_iter,
total=content_length / chunk_size,
dynamic_ncols=True,
bar_format='{l_bar}{bar}',
unit_scale=None,
)
with click.open_file(
filename=str(path),
mode='wb',
atomic=True,
lazy=True,
) as file_descriptor:
for chunk in progress_bar:
# Enable at the start of each chunk, disable at the end, to avoid
# showing statistics at the end.
progress_bar.disable = False
# Filter out keep-alive new chunks.
if chunk:
total_written += len(chunk)
file_descriptor.write(chunk) # type: ignore
progress_bar.disable = True
message = (
'Downloaded {total_written} bytes. '
'Expected {content_length} bytes.'
).format(
total_written=total_written,
content_length=content_length,
)
assert total_written == content_length, message
```
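As a quick illustration of the command above, the sketch below drives it with click's test runner. This is an added, hedged example (it assumes `download_installer` is importable from the module above and it performs a real HTTP download of the stable installer), not part of the original file.
```python
from click.testing import CliRunner

runner = CliRunner()
with runner.isolated_filesystem():
    # Invokes the command the same way the CLI entry point would, downloading
    # the stable installer into the current (temporary) directory.
    result = runner.invoke(
        download_installer,
        ['--dcos-version', 'stable', '--download-path', './dcos_generate_config.sh'],
    )
    print(result.exit_code)
```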
#### File: dcos_e2e_cli/common/run_command.py
```python
import subprocess
import sys
from typing import Dict, List
import click
from dcos_e2e.cluster import Cluster
from dcos_e2e.node import Node, Output, Transport
def run_command(
args: List[str],
cluster: Cluster,
host: Node,
transport: Transport,
use_test_env: bool,
dcos_login_uname: str,
dcos_login_pw: str,
env: Dict[str, str],
) -> None:
"""
Run a command on a given cluster / host.
Args:
args: The arguments to run on a node.
cluster: The cluster to run a command on.
host: the node to run a command on.
transport: The transport to use to communicate with the cluster.
use_test_env: Whether to use the DC/OS integration test environment to
run the command in.
dcos_login_uname: The DC/OS login username. This is only used if using
the test environment and DC/OS Enterprise.
dcos_login_pw: The DC/OS login password. This is only used if using
the test environment and DC/OS Enterprise.
env: Environment variables to set before running the command.
"""
columns, rows = click.get_terminal_size()
tty = sys.stdout.isatty()
env = {
# LINES and COLUMNS are needed if using the ``DOCKER_EXEC`` transport.
# See https://github.com/moby/moby/issues/35407.
'COLUMNS': str(columns),
'LINES': str(rows),
'DCOS_LOGIN_UNAME': dcos_login_uname,
'DCOS_LOGIN_PW': dcos_login_pw,
**env,
}
if not use_test_env:
try:
host.run(
args=args,
output=Output.NO_CAPTURE,
tty=tty,
shell=True,
env=env,
transport=transport,
)
except subprocess.CalledProcessError as exc:
sys.exit(exc.returncode)
return
try:
cluster.run_with_test_environment(
args=args,
tty=tty,
env=env,
node=host,
transport=transport,
output=Output.NO_CAPTURE,
)
except subprocess.CalledProcessError as exc:
sys.exit(exc.returncode)
```
#### File: dcos_e2e_cli/common/variants.py
```python
import subprocess
import sys
from pathlib import Path
from shutil import rmtree
from typing import Optional
import click
from halo import Halo
from dcos_e2e.cluster import Cluster
from dcos_e2e.exceptions import DCOSNotInstalledError
from dcos_e2e.node import DCOSVariant
from dcos_e2e_cli._vendor import dcos_installer_tools as installer_tools
def get_install_variant(
given_variant: str,
installer_path: Optional[Path],
doctor_message: str,
workspace_dir: Path,
enable_spinner: bool,
) -> DCOSVariant:
"""
Get the variant of DC/OS to install.
Args:
given_variant: The variant string given by the user to the
``variant_option``. One of "auto", "enterprise" and "oss". If
"auto" is given, use the DC/OS installer to find the variant.
installer_path: The path to a DC/OS installer, if available.
workspace_dir: A directory to work in, given that this function uses
large files.
doctor_message: The message to show if something goes wrong.
enable_spinner: Whether to enable the spinner animation.
Returns:
The variant of DC/OS to install.
Raises:
CalledProcessError: There was an error unpacking the installer.
"""
if given_variant == 'auto':
assert installer_path is not None
spinner = Halo(enabled=enable_spinner)
spinner.start(text='Determining DC/OS variant')
try:
details = installer_tools.get_dcos_installer_details(
installer=installer_path,
workspace_dir=workspace_dir,
)
except subprocess.CalledProcessError as exc:
rmtree(path=str(workspace_dir), ignore_errors=True)
spinner.stop()
click.echo(doctor_message)
click.echo()
click.echo('Original error:', err=True)
click.echo(exc.stderr, err=True)
raise
except ValueError as exc:
click.echo(str(exc), err=True)
sys.exit(1)
spinner.succeed()
variant_map = {
installer_tools.DCOSVariant.ENTERPRISE: DCOSVariant.ENTERPRISE,
installer_tools.DCOSVariant.OSS: DCOSVariant.OSS,
}
return variant_map[details.variant]
return {
'oss': DCOSVariant.OSS,
'enterprise': DCOSVariant.ENTERPRISE,
}[given_variant]
def get_cluster_variant(cluster: Cluster) -> Optional[DCOSVariant]:
"""
Get the variant of DC/OS running on a cluster.
Args:
cluster: The cluster running DC/OS.
Returns:
The variant of DC/OS installed on the given cluster or ``None`` if the
file required for us to know is not ready.
"""
master = next(iter(cluster.masters))
try:
return master.dcos_build_info().variant
except DCOSNotInstalledError:
return None
```
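A small sketch of `get_install_variant` follows; it uses the `'oss'` shortcut path, which per the code above never touches the installer, so no installer file is required. This is an illustrative addition that assumes the module above is in scope.
```python
from pathlib import Path

# 'oss' (and 'enterprise') bypass installer inspection entirely, so
# installer_path can be None and the doctor message and spinner are unused.
variant = get_install_variant(
    given_variant='oss',
    installer_path=None,
    doctor_message='',
    workspace_dir=Path('/tmp'),
    enable_spinner=False,
)
assert variant == DCOSVariant.OSS
```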
#### File: dcos_aws/commands/destroy.py
```python
from typing import List
import click
from halo import Halo
from dcos_e2e_cli.common.options import (
enable_spinner_option,
existing_cluster_id_option,
verbosity_option,
)
from dcos_e2e_cli.common.utils import check_cluster_id_exists
from ._common import ClusterInstances, existing_cluster_ids
from ._options import aws_region_option
def destroy_cluster(
cluster_id: str,
enable_spinner: bool,
aws_region: str,
) -> None:
"""
Destroy a cluster.
Args:
cluster_id: The ID of the cluster.
enable_spinner: Whether to enable the spinner animation.
aws_region: The region the cluster is in.
"""
with Halo(enabled=enable_spinner):
check_cluster_id_exists(
new_cluster_id=cluster_id,
existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
)
cluster_vms = ClusterInstances(
cluster_id=cluster_id,
aws_region=aws_region,
)
cluster_vms.destroy()
@click.command('destroy-list')
@aws_region_option
@enable_spinner_option
@verbosity_option
@click.argument('cluster_ids', nargs=-1, type=str)
def destroy_list(
cluster_ids: List[str],
enable_spinner: bool,
aws_region: str,
) -> None:
"""
Destroy clusters.
To destroy all clusters, run
``minidcos aws destroy $(minidcos aws list)``.
"""
for cluster_id in cluster_ids:
if cluster_id in existing_cluster_ids(aws_region=aws_region):
destroy_cluster(
enable_spinner=enable_spinner,
cluster_id=cluster_id,
aws_region=aws_region,
)
click.echo(cluster_id)
else:
warning = 'Cluster "{cluster_id}" does not exist'.format(
cluster_id=cluster_id,
)
click.echo(warning, err=True)
continue
@click.command('destroy')
@enable_spinner_option
@aws_region_option
@verbosity_option
@existing_cluster_id_option
def destroy(cluster_id: str, enable_spinner: bool, aws_region: str) -> None:
"""
Destroy a cluster.
"""
destroy_cluster(
cluster_id=cluster_id,
enable_spinner=enable_spinner,
aws_region=aws_region,
)
click.echo(cluster_id)
```
#### File: dcos_aws/commands/sync.py
```python
from pathlib import Path
import click
from dcos_e2e_cli.common.arguments import dcos_checkout_dir_argument
from dcos_e2e_cli.common.options import (
existing_cluster_id_option,
verbosity_option,
)
from dcos_e2e_cli.common.sync import SYNC_HELP, sync_code_to_masters
from dcos_e2e_cli.common.utils import check_cluster_id_exists
from ._common import ClusterInstances, existing_cluster_ids
from ._options import aws_region_option
@click.command('sync', help=SYNC_HELP)
@existing_cluster_id_option
@dcos_checkout_dir_argument
@aws_region_option
@verbosity_option
def sync_code(
cluster_id: str,
dcos_checkout_dir: Path,
aws_region: str,
) -> None:
"""
Sync files from a DC/OS checkout to master nodes.
"""
check_cluster_id_exists(
new_cluster_id=cluster_id,
existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
)
cluster_instances = ClusterInstances(
cluster_id=cluster_id,
aws_region=aws_region,
)
cluster = cluster_instances.cluster
sync_code_to_masters(
cluster=cluster,
dcos_checkout_dir=dcos_checkout_dir,
sudo=True,
)
```
#### File: dcos_aws/commands/_wait_for_dcos.py
```python
from typing import Callable
import click
def wait_for_dcos_option(command: Callable[..., None]) -> Callable[..., None]:
"""
Option to choose waiting for DC/OS to be ready after starting the
installation.
"""
function = click.option(
'--wait-for-dcos',
is_flag=True,
help=(
'Wait for DC/OS after creating the cluster. '
'This is equivalent to using "minidcos aws wait" after this '
'command. '
'"minidcos aws wait" has various options available and so may be '
'more appropriate for your use case.'
),
)(command) # type: Callable[..., None]
return function
```
#### File: dcos_docker/commands/_cgroup_mount_option.py
```python
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
"""
Option for choosing to mount `/sys/fs/cgroup` into the container.
"""
function = click.option(
'--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
default=True,
show_default=True,
help=(
'Mounting ``/sys/fs/cgroup`` from the host is required to run '
'applications which require ``cgroup`` isolation. '
'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
'the host.'
),
)(command) # type: Callable[..., None]
return function
```
#### File: dcos_docker/commands/_nodes.py
```python
from typing import Callable
import click
def node_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for choosing a node.
"""
function = click.option(
'--node',
type=str,
default=('master_0', ),
show_default=True,
multiple=True,
help=(
'A reference to a particular node to run the command on. '
'This can be one of: '
"The node's IP address, "
"the node's Docker container name, "
"the node's Docker container ID, "
'a reference in the format "<role>_<number>". '
            'These details can be seen with ``minidcos docker inspect``.'
),
)(command) # type: Callable[..., None]
return function
```
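These small option decorators are meant to be stacked onto click commands. The sketch below shows the intended usage pattern with `node_option`; the `demo` command itself is hypothetical and only echoes the node references it receives.
```python
import click

@click.command('demo')
@node_option
def demo(node: tuple) -> None:
    # ``multiple=True`` means click hands the option over as a tuple,
    # defaulting to ('master_0',).
    for reference in node:
        click.echo(reference)
```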
#### File: dcos_docker/commands/web.py
```python
import click
from dcos_e2e.node import Transport
from dcos_e2e_cli.common.options import (
existing_cluster_id_option,
verbosity_option,
)
from dcos_e2e_cli.common.utils import check_cluster_id_exists
from dcos_e2e_cli.common.web import launch_web_ui
from ._common import ClusterContainers, existing_cluster_ids
from ._options import node_transport_option
@click.command('web')
@existing_cluster_id_option
@verbosity_option
@node_transport_option
def web(cluster_id: str, transport: Transport) -> None:
"""
Open the browser at the web UI.
Note that the web UI may not be available at first.
Consider using ``minidcos docker wait`` before running this command.
"""
check_cluster_id_exists(
new_cluster_id=cluster_id,
existing_cluster_ids=existing_cluster_ids(),
)
cluster_containers = ClusterContainers(
cluster_id=cluster_id,
transport=transport,
)
launch_web_ui(cluster=cluster_containers.cluster)
```
#### File: dcos_vagrant/commands/list_clusters.py
```python
import click
from ._common import existing_cluster_ids
@click.command('list')
def list_clusters() -> None:
"""
List all clusters.
"""
for cluster_id in existing_cluster_ids():
click.echo(cluster_id)
```
#### File: dcos_launch/platforms/arm.py
```python
import contextlib
import copy
import logging
import re
import requests
import retrying
from azure.common.credentials import ServicePrincipalCredentials
from azure.common.exceptions import CloudError
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources.v2016_02_01 import ResourceManagementClient
from azure.mgmt.resource.resources.v2016_02_01.models import (DeploymentMode,
DeploymentProperties,
ResourceGroup)
from azure.monitor import MonitorClient
from ...dcos_launch.util import DeploymentError
from ...dcos_test_utils.helpers import Host
log = logging.getLogger(__name__)
# This interface is designed to only use a single deployment.
# Because the Azure interface is organized around resource groups, deriving the
# deployment name from the group name makes it easier to tie deployments to the
# groups that create them.
def validate_hostname_prefix(prefix):
"""Hostname prefixes in azure templates are used to link a variety of resources
Not all of these resources will have their constraints checked at when ARM
valiation occurs. This check in particular was aggravating as no docs surfaced
this issue, so logs needed to be scanned just to discover this error
"""
assert re.match('^[a-z][a-z0-9-]{1,61}[a-z0-9]$', prefix), 'Invalid DNS prefix: {}'.format(prefix)
def check_json_object(obj):
""" Simple check to fill in the map for automatic parameter casting
JSON objects must be represented as dict at this level
"""
assert isinstance(obj, dict), 'Invalid JSON object: {}'.format(obj)
return obj
def check_array(arr):
""" Simple check to fill in the map for automatic parameter casting
JSON arrays must be represented as lists at this level
"""
assert isinstance(arr, list), 'Invalid array: {}'.format(arr)
return arr
def nic_to_host(nic, public_ip=None):
assert len(nic.ip_configurations) == 1
ip_config = nic.ip_configurations[0]
if ip_config.public_ip_address is None and public_ip is None:
return Host(ip_config.private_ip_address, None)
if public_ip is None:
return Host(ip_config.private_ip_address, ip_config.public_ip_address.ip_address)
return Host(ip_config.private_ip_address, public_ip)
class AzureWrapper:
def __init__(self, location: str, subscription_id: str, client_id: str, client_secret: str, tenant_id: str):
self.credentials = ServicePrincipalCredentials(
client_id=client_id,
secret=client_secret,
tenant=tenant_id)
self.rmc = ResourceManagementClient(self.credentials, subscription_id)
self.nmc = NetworkManagementClient(self.credentials, subscription_id)
self.mc = MonitorClient(self.credentials, subscription_id)
# location is included to keep a similar model as dcos_launch.platforms.aws.BotoWrapper
self.location = location
def deploy_template_to_new_resource_group(
self, template_url, group_name, parameters, tags=None, template=None):
if tags is None:
tags = dict()
log.info('Checking deployment parameters vs template before starting...')
deployment_properties = self.create_deployment_properties(
template_url, parameters, template=template)
deployment_name = DEPLOYMENT_NAME.format(group_name)
# Resource group must be created before validation can occur
if self.rmc.resource_groups.check_existence(group_name):
raise Exception("Group name already exists / taken: {}".format(group_name))
        log.info('Starting resource group creation')
def get_all_details(error):
formatted_message = '{}: {}\n\n'.format(error.code, error.message)
if error.details is None:
return formatted_message
for d in error.details:
formatted_message += get_all_details(d)
return formatted_message
with contextlib.ExitStack() as stack:
self.rmc.resource_groups.create_or_update(
group_name,
ResourceGroup(location=self.location, tags=tags))
# Ensure the resource group will be deleted if the following steps fail
stack.callback(self.rmc.resource_groups.delete, group_name)
log.info('Resource group created: {}'.format(group_name))
log.info('Checking with Azure to validate template deployment')
result = self.rmc.deployments.validate(
group_name, deployment_name, properties=deployment_properties)
if result.error:
raise Exception("Template verification failed!\n{}".format(get_all_details(result.error)))
log.info('Template successfully validated')
log.info('Starting template deployment')
self.rmc.deployments.create_or_update(
group_name, deployment_name, deployment_properties, raw=True)
stack.pop_all()
log.info('Successfully started template deployment')
def create_deployment_properties(self, template_url, parameters, template: dict=None):
""" Pulls the targeted template, checks parameter specs and casts
user provided parameters to the appropriate type. Assertion is raised
if there are unused parameters or invalid casting
"""
user_parameters = copy.deepcopy(parameters)
type_cast_map = {
'string': str,
'securestring': str,
'int': int,
'bool': bool,
'object': check_json_object,
'secureObject': check_json_object,
'array': check_array}
log.debug('Pulling Azure template for parameter validation...')
if template is None:
r = requests.get(template_url)
r.raise_for_status()
template = r.json()
if 'parameters' not in template:
assert user_parameters is None, 'This template does not support parameters, ' \
'yet parameters were supplied: {}'.format(user_parameters)
log.debug('Constructing DeploymentProperties from user parameters: {}'.format(parameters))
template_parameters = {}
for k, v in template['parameters'].items():
if k in user_parameters:
# All templates parameters are required to have a type field.
# Azure requires that parameters be provided as {key: {'value': value}}.
template_parameters[k] = {
'value': type_cast_map[v['type']](user_parameters.pop(k))}
log.debug('Final template parameters: {}'.format(template_parameters))
if len(user_parameters) > 0:
raise Exception('Unrecognized template parameters were supplied: {}'.format(user_parameters))
return DeploymentProperties(
template=template,
mode=DeploymentMode.incremental,
parameters=template_parameters)
class DcosAzureResourceGroup:
""" An abstraction for cleanly handling the life cycle of a DC/OS template
deployment. Operations include: create, wait, describe host IPs, and delete
"""
def __init__(self, group_name, azure_wrapper):
self.group_name = group_name
self.azure_wrapper = azure_wrapper
@classmethod
def deploy_acs_template(
cls, azure_wrapper: AzureWrapper, template_url: str, group_name: str,
public_key, master_prefix, agent_prefix, admin_name, oauth_enabled,
vm_size, agent_count, name_suffix, vm_diagnostics_enabled):
""" Creates a new resource group and deploys a ACS DC/OS template to it
using a subset of parameters for a simple deployment. To see a full
listing of parameters, including description and formatting, go to:
gen/azure/templates/acs.json in this repository.
Args:
azure_wrapper: see above
template_url: Azure-accessible location for the desired ACS template
group_name: name used for the new resource group that will be created
for this template deployment
Args that wrap template parameters:
public_key -> sshRSAPublicKey
master_prefix -> masterEndpointDNSNamePrefix
agent_prefix -> agentEndpointDNSNamePrefix
admin_name -> linuxAdminUsername
vm_size -> agentVMSize
agent_count -> agentCount
name_suffix -> nameSuffix
oauth_enabled -> oauthEnabled
vm_diagnostics_enabled -> enableVMDiagnostics
"""
        assert master_prefix != agent_prefix, 'Master and agents must have unique prefixes'
validate_hostname_prefix(master_prefix)
validate_hostname_prefix(agent_prefix)
parameters = {
'sshRSAPublicKey': public_key,
'masterEndpointDNSNamePrefix': master_prefix,
'agentEndpointDNSNamePrefix': agent_prefix,
'linuxAdminUsername': admin_name,
'agentVMSize': vm_size,
'agentCount': agent_count,
'nameSuffix': name_suffix,
'oauthEnabled': oauth_enabled,
'enableVMDiagnostics': vm_diagnostics_enabled}
azure_wrapper.deploy_template_to_new_resource_group(template_url, group_name, parameters)
return cls(group_name, azure_wrapper)
def get_deployment_state(self):
return self.azure_wrapper.rmc.deployments.get(
self.group_name, DEPLOYMENT_NAME.format(self.group_name)).properties.provisioning_state
def wait_for_deployment(self, timeout=60 * 60):
"""
        Azure will not register a template instantly after deployment, so
        CloudError must be expected and retried. Once the operations are
        retrieved, this loops through all operations in the group's only
        deployment. If any operations are still in progress, this function
        sleeps; once all operations are complete, any failures are printed
        to the log stream.
"""
log.info('Waiting for deployment to finish')
def azure_failure_report():
deploy_ops = self.azure_wrapper.rmc.deployment_operations.list(
self.group_name, DEPLOYMENT_NAME.format(self.group_name))
failures = [(op.properties.status_code, op.properties.status_message) for op
in deploy_ops if op.properties.provisioning_state == 'Failed']
for failure in failures:
log.error('Deployment operation failed! {}: {}'.format(*failure))
@retrying.retry(
wait_fixed=60 * 1000, stop_max_delay=timeout * 1000,
retry_on_result=lambda res: res is False,
retry_on_exception=lambda ex: isinstance(ex, CloudError))
def check_deployment_operations():
deploy_state = self.get_deployment_state()
if deploy_state == 'Succeeded':
return True
elif deploy_state == 'Failed':
log.info('Deployment failed. Checking deployment operations.')
azure_failure_report()
raise DeploymentError('Azure Deployment Failed!')
else:
                log.info('Waiting for deployment. Current state: {}. Expected Succeeded or Failed.'.format(
                    deploy_state))
return False
try:
check_deployment_operations()
except retrying.RetryError:
log.info('Deployment failed. Checking deployment operations.')
azure_failure_report()
raise DeploymentError("Azure Deployment Failed!")
def list_resources(self, filter_string):
yield from self.azure_wrapper.rmc.resource_groups.list_resources(
self.group_name, filter=(filter_string))
def get_scale_set_nics(self, name_substring=None):
for resource in self.list_resources("resourceType eq 'Microsoft.Compute/virtualMachineScaleSets'"):
if name_substring and name_substring not in resource.name:
continue
yield from self.azure_wrapper.nmc.network_interfaces.list_virtual_machine_scale_set_network_interfaces(
self.group_name, resource.name)
def get_public_ip_address(self, name_substring=None):
for resource in self.list_resources("resourceType eq 'Microsoft.Network/publicIPAddresses'"):
if name_substring and name_substring not in resource.name:
continue
return self.azure_wrapper.nmc.public_ip_addresses.get(self.group_name, resource.name)
@property
def public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip').dns_settings.fqdn
@property
def public_master_lb_fqdn(self):
return self.get_public_ip_address('master-ip').dns_settings.fqdn
@property
def master_nics(self):
""" The only instances of networkInterface Resources are for masters
"""
for resource in self.list_resources("resourceType eq 'Microsoft.Network/networkInterfaces'"):
assert 'master' in resource.name, 'Expected to only find master NICs, not: {}'.format(resource.name)
yield self.azure_wrapper.nmc.network_interfaces.get(self.group_name, resource.name)
def get_master_ips(self):
""" Traffic from abroad is routed to a master wth the public master
loadbalancer FQDN and the VM index plus 2200 (so the first master will be at 2200)
"""
public_lb_ip = self.public_master_lb_fqdn
return [Host(nic_to_host(nic).private_ip, '{}:{}'.format(public_lb_ip, 2200 + int(nic.name[-1])))
for nic in self.master_nics]
def get_private_agent_ips(self):
return [nic_to_host(nic) for nic in self.get_scale_set_nics('private')]
def get_public_agent_ips(self):
""" public traffic is routed to public agents via a specific load balancer """
public_lb_ip = self.public_agent_lb_fqdn
return [Host(nic_to_host(nic).private_ip, public_lb_ip)
for nic in self.get_scale_set_nics('public')]
def update_tags(self, new_tags: dict):
rg = self.azure_wrapper.rmc.resource_groups.get(self.group_name)
if rg.tags is None:
rg.tags = dict()
rg.tags.update(new_tags)
self.azure_wrapper.rmc.resource_groups.patch(rg.name, {
'tags': rg.tags,
'location': rg.location}, raw=True)
def delete(self):
log.info('Triggering delete')
self.azure_wrapper.rmc.resource_groups.delete(self.group_name, raw=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
self.delete()
class HybridDcosAzureResourceGroup(DcosAzureResourceGroup):
@property
def master_nics(self):
master_nics = []
for resource in self.list_resources("resourceType eq 'Microsoft.Network/networkInterfaces'"):
if 'master' in resource.name:
master_nics.append(resource.name)
        assert len(master_nics) > 0, 'Cannot find any master NICs in resource group {}'.format(self.group_name)
for name in master_nics:
yield self.azure_wrapper.nmc.network_interfaces.get(self.group_name, name)
def get_master_ips(self):
public_lb_ip = self.public_master_lb_fqdn
return [nic_to_host(nic, public_lb_ip) for nic in self.master_nics]
def get_linux_private_agent_ips(self):
return [nic_to_host(nic) for nic in self.get_scale_set_nics('linpri')]
def get_linux_public_agent_ips(self):
return [nic_to_host(nic, self.linux_public_agent_lb_fqdn)
for nic in self.get_scale_set_nics('linpub')]
def get_windows_public_agent_ips(self):
# this VMSS name is derived from this being the 0-th element in the VMSS list
return [nic_to_host(nic, self.windows_public_agent_lb_fqdn)
for nic in self.get_scale_set_nics('900-vmss')]
def get_windows_private_agent_ips(self):
# this VMSS name is derived from this being the 1-th element in the VMSS list
return [nic_to_host(nic) for nic in self.get_scale_set_nics('901-vmss')]
@property
def linux_public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip-linpub').dns_settings.fqdn
@property
def windows_public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip-wpub').dns_settings.fqdn
```
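The ARM wrapper above is easiest to read top-down from a driver sketch. The snippet below uses only the classes defined in this file; the credentials, template URL and parameter values are hypothetical placeholders and must be replaced with real ones.
```python
# Hedged sketch: deploy an ACS DC/OS template, wait for it, then clean up.
wrapper = AzureWrapper(
    location='East US',
    subscription_id='<subscription-id>',   # placeholder
    client_id='<client-id>',               # placeholder
    client_secret='<client-secret>',       # placeholder
    tenant_id='<tenant-id>',               # placeholder
)
group = DcosAzureResourceGroup.deploy_acs_template(
    azure_wrapper=wrapper,
    template_url='https://example.com/acs.json',   # placeholder template URL
    group_name='my-dcos-test',
    public_key='ssh-rsa AAAA... user@host',
    master_prefix='mydcosmaster',
    agent_prefix='mydcosagent',
    admin_name='azureuser',
    oauth_enabled='false',
    vm_size='Standard_D2_v2',
    agent_count=2,
    name_suffix='abc123',
    vm_diagnostics_enabled='true',
)
try:
    group.wait_for_deployment()
    print(group.get_master_ips())
    print(group.get_private_agent_ips())
finally:
    group.delete()
```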
#### File: _vendor/dcos_test_utils/jobs.py
```python
import logging
import retrying
import requests
from ..dcos_test_utils import helpers
REQUIRED_HEADERS = {'Accept': 'application/json, text/plain, */*'}
log = logging.getLogger(__name__)
class Jobs(helpers.RetryCommonHttpErrorsMixin, helpers.ApiClientSession):
""" Specialized client for interacting with DC/OS jobs functionality
:param default_url: URL of the jobs service to bind to
:type default_url: helpers.Url
    :param session: optional session to bootstrap this session with
:type session: requests.Session
"""
def __init__(self, default_url: helpers.Url, session: requests.Session=None):
super().__init__(default_url)
if session is not None:
self.session = session
self.session.headers.update(REQUIRED_HEADERS)
self._api_version = '/v1'
def _http_req_json(self, fn: callable,
*args: list,
**kwargs: dict) -> dict:
"""Helper method that executes the HTTP request, calls
`raise_for_status()` and returns the `json()` response.
`fn` is a callable, such as `self.post`.
Example:
self._http_req_json(self.get, 'https://example.com')
:param fn: Function from helpers to run
:type fn: callable
:param args: args
:type args: list
:param kwargs: kwargs
:type kwargs: dict
:return: JSON response
:rtype: dict
"""
r = fn(*args, **kwargs)
r.raise_for_status()
return r.json()
def _is_history_available(self, job_id: str, run_id: str) -> bool:
""" When job run is finished, history might not be available right ahead.
This method returns true if run of given id is already present in the history endpoint.
"""
result = self.details(job_id, history=True)
history = result['history']
for field in ('successfulFinishedRuns', 'failedFinishedRuns'):
for result in history[field]:
if result['id'] == run_id:
return True
return False
def wait_for_run(self, job_id: str, run_id: str, timeout=600):
"""Wait for a given run to complete or timeout seconds to
elapse.
:param job_id: Job ID
:type job_id: str
:param run_id: Run ID
:type run_id: str
:param timeout: Time in seconds to wait before giving up
:type timeout: int
:return: None
"""
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout * 1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_run_completion(j_id: str, r_id: str) -> bool:
try:
# 200 means the run is still in progress
self.run_details(job_id=j_id, run_id=r_id)
log.info('Waiting on job run {} to finish.'.format(r_id))
return False
except requests.HTTPError as http_error:
rc = http_error.response
# 404 means the run is complete and this is done
# anything else is a problem and should not happen
if rc.status_code == 404:
history_available = self._is_history_available(j_id, r_id)
if history_available:
log.info('Job run {} finished.'.format(r_id))
return True
else:
log.warning(
'Waiting for job run {} to be finished, but history for that job run is not available'
.format(r_id))
return False
else:
raise requests.HTTPError(
'Waiting for job run {} to be finished, but getting HTTP status code {}'
.format(r_id, rc.status_code), response=rc)
try:
# wait for the run to complete and then return the
# run's result
_wait_for_run_completion(job_id, run_id)
except retrying.RetryError as ex:
raise Exception("Job run failed - operation was not "
"completed in {} seconds.".format(timeout)) from ex
def details(self, job_id: str, history=False) -> dict:
"""Get the details of a specific Job.
:param job_id: Job ID
:type job_id: str
:param history: Include embedded history in details
:type history: bool
:return: Job details as JSON
:rtype: dict
"""
url = '{api}/jobs/{job_id}'.format(api=self._api_version,
job_id=job_id)
params = {'embed': 'history'} if history else None
return self._http_req_json(self.get, url, params=params)
def create(self, job_definition: dict) -> dict:
"""Create a new job with given definition.
:param job_definition: Job definition
:type job_definition: dict
:return: Response from Jobs service as JSON
:rtype: dict
"""
url = '{api}/jobs'.format(api=self._api_version)
return self._http_req_json(self.post, url, json=job_definition)
def destroy(self, job_id: str):
"""Delete an existing job and all data.
:param job_id: Job ID
:type job_id: str
"""
url = '{api}/jobs/{job_id}'.format(
api=self._api_version, job_id=job_id)
return self._http_req_json(self.delete,
url,
params={'stopCurrentJobRuns': 'true'})
def start(self, job_id: str) -> dict:
"""Create a run and return the Run.
:param job_id: Job ID
:type job_id: str
:return: Run creation response from Jobs service
:rtype: dict
"""
url = '{api}/jobs/{job_id}/runs'.format(
api=self._api_version,
job_id=job_id)
r_json = self._http_req_json(self.post, url)
log.info("Started job {}, run id {}".format(job_id, r_json['id']))
return r_json
def run(self, job_id: str, timeout=600) -> (bool, dict, dict):
"""Create a run, wait for it to finish, and return whether it was
successful and the run itself.
This will run the job immediately and block until
the run is complete.
:param job_id: Job ID
:type job_id: str
:param timeout: Timeout in seconds
:type timeout: int
:return: tuple of success, Run details, Job details
:rtype: bool, dict, dict
"""
run_json = self.start(job_id)
run_id = run_json['id']
self.wait_for_run(job_id, run_id, timeout)
result = self.details(job_id, history=True)
history = result['history']
for field in ('successfulFinishedRuns', 'failedFinishedRuns'):
success = field == 'successfulFinishedRuns'
for job_run in history[field]:
if job_run['id'] == run_id:
return success, job_run, result
return False, None, result
def run_details(self, job_id: str, run_id: str) -> dict:
"""Return details about the given Run ID.
:param job_id: Job ID
:type job_id: str
:param run_id: Run ID
:type run_id: str
:return: Run details
:rtype: dict
"""
url = '{api}/jobs/{job_id}/runs/{run_id}'.format(
api=self._api_version,
job_id=job_id,
run_id=run_id)
return self._http_req_json(self.get, url)
def run_stop(self, job_id: str, run_id: str) -> dict:
"""Stop the run `run_id` if it is in-progress.
:param job_id: Job ID
:type job_id: str
:param run_id: Run ID
:type run_id: str
:return: JSON response
:rtype: dict
"""
url = '{api}/jobs/{job_id}/runs/{run_id}/actions/stop'.format(
api=self._api_version, job_id=job_id, run_id=run_id)
return self._http_req_json(self.post, url)
```
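To make the client's flow concrete, here is a hedged lifecycle sketch using only the methods above. The caller is expected to supply a configured `Jobs` instance; the job definition fields follow the usual Metronome format, which is an assumption rather than something defined in this module.
```python
def run_example_job(jobs: Jobs) -> None:
    """Hedged sketch of the job lifecycle using the client above."""
    job_definition = {
        # Metronome-style definition; the field names are an assumption.
        'id': 'example-job',
        'run': {'cmd': 'echo hello', 'cpus': 0.1, 'mem': 32, 'disk': 0},
    }
    jobs.create(job_definition)
    success, finished_run, _details = jobs.run('example-job', timeout=300)
    print('run succeeded:', success)
    jobs.destroy('example-job')
```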
#### File: _vendor/vertigo_py/vertigo.py
```python
import os
import re
import subprocess
from . import constants
from .error import *
# functions that don't fall into the VM class
# basic utility function to execute some arguments and return the result
def execute (args):
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise CommandError(args, e)
return result
# Return the current Virtualbox version as a string
def version():
return subprocess.check_output([constants.cmd, "-v"])
# Public: List available virtual machines, virtual devices and their relevant
# properties. Currently only returns a string representation. Will eventually
# return a more structured format, probably a dictionary
#
# option - the resource to list. Possible options listed in constants.py and the
# VBoxManage manual
# longform - supply the --long switch to VBoxManage. Only relevant for a few
# options
#
# Returns a string representation of the requested option, or a dictionary of
# all of them
def ls(option="all", longform=False):
cmd = [constants.cmd, "list"]
if longform:
cmd.append("--long")
if not option in constants.lsopts and not option == "all":
raise UnknownOptionError("list", option)
if option == "all":
result = {}
for opt in constants.lsopts:
result[opt] = subprocess.check_output(cmd + [opt])
return result
else:
return subprocess.check_output(cmd + [option])
# Public: Create a new virtual machine with the given options.
#
# name - String that is the name of the new VM
# ostype - String that should be the OS type
# register - Boolean whether or not to register this VM in Virtualbox
# basefolder - String giving the path where to store the VM files
# uuid - Hexadecimal String to be the UUID of the VM
#
# Returns a VM object (eventually) wrapping the VM
def createvm(name,ostype=None,register=False,basefolder=None,uuid=None):
cmd = [constants.cmd, "createvm", "--name", name]
if ostype:
cmd += ["--ostype", ostype]
if register:
cmd += ["--register"]
if basefolder:
cmd += ["--basefolder", basefolder]
if uuid:
cmd += ["--uuid", uuid]
# TODO: change to return VM object
return subprocess.check_output(cmd)
# Public: Register a VM from its XML file
#
# filename - String giving the filepath to the XML file to use
#
# Returns True if the registration succeeded.
# Raises RegistrationError otherwise
def registervm(filename):
args = [constants.cmd, "registervm", filename]
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise RegistrationError(filename, e)
return True
# Public: Close a device based on a UUID or a filename
#
# device - one of "dvd", "floppy" or "disk"
# target - UUID or filename
# delete - whether or not to delete the device after closing
#
# Returns True if the registration succeeded.
# Raises NoMediumError if the device type is invalid, CommandError if there's
# some other error
def closemedium(device, target, delete=False):
if not device in constants.closemediumopts:
raise NoMediumError(device, target, delete)
args = [constants.cmd, "closemedium", target]
if delete:
args.append("--delete")
execute(args)
return True
# Public: Class that wraps a Virtualbox VM and lets you interact with it and
# configure. Does not interact with the Guest OS in any way.
class VM(object):
# Public: Initialize a VM object to wrap a particular Virtualbox VM. At
# least one of name or UUID must be provided to locate the VM and the VM
# referenced must already exist.
#
# name - String that is the name of VirtualBox VM.
# uuid - Hexadecimal String that is the UUID of the VirtualBox VM.
#
# Returns a VM object wrapping the VirtualBox VM
# Raises UnknownVMError if VM corresponding to the name or UUID is not found
def __init__(self, name=None, uuid=None):
if name == None and uuid == None:
raise UnknownVMError(name, uuid)
if not name:
argid = uuid
else:
argid = name
try:
args = [constants.cmd, "showvminfo", "--machinereadable", argid]
self.vminfo = subprocess.check_output(args)
except subprocess.CalledProcessError:
raise UnknownVMError(name, uuid)
self.info = self.parse_info(self.vminfo)
self.__name = self.info['name']
self.__uuid = self.info['UUID']
self.started = False
# Public: Parse a raw VM information string as returned by showvminfo and
# turn it into a machine-usable Python dictionary.
#
# rawinfo - String that is the raw information dump from showvminfo
# machine - Boolean saying if the raw information is from using the
# machinereadable switch
# pythonize - Boolean saying if values should be swapped with their Python
# equivalents (True for on, False for off, None for <none>)
#
# Returns a dictionary of information keys to their provided values
def parse_info(self, rawinfo=None,machine=True, pythonize=True):
if not rawinfo:
rawinfo = self.vminfo
info = {}
longkey = None
longval = None
if machine:
sep = "="
else:
sep = ":"
for line in rawinfo.splitlines():
line = line.decode()
parts = line.split(sep)
# Work with multiline key-value pairs
if not machine:
if len(parts) == 1 and not longkey:
longkey = parts[0].strip()
longval = ""
continue
elif len(parts) == 1:
                    longval += "\n"
longval += line
continue
else:
longkey = None
longval = None
key = parts[0].strip()
value = ':'.join(parts[1:]).strip()
else:
key = parts[0].strip()
value = parts[1].strip(' \"')
if pythonize:
# Turn numbers to ints
try:
value = int(value)
except ValueError:
pass
# Turn on/off/none to True/False/None
if value == "on":
value = True
elif value == "off":
value = False
elif value == "none":
value = None
info[key] = value
return info
# Public: Create a Python dictionary representing the output from the
# showvminfo command. Uses parse_info to parse the raw string and places the
# raw string into a 'string' key in the dictionary.
#
# details - Boolean to use the --details flag
# machine - Boolean to use the --machinereadable flag (easier to parse)
# pythonize - Boolean saying if values should be swapped with their Python
# equivalents (True for on, False for off, None for <none>)
#
# Returns the parsed dictionary representation
def showvminfo(self, details=False, machine=True, pythonize=True):
args = [constants.cmd, "showvminfo"]
if details:
args += ["--details"]
if machine:
args += ["--machinereadable"]
args += [self.__uuid]
info = subprocess.check_output(args)
parsed = self.parse_info(info, machine, pythonize)
parsed['string'] = info
return parsed
# Public: Unregister the VM and optionally delete
#
# delete - Boolean to delete the VM as well as unregister
#
# Returns True if unregistering was successful
# Raises the generic CommandError otherwise
def unregistervm(self, delete=False):
args = [constants.cmd, "unregistervm", self.__uuid]
if delete:
args += ["--delete"]
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise CommandError(args, e)
return True
# Public: Make modifications to the current VM
#
# option - string option to be modified
# optargs - List of arguments relevant to the option
#
# Returns the output of the modifyvm command
# Raises UnknownOptionError if the option or arguments are incorrect
# Raises CommandError if the modifyvm command fails for some reason
def modifyvm(self,option=None,*optargs):
optargs = list(optargs)
if not option in constants.modopts:
raise UnknownOptionError("modifyvm", option)
else:
args = [constants.cmd, "modifyvm", self.name]
if option in constants.modboolopts:
if optargs[0] == True or optargs[0] == "on":
args += ["on"]
                elif optargs[0] == False or optargs[0] == "off":
args += ["off"]
else:
raise UnknownOptionError("modifyvm " + option, optargs[0])
elif option in constants.modindexopts:
try:
index = int(optargs[0])
except ValueError:
raise UnknownOptionError("modifyvm " + option, optargs[0])
args += ["--" + option + str(index)] + optargs[1:]
elif option in constants.modenumopts.keys():
if not optargs[0] in constants.modenumopts[option]:
raise UnknownOptionError("modifyvm " + option, optargs[0])
else:
args += ["--" + option, optargs[0]]
else:
args += ["--" + option] + optargs
try:
args = map(str, args)
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise CommandError(args, e)
return result
def start(self, gui="gui"):
args = [constants.cmd, "startvm", self.name, "--type", gui]
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise CommandError(args, e)
self.started = True
return result
def controlvm(self,option=None,*optargs):
optargs = list(optargs)
if not option in constants.ctrlopts:
raise UnknownOptionError("controlvm", option)
else:
args = [constants.cmd, "controlvm", self.name]
if option in constants.ctrlboolopts:
if optargs[0] == True or optargs[0] == "on":
args += ["on"]
                elif optargs[0] == False or optargs[0] == "off":
                    args += ["off"]
                else:
                    raise UnknownOptionError("controlvm " + option, optargs[0])
elif option in constants.ctrlindexopts:
try:
index = int(optargs[0])
except ValueError:
                    raise UnknownOptionError("controlvm " + option, optargs[0])
args += ["--" + option + str(index)] + optargs[1:]
# elif option in constants.ctrlenumopts.keys():
# if not optargs[0] in constants.ctrlenumopts[option]:
# raise UnknownOptionError("modifyvm " + option, optargs[0])
# else:
# args += ["--" + option, optargs[0]]
else:
args += [option] + optargs
args = map(str, args)
return execute(args)
# Public: Discard current VM state
#
# Return True if the discard happened properly
# Raise CommandError otherwise
def discardstate(self):
args = [constants.cmd, "discardstate", self.UUID]
execute(args)
return True
# Public: Load VM state from a given filepath
#
# filepath - String giving path to the state path
#
# Return True if the adoption succeeded
# Raise IOError if there is no such file
# CommandError if the command fails otherwise
def adoptstate(self, filepath):
        if os.path.isfile(filepath):
            args = [constants.cmd, "adoptstate", self.UUID, filepath]
else:
raise IOError("No such state file: " + filepath)
execute(args)
return True
def __getattr__(self, name):
try:
value = self.info[constants.mod_to_ls[name]]
except KeyError:
value = self.info[name]
return value
def __setattr__(self, name, value):
m = re.match('([a-zA-Z]+)([0-9])', name)
if m:
name = m.group(1)
value = [value]
value.insert(0,m.group(2))
if name in constants.modopts and not self.started:
self.modifyvm(name, *value)
elif name in constants.ctrlopts and self.started:
self.controlvm(name, *value)
else:
pass
self.__dict__[name] = value
```
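A short usage sketch of this wrapper follows. It assumes a local VirtualBox installation with an already-registered VM named 'test-vm'; the VM name, and the assumption that 'vms' is one of the listing options in `constants.lsopts`, are illustrative only.
```python
print(version())           # raw VBoxManage version string
print(ls('vms'))           # assumes 'vms' is among constants.lsopts
vm = VM(name='test-vm')    # wrap an existing, registered VM by name
info = vm.showvminfo()     # parsed dict of VM properties plus a raw 'string' key
print(info['UUID'])
vm.start(gui='headless')   # start the VM without a GUI window
```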
#### File: src/dcos_e2e/cluster.py
```python
import logging
import subprocess
from contextlib import ContextDecorator
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from retry import retry
from . import _wait_for_dcos
from ._existing_cluster import ExistingCluster as _ExistingCluster
from .base_classes import ClusterManager # noqa: F401
from .base_classes import ClusterBackend
from .node import Node, Output, Role, Transport
LOGGER = logging.getLogger(__name__)
@retry(
exceptions=(subprocess.CalledProcessError),
tries=5,
delay=1,
)
def _wait_for_ssh(node: Node) -> None:
"""
Retry up to five times (arbitrary) until SSH is available on the given
node.
"""
# In theory we could just use any args and specify the transport as SSH.
# However, this would not work on macOS without a special network set up.
args = [
'systemctl',
'status',
'sshd.socket',
'||',
'systemctl',
'status',
'sshd',
]
node.run(
args=args,
output=Output.LOG_AND_CAPTURE,
shell=True,
)
class Cluster(ContextDecorator):
"""
A record of a DC/OS cluster.
This is intended to be used as context manager.
"""
def __init__(
self,
cluster_backend: ClusterBackend,
masters: int = 1,
agents: int = 1,
public_agents: int = 1,
) -> None:
"""
Create a DC/OS cluster.
Args:
cluster_backend: The backend to use for the cluster.
masters: The number of master nodes to create.
agents: The number of agent nodes to create.
public_agents: The number of public agent nodes to create.
"""
self._cluster = cluster_backend.cluster_cls(
masters=masters,
agents=agents,
public_agents=public_agents,
cluster_backend=cluster_backend,
) # type: ClusterManager
self._base_config = cluster_backend.base_config
for node in {
*self.masters,
*self.agents,
*self.public_agents,
}:
_wait_for_ssh(node=node)
@classmethod
def from_nodes(
cls,
masters: Set[Node],
agents: Set[Node],
public_agents: Set[Node],
) -> 'Cluster':
"""
Create a cluster from existing nodes.
Args:
masters: The master nodes in an existing cluster.
agents: The agent nodes in an existing cluster.
public_agents: The public agent nodes in an existing cluster.
Returns:
A cluster object with the nodes of an existing cluster.
"""
backend = _ExistingCluster(
masters=masters,
agents=agents,
public_agents=public_agents,
)
return cls(
masters=len(masters),
agents=len(agents),
public_agents=len(public_agents),
cluster_backend=backend,
)
def wait_for_dcos_oss(
self,
http_checks: bool = True,
) -> None:
"""
Wait until the DC/OS OSS boot process has completed.
Args:
http_checks: Whether or not to wait for checks which involve HTTP.
If this is `False`, this function may return before DC/OS is
fully ready. This is useful in cases where an HTTP connection
cannot be made to the cluster. For example, this is useful on
macOS without a VPN set up.
Raises:
dcos_e2e.exceptions.DCOSTimeoutError: Raised if cluster components
did not become ready within one hour.
"""
_wait_for_dcos.wait_for_dcos_oss(
masters=self.masters,
agents=self.agents,
public_agents=self.public_agents,
http_checks=http_checks,
)
def wait_for_dcos_ee(
self,
superuser_username: str,
superuser_password: str,
http_checks: bool = True,
) -> None:
"""
Wait until the DC/OS Enterprise boot process has completed.
Args:
superuser_username: Username of a user with superuser privileges.
            superuser_password: Password of a user with superuser privileges.
http_checks: Whether or not to wait for checks which involve HTTP.
If this is `False`, this function may return before DC/OS is
fully ready. This is useful in cases where an HTTP connection
cannot be made to the cluster. For example, this is useful on
macOS without a VPN set up.
Raises:
dcos_e2e.exceptions.DCOSTimeoutError: Raised if cluster components
did not become ready within one hour.
"""
_wait_for_dcos.wait_for_dcos_ee(
masters=self.masters,
agents=self.agents,
public_agents=self.public_agents,
superuser_username=superuser_username,
            superuser_password=superuser_password,
http_checks=http_checks,
)
def install_dcos_from_url(
self,
dcos_installer: str,
dcos_config: Dict[str, Any],
ip_detect_path: Path,
output: Output = Output.CAPTURE,
files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]] = (),
) -> None:
"""
Installs DC/OS using the DC/OS advanced installation method.
Args:
dcos_installer: A URL pointing to an installer to install DC/OS
from.
dcos_config: The contents of the DC/OS ``config.yaml``.
ip_detect_path: The path to a ``ip-detect`` script that will be
used when installing DC/OS.
files_to_copy_to_genconf_dir: Pairs of host paths to paths on
the installer node. These are files to copy from the host to
the installer node before installing DC/OS.
output: What happens with stdout and stderr.
"""
self._cluster.install_dcos_from_url(
dcos_installer=dcos_installer,
dcos_config=dcos_config,
ip_detect_path=ip_detect_path,
files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
output=output,
)
def install_dcos_from_path(
self,
dcos_installer: Path,
dcos_config: Dict[str, Any],
ip_detect_path: Path,
output: Output = Output.CAPTURE,
files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]] = (),
) -> None:
"""
Installs DC/OS using the DC/OS advanced installation method.
Args:
dcos_installer: The ``Path`` to a local installer to install DC/OS
from.
dcos_config: The contents of the DC/OS ``config.yaml``.
ip_detect_path: The path to a ``ip-detect`` script that will be
used when installing DC/OS.
files_to_copy_to_genconf_dir: Pairs of host paths to paths on
the installer node. These are files to copy from the host to
the installer node before installing DC/OS.
output: What happens with stdout and stderr.
"""
self._cluster.install_dcos_from_path(
dcos_installer=dcos_installer,
dcos_config=dcos_config,
ip_detect_path=ip_detect_path,
files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
output=output,
)
def upgrade_dcos_from_url(
self,
dcos_installer: str,
dcos_config: Dict[str, Any],
ip_detect_path: Path,
output: Output = Output.CAPTURE,
files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]] = (),
) -> None:
"""
Upgrade DC/OS.
Args:
dcos_installer: A URL pointing to an installer to upgrade DC/OS
from.
dcos_config: The DC/OS configuration to use.
ip_detect_path: The path to a ``ip-detect`` script that will be
used when installing DC/OS.
files_to_copy_to_genconf_dir: Pairs of host paths to paths on
the installer node. These are files to copy from the host to
the installer node before installing DC/OS.
output: What happens with stdout and stderr.
"""
for nodes, role in (
(self.masters, Role.MASTER),
(self.agents, Role.AGENT),
(self.public_agents, Role.PUBLIC_AGENT),
):
for node in nodes:
node.upgrade_dcos_from_url(
dcos_installer=dcos_installer,
dcos_config=dcos_config,
ip_detect_path=ip_detect_path,
role=role,
files_to_copy_to_genconf_dir=(
files_to_copy_to_genconf_dir
),
output=output,
)
def upgrade_dcos_from_path(
self,
dcos_installer: Path,
dcos_config: Dict[str, Any],
ip_detect_path: Path,
output: Output = Output.CAPTURE,
files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]] = (),
) -> None:
"""
Upgrade DC/OS.
Args:
            dcos_installer: The ``Path`` to a local installer to upgrade
                DC/OS from.
dcos_config: The DC/OS configuration to use.
ip_detect_path: The path to a ``ip-detect`` script that will be
used when installing DC/OS.
files_to_copy_to_genconf_dir: Pairs of host paths to paths on
the installer node. These are files to copy from the host to
the installer node before installing DC/OS.
output: What happens with stdout and stderr.
"""
for nodes, role in (
(self.masters, Role.MASTER),
(self.agents, Role.AGENT),
(self.public_agents, Role.PUBLIC_AGENT),
):
for node in nodes:
node.upgrade_dcos_from_path(
dcos_installer=dcos_installer,
dcos_config=dcos_config,
ip_detect_path=ip_detect_path,
role=role,
files_to_copy_to_genconf_dir=(
files_to_copy_to_genconf_dir
),
output=output,
)
def __enter__(self) -> 'Cluster':
"""
Enter a context manager.
The context manager receives this ``Cluster`` instance.
"""
return self
@property
def masters(self) -> Set[Node]:
"""
Return all DC/OS master :class:`.node.Node` s.
"""
return self._cluster.masters
@property
def agents(self) -> Set[Node]:
"""
Return all DC/OS agent :class:`.node.Node` s.
"""
return self._cluster.agents
@property
def public_agents(self) -> Set[Node]:
"""
Return all DC/OS public agent :class:`.node.Node` s.
"""
return self._cluster.public_agents
@property
def base_config(self) -> Dict[str, Any]:
"""
Return a base configuration for installing DC/OS OSS.
"""
def ip_list(nodes: Set[Node]) -> List[str]:
return list(map(lambda node: str(node.private_ip_address), nodes))
config = {
'agent_list': ip_list(nodes=self.agents),
'master_list': ip_list(nodes=self.masters),
'public_agent_list': ip_list(nodes=self.public_agents),
}
return {
**config,
**self._base_config,
}
def run_with_test_environment(
self,
args: List[str],
env: Optional[Dict[str, Any]] = None,
output: Output = Output.CAPTURE,
tty: bool = False,
node: Optional[Node] = None,
transport: Optional[Transport] = None,
) -> subprocess.CompletedProcess:
"""
Run a command on a node using the Mesosphere test environment.
Args:
args: The command to run on the node.
env: Environment variables to be set on the node before running
the command. On enterprise clusters, ``DCOS_LOGIN_UNAME`` and
``DCOS_LOGIN_PW`` must be set.
output: What happens with stdout and stderr.
            node: The node to run the given command on. If not given, an
arbitrary master node is used.
            tty: If ``True``, allocate a pseudo-tty. This means that the user's
terminal is attached to the streams of the process.
This means that the values of stdout and stderr will not be in
the returned ``subprocess.CompletedProcess``.
transport: The transport to use for communicating with nodes. If
``None``, the ``Node``'s ``default_transport`` is used.
Returns:
The result of the given command.
Raises:
subprocess.CalledProcessError: If the command fails.
"""
args = [
'.',
'/opt/mesosphere/environment.export',
'&&',
'cd',
'/opt/mesosphere/active/dcos-integration-test/',
'&&',
*args,
]
env = env or {}
def ip_addresses(nodes: Iterable[Node]) -> str:
return ','.join(
map(lambda node: str(node.private_ip_address), nodes),
)
# Tests are run on a random master node if no node is given.
node = node or next(iter(self.masters))
environment_variables = {
# This is needed for 1.9 (and below?)
'PUBLIC_MASTER_HOSTS': ip_addresses(self.masters),
'MASTER_HOSTS': ip_addresses(self.masters),
'SLAVE_HOSTS': ip_addresses(self.agents),
'PUBLIC_SLAVE_HOSTS': ip_addresses(self.public_agents),
'DCOS_DNS_ADDRESS': 'http://' + str(node.private_ip_address),
# This is only used by DC/OS 1.9 integration tests
'DCOS_NUM_MASTERS': len(self.masters),
'DCOS_NUM_AGENTS': len(self.agents) + len(self.public_agents),
**env,
}
return node.run(
args=args,
output=output,
env=environment_variables,
tty=tty,
shell=True,
transport=transport,
)
def destroy(self) -> None:
"""
Destroy all nodes in the cluster.
"""
self._cluster.destroy()
def destroy_node(self, node: Node) -> None:
"""
Destroy a node in the cluster.
"""
self._cluster.destroy_node(node=node)
def __exit__(
self,
exc_type: Optional[type],
exc_value: Optional[Exception],
traceback: Any,
) -> bool:
"""
On exiting, destroy all nodes in the cluster if the backend supports
it.
"""
# This is a hack to make Vulture not think that these are unused
# arguments. We have to receive them to be a valid context manager.
for _ in (exc_type, exc_value, traceback):
pass
try:
self.destroy()
except NotImplementedError:
pass
return False
``` |
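A hedged sketch of how `Cluster` is typically driven: it assumes the Docker backend shipped with this package (the `dcos_e2e.backends` import path is an assumption) and only runs a trivial command on each master; installing DC/OS with `install_dcos_from_path` and waiting with `wait_for_dcos_oss` would follow the same pattern inside the `with` block.
```python
from dcos_e2e.cluster import Cluster
from dcos_e2e.backends import Docker  # import path is an assumption

# Create a small cluster, run a command on each master, then let the
# context manager destroy the nodes on exit.
with Cluster(cluster_backend=Docker(), masters=1, agents=1, public_agents=0) as cluster:
    for master in cluster.masters:
        master.run(args=['echo', 'hello'])
```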
{
"source": "jongiddy/jute",
"score": 3
} |
#### File: jute/test/test_jute_call.py
```python
import unittest
from jute import Opaque, DynamicInterface, implements
class Callable(Opaque):
def __call__(self):
"""Interface for a callable."""
class CallTestMixin:
def get_test_object(self):
return object()
def test_call(self):
callable = self.get_test_object()
self.assertEqual(callable(), 0)
def test_getattr(self):
callable = self.get_test_object()
self.assertEqual(getattr(callable, '__call__')(), 0)
def test_attribute(self):
callable = self.get_test_object()
self.assertEqual(callable.__call__(), 0)
@implements(Callable)
class BasicCallable:
def __call__(self):
return 0
class CallableInstanceTests(CallTestMixin, unittest.TestCase):
def get_test_object(self):
return BasicCallable()
class CallableInterfaceTests(CallTestMixin, unittest.TestCase):
def get_test_object(self):
return Callable(BasicCallable())
@implements(DynamicInterface)
class CallableProxy:
def __init__(self, wrapped_callable):
self.wrapped = wrapped_callable
def provides_interface(self, interface):
return interface.implemented_by(Callable)
def __call__(self):
return self.wrapped()
class CallableDynamicInstanceTests(CallTestMixin, unittest.TestCase):
def get_test_object(self):
return CallableProxy(BasicCallable())
class CallableDynamicInterfaceTests(CallTestMixin, unittest.TestCase):
def get_test_object(self):
return Callable(CallableProxy(BasicCallable()))
@implements(Callable)
class GeneratedCallable:
"""A class that generates the __call__ method dynamically."""
def __getattr__(self, name):
if name == '__call__':
def f():
return 0
return f
raise AttributeError(name)
class GeneratedCallTestMixin(CallTestMixin):
"""Test __call__ for a provider that generates __call__."""
def test_call(self):
callable = self.get_test_object()
with self.assertRaises(TypeError):
callable()
class GeneratedCallInstanceTests(GeneratedCallTestMixin, unittest.TestCase):
def get_test_object(self):
return GeneratedCallable()
class GeneratedCallInterfaceTests(GeneratedCallTestMixin, unittest.TestCase):
def get_test_object(self):
return Callable(GeneratedCallable())
```
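The `GeneratedCallable` tests above rely on a CPython detail: implicit special-method lookup happens on the type, not the instance, so a `__call__` produced by `__getattr__` is ignored when the object is called directly. A standalone sketch of that behaviour, independent of jute:

```python
class Dynamic:
    def __getattr__(self, name):
        if name == '__call__':
            return lambda: 0
        raise AttributeError(name)


d = Dynamic()
assert d.__call__() == 0  # explicit attribute access goes through __getattr__
try:
    d()  # implicit lookup uses type(d), which defines no __call__
except TypeError:
    print('not callable via implicit lookup')
```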
#### File: jute/test/test_jute_special.py
```python
import unittest
from jute import Opaque, InvalidAttributeName
class SpecialTests(unittest.TestCase):
def test_init(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __init__(self):
pass
def test_repr(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __repr__(self):
pass
def test_dir(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __dir__(self):
pass
def test_getattribute(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __getattribute__(self, name):
pass
def test_getattr(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __getattr__(self, name):
pass
def test_setattr(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __setattr__(self, name, value):
pass
def test_delattr(self):
with self.assertRaises(InvalidAttributeName):
class AnInterface(Opaque):
def __delattr__(self):
pass
```
#### File: jongiddy/jute/setup.py
```python
from distutils.core import setup
import sys
GITHUB_URL = 'https://github.com/jongiddy/jute'
VERSION = '0.2.1'
if sys.version_info[0] < 3:
sys.exit('The jute package requires Python 3.')
def contents_of(filename):
with open(filename, encoding='utf-8') as f:
return f.read()
setup(
name='jute',
packages=['jute'],
package_dir={'jute': 'python3/jute'},
version=VERSION,
description='Interface module that verifies both providers and callers',
long_description=contents_of('README.rst'),
keywords=['interface', 'polymorphism'],
author='<NAME>',
author_email='<EMAIL>',
url=GITHUB_URL,
download_url='{}/tarball/v{}'.format(GITHUB_URL, VERSION),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
],
extras_require={
'doc': [
"pyenchant", # pre-requisite for sphinxcontrib-spelling
"sphinx",
"sphinxcontrib-spelling",
],
},
)
```
{
"source": "jongiddy/sublime-gidterm",
"score": 2
} |
#### File: jongiddy/sublime-gidterm/gidterm.py
```python
import codecs
from collections import namedtuple
from datetime import datetime, timedelta, timezone
import errno
import fcntl
import html
import os
import pty
import re
from select import select
import shlex
import signal
import tempfile
import traceback
import sublime # type: ignore
import sublime_plugin # type: ignore
this_package = os.path.dirname(__file__)
config_dir = os.path.join(this_package, 'config')
terminal_rows = 24
terminal_cols = 80
_initial_profile = r'''
# Read the standard profile, to give a familiar environment. The profile can
# detect that it is in GidTerm using the `TERM_PROGRAM` environment variable.
export TERM_PROGRAM=Sublime-GidTerm
if [ -r ~/.profile ]; then . ~/.profile; fi
# Replace the settings needed for GidTerm to work, notably the prompt formats.
PROMPT_DIRTRIM=
_gidterm_ps1 () {
status=$?
old_prompt_command=$1
PS1="\$ ";
eval "${old_prompt_command}";
PS1="\\[\\e[1p${status}@\\w\\e[~\\e[5p\\]${PS1}\\[\\e[~\\]";
tmpfile=${GIDTERM_CACHE}.$$;
{
shopt -p &&
declare -p | grep -v '^declare -[a-qs-z]*r' &&
declare -f &&
alias -p;
} > ${tmpfile} && mv ${tmpfile} ${GIDTERM_CACHE};
}
# The old `PROMPT_COMMAND` may be a function that, on reload, has not been
# declared when `_gidterm_ps1` is being declared. If `${GIDTERM_PC}` appears
# directly in the `_gidterm_ps1` declaration, the undefined function can cause
# an error. Instead we pass the old `PROMPT_COMMAND` as a parameter.
GIDTERM_PC=${PROMPT_COMMAND:-:}
PROMPT_COMMAND='_gidterm_ps1 "${GIDTERM_PC}"'
PS0='\e[0!p'
PS2='\e[2!p'
export TERM=ansi
# Set LINES and COLUMNS to a standard size for commands run by the shell to
# avoid tools creating wonky output, e.g. many tools display a completion
# percentage on the right side of the screen. man pages are formatted to fit
# the width COLUMNS. Prevent bash from resetting these variables.
#
shopt -u checkwinsize
export COLUMNS=%d
export LINES=%d
# Avoid paging by using `cat` as the default pager. This is generally nicer
# because you can scroll and search using Sublime Text. For situations where
# the pager is typically used to see the first entries, use command options
# like `git log -n 5` or pipe to `head`.
export PAGER=cat
# Don't add control commands to the history
export HISTIGNORE=${HISTIGNORE:+${HISTIGNORE}:}'*# [@gidterm@]'
# Specific configuration to make applications work well with GidTerm
GIDTERM_CONFIG="%s"
export RIPGREP_CONFIG_PATH=${GIDTERM_CONFIG}/ripgrep
''' % (terminal_cols, terminal_rows, config_dir)
_exit_status_info = {} # type: dict[str, str]
for name in dir(signal):
if name.startswith('SIG') and not name.startswith('SIG_'):
if name in ('SIGRTMIN', 'SIGRTMAX'):
continue
try:
signum = int(getattr(signal, name))
except Exception:
continue
_exit_status_info[str(signum + 128)] = '\U0001f5f2' + name
def warn(message):
# type: (str) -> None
print('GidTerm: [WARN] {}'.format(message))
def timedelta_seconds(seconds):
# type: (float) -> timedelta
s = int(round(seconds))
return timedelta(seconds=s)
TITLE_LENGTH = 32
PROMPT = '$'
ELLIPSIS = '\u2025'
LONG_ELLIPSIS = '\u2026'
def _get_package_location(winvar):
# type: (dict[str, str]) -> str
packages = winvar['packages']
this_package = os.path.dirname(__file__)
assert this_package.startswith(packages)
unwanted = os.path.dirname(packages)
# add one to remove pathname delimiter /
return this_package[len(unwanted) + 1:]
panel_cache = {} # type: dict[int, DisplayPanel|LivePanel]
def cache_panel(view, panel):
# type: (sublime.View, DisplayPanel|LivePanel) -> None
panel_cache[view.id()] = panel
def uncache_panel(view):
# type: (sublime.View) -> None
try:
del panel_cache[view.id()]
except KeyError:
warn('panel not found: {}'.format(panel_cache))
def get_panel(view):
# type: (sublime.View) -> DisplayPanel|LivePanel|None
panel = panel_cache.get(view.id())
if panel is None:
settings = view.settings()
if settings.get('is_gidterm_display'):
panel = DisplayPanel(view)
cache_panel(view, panel)
return panel
def get_display_panel(view):
# type: (sublime.View) -> DisplayPanel
panel = get_panel(view)
assert isinstance(panel, DisplayPanel)
return panel
def gidterm_decode_error(e):
# type: (...) -> tuple[str, int]
# If text is not Unicode, it is most likely Latin-1. Windows-1252 is a
# superset of Latin-1 and may be present in downloaded files.
# TODO: Use the LANG setting to select appropriate fallback encoding
b = e.object[e.start:e.end]
try:
s = b.decode('windows-1252')
except UnicodeDecodeError:
# If even that can't decode, fallback to using Unicode replacement char
s = b.decode('utf8', 'replace')
warn('{}: replacing {!r} with {!r}'.format(e.reason, b, s.encode('utf8')))
return s, e.end
codecs.register_error('gidterm', gidterm_decode_error)
class Terminal:
def __init__(self):
# type: () -> None
self.pid = None # type: int|None
self.fd = None # type: int|None
utf8_decoder_factory = codecs.getincrementaldecoder('utf8')
self.decoder = utf8_decoder_factory(errors='gidterm')
def __del__(self):
# type: () -> None
self.stop()
def start(self, workdir, init_file):
# type: (str, str) -> None
args = [
'bash', '--rcfile', init_file
]
env = os.environ.copy()
env.update({
# If COLUMNS is the default of 80, the shell will break long
# prompts over two lines, making them harder to search for. It also
# allows the shell to use UP control characters to edit lines
# during command history navigation, which is difficult to replicate
# correctly. Setting COLUMNS to a very large value avoids these
# behaviours.
#
# When displaying command completion lists, bash pages them based
# on the LINES variable. A large LINES value avoids paging.
#
# Note that we tell bash that we have a very large terminal, then,
# through the init script, tell applications started by bash that
# they have a more typical terminal size.
'COLUMNS': '32767',
'LINES': '32767',
'TERM': 'ansi',
})
self.pid, self.fd = pty.fork()
if self.pid == 0:
# child
try:
os.chdir(os.path.expanduser(workdir))
except Exception:
traceback.print_exc()
os.execvpe('bash', args, env)
else:
# Prevent this file descriptor ending up opened in any subsequent
# child processes, blocking the close(fd) in this process from
# terminating the shell.
state = fcntl.fcntl(self.fd, fcntl.F_GETFD)
fcntl.fcntl(self.fd, fcntl.F_SETFD, state | fcntl.FD_CLOEXEC)
def stop(self):
# type: () -> None
if self.fd is not None:
os.close(self.fd)
self.fd = None
if self.pid is not None:
pid, status = os.waitpid(self.pid, 0)
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self.pid = None
def send(self, s):
# type: (str) -> bool
if self.fd is None:
return False
if s:
os.write(self.fd, s.encode('utf8'))
return True
def ready(self):
# type: () -> bool
fd = self.fd
if fd is None:
return True
rfds, wfds, xfds = select((fd,), (), (), 0)
return fd in rfds
def receive(self):
# type: () -> str
fd = self.fd
if fd is None:
return ''
try:
buf = os.read(fd, 2048)
except OSError as e:
if e.errno == errno.EIO:
return self.decoder.decode(b'', final=True)
raise
return self.decoder.decode(buf, final=not buf)
class TerminalOutput:
# Pattern to match control characters from the terminal that
# need to be handled specially.
_escape_pat = re.compile(
r'('
r'\x07|' # BEL
r'\x08+|' # BACKSPACE's
r'\r+|' # CR's
r'\n|' # NL
r'\x1b(?:' # Escapes:
r'[()*+]B|' # - codeset
r'\]0;.*?(?:\x07|\x1b\\)|' # - set title
r'\[[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e]' # - CSI
r'))'
)
# Pattern to match the prefix of above. If it occurs at the end of
# text, wait for more text to find escape.
_partial_pat = re.compile(
r'\x1b([()*+]|\](?:0;?)?.*|\[[\x30-\x3f]*[\x20-\x2f]*)?$'
)
NotReady = namedtuple('NotReady', ())
Text = namedtuple('Text', 'text')
Prompt1Starts = namedtuple('Prompt1Starts', ())
Prompt1Stops = namedtuple('Prompt1Stops', ())
Prompt2Starts = namedtuple('Prompt2Starts', ())
Prompt2Stops = namedtuple('Prompt2Stops', ())
OutputStarts = namedtuple('OutputStarts', ())
OutputStops = namedtuple('OutputStops', ('status', 'pwd'))
CursorUp = namedtuple('CursorUp', 'n')
CursorDown = namedtuple('CursorDown', 'n')
CursorLeft = namedtuple('CursorLeft', 'n')
CursorRight = namedtuple('CursorRight', 'n')
CursorMoveTo = namedtuple('CursorMoveTo', 'row col')
CursorReturn = namedtuple('CursorReturn', 'n')
LineFeed = namedtuple('LineFeed', ())
ClearToEndOfLine = namedtuple('ClearToEndOfLine', ())
ClearToStartOfLine = namedtuple('ClearToStartOfLine', ())
ClearLine = namedtuple('ClearLine', ())
Insert = namedtuple('Insert', 'n')
Delete = namedtuple('Delete', 'n')
SelectGraphicRendition = namedtuple('SelectGraphicRendition', ('foreground', 'background'))
def __init__(self, terminal):
# type: (Terminal) -> None
self.saved = ''
self.prompt_text = ''
self.in_prompt = None # type: str|None
self._csi_map = {
'@': self.handle_insert,
'A': self.handle_cursor_up,
'B': self.handle_cursor_down,
'C': self.handle_cursor_right,
'D': self.handle_cursor_left,
'H': self.handle_cursor_moveto,
'K': self.handle_clear_line,
'P': self.handle_delete,
'f': self.handle_cursor_moveto,
'm': self.handle_rendition,
}
self.iterator = self.loop(terminal)
def __iter__(self):
return self.iterator
def loop(self, terminal):
# (Terminal) -> Iterator[namedtuple]
while terminal:
if terminal.ready():
s = terminal.receive()
if s:
yield from self.handle_output(s)
else:
# terminal closed output channel
terminal = None
else:
yield TerminalOutput.NotReady()
def handle_output(self, text):
# (str) -> Iterator[namedtuple]
# Add any saved text from previous iteration, split text on control
# characters that are handled specially, then save any partial control
# characters at end of text.
text = self.saved + text
parts = self._escape_pat.split(text)
last = parts[-1]
match = self._partial_pat.search(last)
if match:
i = match.start()
parts[-1], self.saved = last[:i], last[i:]
else:
self.saved = ''
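# Worked example (hypothetical input): for text == 'hi\x1b[31mred\x1b[' the
# split yields ['hi', '\x1b[31m', 'red\x1b[']; the trailing '\x1b[' matches
# _partial_pat, so parts becomes ['hi', '\x1b[31m', 'red'] and '\x1b[' is
# kept in self.saved until more output arrives.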
# Loop over alternating plain and control items
plain = False
for part in parts:
plain = not plain
if self.in_prompt is None:
if plain:
if part:
yield TerminalOutput.Text(part)
else:
if part[0] == '\x1b':
command = part[-1]
if command == 'p':
yield from self.handle_prompt(part)
else:
yield from self.handle_escape(part)
else:
yield from self.handle_control(part)
else:
if not plain and part == '\x1b[~':
yield from self.handle_prompt_end(part)
else:
self.prompt_text += part
def handle_prompt(self, part):
# (str) -> Iterator[namedtuple]
arg = part[2:-1]
if arg.endswith('!'):
# standalone prompt
in_prompt = arg[0]
if in_prompt == '0':
yield TerminalOutput.OutputStarts()
elif in_prompt == '2':
# command input continues
yield TerminalOutput.Prompt2Starts()
yield TerminalOutput.Text('> ')
yield TerminalOutput.Prompt2Stops()
else:
# start of prompt with interpolated text
assert self.in_prompt is None, self.in_prompt
self.in_prompt = arg
self.prompt_text = ''
def handle_prompt_end(self, part):
# (str) -> Iterator[namedtuple]
if self.in_prompt == '1':
# output ends, command input starts
status, pwd = self.prompt_text.split('@', 1)
yield TerminalOutput.OutputStops(status, pwd)
else:
assert self.in_prompt == '5', self.in_prompt
yield TerminalOutput.Prompt1Starts()
ps1 = self.prompt_text
parts = self._escape_pat.split(ps1)
plain = False
for part in parts:
plain = not plain
if plain:
if part:
yield TerminalOutput.Text(part)
else:
if part[0] == '\x1b':
yield from self.handle_escape(part)
else:
yield from self.handle_control(part)
yield TerminalOutput.Prompt1Stops()
self.in_prompt = None
self.prompt_text = ''
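# Worked example (hypothetical values): before each prompt the bash profile
# emits '\x1b[1p' + '0@/home/user' + '\x1b[~'; handle_prompt buffers
# '0@/home/user' and handle_prompt_end yields
# OutputStops(status='0', pwd='/home/user'). The '\x1b[5p...\x1b[~' segment
# that follows is rendered as the visible prompt via Prompt1Starts/Stops.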
def handle_control(self, part):
# (str) -> Iterator[namedtuple]
if part == '\x07':
return
if part[0] == '\x08':
n = len(part)
yield TerminalOutput.CursorLeft(n)
return
if part[0] == '\r':
# move cursor to start of line
n = len(part)
yield TerminalOutput.CursorReturn(n)
return
if part == '\n':
yield TerminalOutput.LineFeed()
return
warn('unknown control: {!r}'.format(part))
def handle_escape(self, part):
# (str) -> Iterator[namedtuple]
if part[1] != '[':
assert part[1] in '()*+]', part
# ignore codeset and set-title
return
command = part[-1]
method = self._csi_map.get(command)
if method is None:
warn('Unhandled escape code: {!r}'.format(part))
else:
yield from method(part[2:-1])
def handle_insert(self, arg):
# (str) -> Iterator[namedtuple]
if arg:
n = int(arg)
else:
n = 1
yield TerminalOutput.Insert(n)
def handle_cursor_up(self, arg):
# (str) -> Iterator[namedtuple]
if arg:
n = int(arg)
else:
n = 1
yield TerminalOutput.CursorUp(n)
def handle_cursor_down(self, arg):
# (str) -> Iterator[namedtuple]
if arg:
n = int(arg)
else:
n = 1
yield TerminalOutput.CursorDown(n)
def handle_cursor_right(self, arg):
# (str) -> Iterator[namedtuple]
if arg:
n = int(arg)
else:
n = 1
yield TerminalOutput.CursorRight(n)
def handle_cursor_left(self, arg):
# (str) -> Iterator[namedtuple]
if arg:
n = int(arg)
else:
n = 1
yield TerminalOutput.CursorLeft(n)
def handle_cursor_moveto(self, arg):
# (str) -> Iterator[namedtuple]
if not arg:
row = 0
col = 0
elif ';' in arg:
parts = arg.split(';')
row = int(parts[0]) - 1
col = int(parts[1]) - 1
else:
row = int(arg) - 1
col = 0
yield TerminalOutput.CursorMoveTo(row, col)
def handle_clear_line(self, arg):
# (str) -> Iterator[namedtuple]
if not arg or arg == '0':
# clear to end of line
yield TerminalOutput.ClearToEndOfLine()
elif arg == '1':
# clear to start of line
yield TerminalOutput.ClearToStartOfLine()
elif arg == '2':
# clear line
yield TerminalOutput.ClearLine()
def handle_delete(self, arg):
# (str) -> Iterator[namedtuple]
n = int(arg)
yield TerminalOutput.Delete(n)
def handle_rendition(self, arg):
# (str) -> Iterator[namedtuple]
if not arg:
# ESC[m -> default
yield TerminalOutput.SelectGraphicRendition('default', 'default')
return
fg = 'default'
bg = 'default'
nums = arg.split(';')
i = 0
while i < len(nums):
num = nums[i]
if num in ('0', '00'):
fg = 'default'
bg = 'default'
elif num in ('1', '01', '2', '02', '22'):
# TODO: handle bold/faint intensity
pass
elif num.startswith('1') and len(num) == 2:
# TODO: handle these
pass
elif num == '30':
fg = 'black'
elif num == '31':
fg = 'red'
elif num == '32':
fg = 'green'
elif num == '33':
fg = 'yellow'
elif num == '34':
fg = 'blue'
elif num == '35':
fg = 'magenta'
elif num == '36':
fg = 'cyan'
elif num == '37':
fg = 'white'
elif num == '38':
i += 1
selector = nums[i]
if selector == '2':
# r, g, b
i += 3
continue
elif selector == '5':
# 8-bit
idx = int(nums[i + 1])
if idx < 8:
nums[i + 1] = str(30 + idx)
elif idx < 16:
nums[i + 1] = str(90 + idx - 8)
elif idx >= 254:
nums[i + 1] = '97' # mostly white
elif idx >= 247:
nums[i + 1] = '37' # light grey
elif idx >= 240:
nums[i + 1] = '90' # dark grey
elif idx >= 232:
nums[i + 1] = '30' # mostly black
else:
assert 16 <= idx <= 231, idx
rg, b = divmod(idx - 16, 6)
r, g = divmod(rg, 6)
r //= 3
g //= 3
b //= 3
x = {
(0, 0, 0): '90',
(0, 0, 1): '94',
(0, 1, 0): '92',
(0, 1, 1): '96',
(1, 0, 0): '91',
(1, 0, 1): '95',
(1, 1, 0): '93',
(1, 1, 1): '37',
}
nums[i + 1] = x[(r, g, b)]
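# Worked example: idx 196 (the brightest red in the 6x6x6 cube) gives
# rg, b = divmod(180, 6) -> (30, 0); r, g = divmod(30, 6) -> (5, 0);
# after the //= 3 reduction (r, g, b) == (1, 0, 0), which maps to '91'
# (bright red).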
elif num == '39':
fg = 'default'
elif num == '40':
bg = 'black'
elif num == '41':
bg = 'red'
elif num == '42':
bg = 'green'
elif num == '43':
bg = 'yellow'
elif num == '44':
bg = 'blue'
elif num == '45':
bg = 'magenta'
elif num == '46':
bg = 'cyan'
elif num == '47':
bg = 'white'
elif num == '48':
i += 1
selector = nums[i]
if selector == '2':
# r, g, b
i += 3
elif selector == '5':
# 8-bit
idx = int(nums[i + 1])
if idx < 8:
nums[i + 1] = str(40 + idx)
elif idx < 16:
nums[i + 1] = str(100 + idx - 8)
elif idx >= 254:
nums[i + 1] = '107' # mostly white
elif idx >= 247:
nums[i + 1] = '47' # light grey
elif idx >= 240:
nums[i + 1] = '100' # dark grey
elif idx >= 232:
nums[i + 1] = '40' # mostly black
else:
assert 16 <= idx <= 231, idx
rg, b = divmod(idx - 16, 6)
r, g = divmod(rg, 6)
r //= 3
g //= 3
b //= 3
x = {
(0, 0, 0): '100',
(0, 0, 1): '104',
(0, 1, 0): '102',
(0, 1, 1): '106',
(1, 0, 0): '101',
(1, 0, 1): '105',
(1, 1, 0): '103',
(1, 1, 1): '47',
}
nums[i + 1] = x[(r, g, b)]
elif num == '49':
bg = 'default'
elif num == '90':
fg = 'brightblack'
elif num == '91':
fg = 'brightred'
elif num == '92':
fg = 'brightgreen'
elif num == '93':
fg = 'brightyellow'
elif num == '94':
fg = 'brightblue'
elif num == '95':
fg = 'brightmagenta'
elif num == '96':
fg = 'brightcyan'
elif num == '97':
fg = 'brightwhite'
elif num == '100':
bg = 'brightblack'
elif num == '101':
bg = 'brightred'
elif num == '102':
bg = 'brightgreen'
elif num == '103':
bg = 'brightyellow'
elif num == '104':
bg = 'brightblue'
elif num == '105':
bg = 'brightmagenta'
elif num == '106':
bg = 'brightcyan'
elif num == '107':
bg = 'brightwhite'
else:
warn('Unhandled SGR code: {} in {}'.format(num, arg))
i += 1
yield TerminalOutput.SelectGraphicRendition(fg, bg)
class CommandHistory:
def __init__(self, view):
# type: (sublime.View) -> None
self.settings = view.settings()
self.load()
def load(self):
# type: () -> None
# Settings allow None, bool, int, float, str, dict, list (with tuples
# converted to lists).
# list[int] is a Region(begin, end) using above types
# next list is multiple Region's for a multi-line command
# next list is each command
self.commands = self.settings.get('gidterm_command_history', []) # type: list[list[list[int]]]
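# Example shape (hypothetical positions): two history entries, the second
# a multi-line command stored as one region per line:
#   [
#       [[10, 25]],
#       [[40, 55], [56, 70]],
#   ]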
def save(self):
# type: () -> None
self.settings.set('gidterm_command_history', self.commands)
def append(self, regions, offset):
# type: (list[sublime.Region], int) -> None
command = [[r.begin() + offset, r.end() + offset] for r in regions]
self.commands.append(command)
self.save()
def regions(self, index):
# type: (int) -> list[sublime.Region]
return [sublime.Region(c[0], c[1]) for c in self.commands[index]]
def first_command_before(self, pos):
# type: (int) -> list[sublime.Region]|None
commands = self.commands
low = 0
if not commands or commands[0][-1][1] > pos:
return None
# low is a known valid object and high is index before the first known invalid object
high = len(commands) - 1
while high > low:
index = low + 1 + (high - low) // 2
command = self.commands[index]
if command[-1][1] <= pos:
low = index
else:
high = index - 1
return self.regions(low)
def first_command_after(self, pos):
# type: (int) -> list[sublime.Region]|None
commands = self.commands
low = 0
if not commands or commands[-1][0][0] < pos:
return None
high = len(self.commands) - 1
# high is a known valid object and low is index after the first known invalid object
while high > low:
index = low + (high - low) // 2
command = self.commands[index]
if command[0][0] >= pos:
high = index
else:
low = index + 1
return self.regions(low)
class DisplayPanel:
def __init__(self, view):
# type: (sublime.View) -> None
self.view = view
settings = view.settings()
init_file = settings.get('gidterm_init_file')
if init_file is None or not os.path.exists(init_file):
contents = settings.get('gidterm_init_script', _initial_profile)
init_file = create_init_file(contents)
settings.set('gidterm_init_file', init_file)
self.init_file = init_file
self._live_panel_name = 'gidterm-{}'.format(view.id())
self.command_history = CommandHistory(view)
self.set_tab_label('gidterm starting\u2026')
self.preview_phantoms = sublime.PhantomSet(view, 'preview')
self.live_panel = LivePanel(
self,
self.live_panel_name(),
self.view.settings().get('current_working_directory'),
self.init_file,
)
def get_color_scheme(self):
# type: () -> str
return self.view.settings().get('color_scheme')
def live_panel_name(self):
# type: () -> str
return self._live_panel_name
def handle_input(self, text):
# type: (str) -> None
self.live_panel.handle_input(text)
def close(self):
# type: () -> None
panel_name = self.live_panel_name()
window = sublime.active_window()
live_view = window.find_output_panel(panel_name)
if live_view:
self.live_panel.close()
uncache_panel(live_view)
window.destroy_output_panel(panel_name)
if os.path.exists(self.init_file):
with open(self.init_file) as f:
self.view.settings().set('gidterm_init_script', f.read())
os.unlink(self.init_file)
def get_display_panel(self):
# type: () -> DisplayPanel
return self
def setpwd(self, pwd):
# type: (str) -> None
settings = self.view.settings()
settings.set('current_working_directory', pwd)
def cursor_position(self):
# type: () -> int|None
view = self.view
sel = view.sel()
if len(sel) != 1:
return None
region = sel[0]
if not region.empty():
return None
return region.begin()
def append_text(self, text, scopes):
# type: (str, dict[str, list[sublime.Region]]) -> None
if text:
text_begin = self.view.size()
# `force` to override read_only state
# `scroll_to_end: False` to stay at same position in text.
self.view.run_command('append', {'characters': text, 'force': True, 'scroll_to_end': False})
text_end = self.view.size()
# `scroll_to_end: False` prevents the cursor and text being moved when
# text is added. However, if the cursor is at the end of the text, this
# is ignored and the cursor moves and the text scrolls to the end.
# This is desirable as it provides a way to follow the display if desired.
# As an exception, if `text_begin` is 0, then the cursor stays at the start.
# We override this to follow initially until an explicit move away from the end.
if text_begin == 0:
self.view.run_command('gidterm_cursor', {'position': text_end})
for scope, new_regions in scopes.items():
regions = self.view.get_regions(scope)
for new_region in new_regions:
# shift region to where text was appended
begin = text_begin + new_region.begin()
end = text_begin + new_region.end()
# trim region to where text was appended
if begin >= text_end:
continue
if end > text_end:
end = text_end
if regions and regions[-1].end() == begin:
# merge into previous region
prev = regions.pop()
region = sublime.Region(prev.begin(), end)
else:
region = sublime.Region(begin, end)
regions.append(region)
self.view.add_regions(
scope, regions, scope,
flags=sublime.DRAW_NO_OUTLINE | sublime.PERSISTENT
)
def set_tab_label(self, label):
# type: (str) -> None
self.view.set_name(label)
def focus_display(self):
# type: () -> None
window = self.view.window()
n = window.num_groups()
for group in range(n):
if self.view in window.views_in_group(group):
window.focus_group(group)
window.focus_view(self.view)
break
def focus_live(self):
# type: () -> None
panel_name = self.live_panel_name()
window = self.view.window()
view = window.find_output_panel(panel_name)
window.run_command('show_panel', {'panel': 'output.{}'.format(panel_name)})
window.focus_view(view)
def show_live(self):
# type: () -> None
panel_name = self.live_panel_name()
window = self.view.window()
window.run_command('show_panel', {'panel': 'output.{}'.format(panel_name)})
def add_command_range(self, command_range):
# type: (list[sublime.Region]) -> None
self.command_history.append(command_range, self.view.size())
def add_output(self, text, home, cursor, scopes):
# type: (str, int, int, dict[str, list[sublime.Region]]) -> None
self.append_text(text[:home], scopes)
self.preview(text[home:], cursor)
def preview(self, text, cursor):
# type: (str, int) -> None
def escape(text):
# type: (str) -> str
return html.escape(text, quote=False).replace(' ', ' ')
text = text.rstrip('\n') # avoid showing extra line for newline
if 0 <= cursor <= len(text):
before = text[:cursor]
here = text[cursor:cursor + 1] or ' '
after = text[cursor + 1:]
text = '%s<u>%s</u>%s' % (escape(before), escape(here), escape(after))
else:
text = escape(text)
end = self.view.size()
parts = text.split('\n')
if end == 0:
# we use LAYOUT_INLINE which needs extra spaces to keep terminal background wide
parts[0] = parts[0] + ' ' * 240
while len(parts) <= terminal_rows:
parts.append('')
text = '<br>'.join(parts)
text = '<body><style>div {background-color: #80808040;}</style><div>%s</div></body>' % text
if end == 0:
# Initially, use INLINE to keep preview on first line, not after it
layout = sublime.LAYOUT_INLINE
else:
# Otherwise, use BLOCK to put it after the second-last line (assuming the last line is empty).
end = end - 1
layout = sublime.LAYOUT_BLOCK
phantom = sublime.Phantom(sublime.Region(end, end), text, layout)
self.preview_phantoms.update([phantom])
def next_command(self):
# type: () -> None
view = self.view
sel = view.sel()
try:
pos = sel[-1].end()
except IndexError:
pos = 0
regions = self.command_history.first_command_after(pos)
if regions is None:
size = view.size()
regions = [sublime.Region(size, size)]
sel = view.sel()
sel.clear()
sel.add_all(regions)
view.show(sel)
def prev_command(self):
# type: () -> None
view = self.view
sel = view.sel()
try:
pos = sel[0].begin()
except IndexError:
pos = view.size()
regions = self.command_history.first_command_before(pos)
if regions is None:
regions = [sublime.Region(0, 0)]
sel = view.sel()
sel.clear()
sel.add_all(regions)
view.show(sel)
def follow(self):
# type: () -> None
# Move cursor to end of view, causing window to follow new output
self.view.run_command('gidterm_cursor', {'position': self.view.size()})
colors = (
'black',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
'brightblack',
'brightred',
'brightgreen',
'brightyellow',
'brightblue',
'brightmagenta',
'brightcyan',
'brightwhite',
)
def get_scopes(view):
# type: (sublime.View) -> dict[str, list[sublime.Region]]
scopes = {}
for foreground in colors:
scope = 'sgr.{}-on-default'.format(foreground)
regions = view.get_regions(scope)
if regions:
scopes[scope] = regions
for background in colors:
scope = 'sgr.default-on-{}'.format(background)
regions = view.get_regions(scope)
if regions:
scopes[scope] = regions
for foreground in colors:
for background in colors:
scope = 'sgr.{}-on-{}'.format(foreground, background)
regions = view.get_regions(scope)
if regions:
scopes[scope] = regions
return scopes
class LivePanel:
def __init__(self, display_panel, panel_name, pwd, init_file):
# type: (DisplayPanel, str, str, str) -> None
self.display_panel = display_panel
self.panel_name = panel_name
self.pwd = pwd
self.init_file = init_file
self.is_active = False
view = self.view = self.reset_view(display_panel, panel_name, pwd)
settings = view.settings()
settings.set('current_working_directory', pwd)
settings.set('gidterm_init_file', init_file)
# State of the output stream
self.scope = '' # type: str
cursor = view.size() # type: int
row, col = view.rowcol(cursor)
if col != 0:
view.run_command('append', {'characters': '\n', 'force': True, 'scroll_to_end': True})
cursor = view.size()
row += 1
self.cursor = cursor
self.home_row = row
# Things that get set during command execution
self.command_start = None # type: int|None
self.command_range = None # type: list[sublime.Region]|None
self.command_words = [] # type: list[str]
self.out_start_time = None # type: datetime|None
self.update_running = False
self.terminal = Terminal() # type: Terminal|None
self.terminal.start(self.pwd, self.init_file)
self.terminal_output = TerminalOutput(self.terminal)
self.buffered = ''
sublime.set_timeout(self.wait_for_prompt, 100)
def close(self):
# type: () -> None
if self.terminal:
self.terminal_closed()
def setpwd(self, pwd):
# type: (str) -> None
self.pwd = pwd
settings = self.view.settings()
settings.set('current_working_directory', pwd)
self.display_panel.setpwd(pwd)
def get_display_panel(self):
# type: () -> DisplayPanel
return self.display_panel
def set_active(self, is_active):
# type: (bool) -> None
self.is_active = is_active
def focus_display(self):
# type: () -> None
self.display_panel.focus_display()
def set_title(self, extra=''):
# type: (str) -> None
if extra:
size = TITLE_LENGTH - len(extra) - 1
if size < 1:
label = extra
else:
label = '{}\ufe19{}'.format(self.make_label(size), extra)
else:
label = self.make_label(TITLE_LENGTH)
self.display_panel.set_tab_label(label)
def make_label(self, size):
# type: (int) -> str
pwd = self.pwd
command_words = self.command_words
if size < 3:
if size == 0:
return ''
if size == 1:
return PROMPT
if len(pwd) < size:
return pwd + PROMPT
return ELLIPSIS + PROMPT
size -= 1 # for PROMPT
if command_words:
arg0 = command_words[0]
if len(command_words) == 1:
if len(arg0) <= size - 1:
# we can fit '> arg0'
right = ' ' + arg0
else:
return '{} {}{}'.format(PROMPT, arg0[:size - 2], ELLIPSIS)
else:
if len(arg0) <= size - 3:
# we can fit '> arg0 ..'
right = ' {} {}'.format(arg0[:size - 3], ELLIPSIS)
else:
return '{} {}{}'.format(
PROMPT, arg0[:size - 3], LONG_ELLIPSIS
)
else:
right = ''
parts = pwd.split('/')
if len(parts) >= 3:
short = '**/{}'.format(parts[-1])
else:
short = pwd
path_len = min(len(pwd), len(short))
right_avail = size - path_len
if len(command_words) > 1 and right_avail > len(right):
# we have space to expand the args
full = ' '.join(command_words)
if len(full) < right_avail:
right = ' ' + full
else:
right = ' {}{}'.format(full[:right_avail - 2], ELLIPSIS)
size -= len(right)
if len(pwd) <= size:
left = pwd
elif len(short) <= size:
left = short
start = parts[:2]
end = parts[-1]
parts = parts[2:-1]
while parts:
# keep adding components to the end until we reach the capacity
c = parts.pop() + '/' + end
if len(c) <= size - 3:
left = '**/{}'.format(c)
end = c
continue
# once we cannot add whole components to the end, see if we can
# add whole components to the start.
if parts:
start.append('**')
else:
start.append('*')
start.append(end)
c = '/'.join(start)
if len(c) <= size:
left = c
else:
c = start[0] + '/**/' + end
if len(c) <= size:
left = c
break
else:
# We added everything but the first two path components.
# We know that the whole path doesn't fit, so check if
# we can add the first component.
c = start[0] + '/*/' + end
if len(c) <= size:
left = c
elif size > 4:
end = parts[-1]
left = '**/*{}'.format(end[4 - size:])
else:
left = ''
return '{}{}{}'.format(left, PROMPT, right)
def handle_input(self, text):
# type: (str) -> None
if self.terminal is None:
self.buffered = text
self.terminal = Terminal()
self.terminal.start(self.pwd, self.init_file)
self.terminal_output = TerminalOutput(self.terminal)
sublime.set_timeout(self.wait_for_prompt, 100)
elif self.buffered:
self.buffered += text
else:
self.terminal.send(text)
def reset_view(self, display_panel, panel_name, pwd):
# type: (DisplayPanel, str, str) -> sublime.View
window = sublime.active_window()
view = window.find_output_panel(panel_name)
if view is not None:
# uncache first, so event listeners do not get called.
uncache_panel(view)
window.destroy_output_panel(panel_name)
view = window.create_output_panel(panel_name)
view.set_read_only(True)
view.set_scratch(True)
view.set_line_endings('Unix')
settings = view.settings()
settings.set('color_scheme', display_panel.get_color_scheme())
settings.set('block_caret', True)
settings.set('caret_style', 'solid')
# prevent ST doing work that doesn't help here
settings.set('mini_diff', False)
settings.set('spell_check', False)
settings.set('is_gidterm', True)
settings.set('is_gidterm_live', True)
settings.set('current_working_directory', pwd)
cache_panel(view, self)
if self.is_active:
window.run_command('show_panel', {'panel': 'output.{}'.format(panel_name)})
window.focus_view(view)
return view
def push(self):
# type: () -> None
view = self.view
home = view.text_point(self.home_row, 0)
scopes = get_scopes(self.view)
self.display_panel.add_output(
view.substr(sublime.Region(0, view.size())),
home,
self.cursor,
scopes,
)
view.set_read_only(False)
try:
view.run_command('gidterm_erase_text', {'begin': 0, 'end': home})
finally:
view.set_read_only(True)
assert self.cursor >= home
self.cursor -= home
self.home_row = 0
def wait_for_prompt(self):
# type: () -> None
if self.terminal:
count = 0
for t in self.terminal_output:
if isinstance(t, TerminalOutput.NotReady):
sublime.set_timeout(self.wait_for_prompt, 100)
break
if isinstance(t, TerminalOutput.OutputStops):
# prompt about to be emitted
if self.buffered:
self.terminal.send(self.buffered)
self.buffered = ''
self.set_title()
sublime.set_timeout(self.handle_output, 0)
break
count += 1
if count > 100:
# give other events a chance to run
sublime.set_timeout(self.wait_for_prompt, 0)
break
else:
self.terminal_closed()
def handle_output(self):
# type: () -> None
if self.terminal:
update_preview = False
view = self.view
count = 0
for t in self.terminal_output:
if isinstance(t, TerminalOutput.NotReady):
sublime.set_timeout(self.handle_output, 100)
break
if isinstance(t, TerminalOutput.Prompt1Starts):
self.command_start = None
assert self.cursor == view.size(), (self.cursor, view.size())
elif isinstance(t, TerminalOutput.Prompt1Stops):
assert self.cursor == view.size()
self.command_start = self.cursor
self.command_range = []
self.scope = ''
elif isinstance(t, TerminalOutput.Prompt2Starts):
assert self.cursor == view.size()
end = self.cursor - 1
assert view.substr(end) == '\n'
assert self.command_range is not None
self.command_range.append(sublime.Region(self.command_start, end))
self.command_start = None
self.scope = 'sgr.magenta-on-default'
elif isinstance(t, TerminalOutput.Prompt2Stops):
assert self.cursor == view.size()
assert self.command_start is None
self.command_start = self.cursor
self.scope = ''
elif isinstance(t, TerminalOutput.OutputStarts):
self.out_start_time = datetime.now(timezone.utc)
assert self.cursor == view.size()
end = self.cursor - 1
assert view.substr(end) == '\n'
command_range = self.command_range
assert command_range is not None
command_range.append(sublime.Region(self.command_start, end))
self.command_start = None
self.display_panel.add_command_range(command_range)
command = '\n'.join(view.substr(region) for region in command_range)
self.command_range = None
# view = self.view = self.reset_view(self.display_panel, self.panel_name, self.pwd)
# Re-add the command without prompts. Note that it has been pushed.
# self.append_text(command + '\n')
# self.cursor = self.pushed = view.size()
# view.add_regions('command', [sublime.Region(0, self.cursor)], 'sgr.default-on-yellow', flags=0)
try:
words = shlex.split(command.strip())
except ValueError as e:
# after a PS2 prompt, this indicates the start of a shell interaction
# TODO: handle this properly
warn(str(e))
words = ['shell']
if '/' in words[0]:
words[0] = words[0].rsplit('/', 1)[-1]
self.command_words = words
self.set_title(str(timedelta_seconds(0.0)))
if not self.update_running:
sublime.set_timeout(self.update_elapsed, 1000)
self.update_running = True
elif isinstance(t, TerminalOutput.OutputStops):
if self.command_start is None:
# end of an executed command
status = t.status
self.display_status(status)
self.home_row, col = view.rowcol(view.size())
assert col == 0, col
self.push()
view = self.view = self.reset_view(self.display_panel, self.panel_name, self.pwd)
if t.pwd != self.pwd:
self.setpwd(t.pwd)
# For `cd` avoid duplicating the name in the title to show more
# of the path. There's an implicit `status == '0'` here, since
# the directory doesn't change if the command fails.
if self.command_words and self.command_words[0] in ('cd', 'popd', 'pushd'):
self.command_words.clear()
status = ''
self.set_title(status)
self.command_words = []
else:
# Pressing Enter without a command or end of a shell
# interaction, e.g. Display all possibilities? (y or n)
self.set_title()
self.command_start = None
elif isinstance(t, TerminalOutput.Text):
self.overwrite(t.text)
update_preview = True
elif isinstance(t, TerminalOutput.CursorUp):
row, col = view.rowcol(self.cursor)
row -= t.n
if row < 0:
row = 0
cursor = view.text_point(row, col)
if view.rowcol(cursor)[0] > row:
cursor = view.text_point(row + 1, 0) - 1
self.cursor = cursor
update_preview = True
elif isinstance(t, TerminalOutput.CursorDown):
row, col = view.rowcol(self.cursor)
row += t.n
cursor = view.text_point(row, col)
if view.rowcol(cursor)[0] > row:
cursor = view.text_point(row + 1, 0) - 1
self.cursor = cursor
update_preview = True
elif isinstance(t, TerminalOutput.CursorLeft):
self.cursor = max(self.cursor - t.n, 0)
update_preview = True
elif isinstance(t, TerminalOutput.CursorRight):
self.cursor = min(self.cursor + t.n, view.size())
update_preview = True
elif isinstance(t, TerminalOutput.CursorMoveTo):
row = view.rowcol(view.size())[0] - terminal_rows + 1
if row < self.home_row:
row = self.home_row
else:
self.home_row = row
row += t.row
col = t.col
cursor = view.text_point(row, col)
if view.rowcol(cursor)[0] > row:
cursor = view.text_point(row + 1, 0) - 1
# This puts cursor at end of line `row`. Maybe add spaces
# to get to column `col`?
self.cursor = cursor
update_preview = True
elif isinstance(t, TerminalOutput.CursorReturn):
# move cursor to start of line
classification = view.classify(self.cursor)
if not classification & sublime.CLASS_LINE_START:
bol = view.find_by_class(
self.cursor,
forward=False,
classes=sublime.CLASS_LINE_START
)
self.cursor = bol
update_preview = True
elif isinstance(t, TerminalOutput.LineFeed):
row, col = view.rowcol(self.cursor)
end = view.size()
maxrow, _ = view.rowcol(end)
if row == maxrow:
self.append_text('\n')
self.cursor = view.size()
new_home_row = row - terminal_rows + 1
if new_home_row > self.home_row:
self.home_row = new_home_row
else:
row += 1
cursor = view.text_point(row, col)
if view.rowcol(cursor)[0] > row:
cursor = view.text_point(row + 1, 0) - 1
self.cursor = cursor
update_preview = True
elif isinstance(t, TerminalOutput.ClearToEndOfLine):
classification = view.classify(self.cursor)
if not classification & sublime.CLASS_LINE_END:
eol = view.find_by_class(
self.cursor,
forward=True,
classes=sublime.CLASS_LINE_END
)
self.erase(self.cursor, eol)
update_preview = True
elif isinstance(t, TerminalOutput.ClearToStartOfLine):
classification = view.classify(self.cursor)
if not classification & sublime.CLASS_LINE_START:
bol = view.find_by_class(
self.cursor,
forward=False,
classes=sublime.CLASS_LINE_START
)
self.erase(bol, self.cursor)
update_preview = True
elif isinstance(t, TerminalOutput.ClearLine):
classification = view.classify(self.cursor)
if classification & sublime.CLASS_LINE_START:
bol = self.cursor
else:
bol = view.find_by_class(
self.cursor,
forward=False,
classes=sublime.CLASS_LINE_START
)
if classification & sublime.CLASS_LINE_END:
eol = self.cursor
else:
eol = view.find_by_class(
self.cursor,
forward=True,
classes=sublime.CLASS_LINE_END
)
self.erase(bol, eol)
update_preview = True
elif isinstance(t, TerminalOutput.Insert):
# keep cursor at start
cursor = self.cursor
self.insert_text('\ufffd' * t.n)
self.cursor = cursor
update_preview = True
elif isinstance(t, TerminalOutput.Delete):
self.delete(self.cursor, self.cursor + t.n)
update_preview = True
elif isinstance(t, TerminalOutput.SelectGraphicRendition):
scope = 'sgr.{}-on-{}'.format(t.foreground, t.background)
if scope == 'sgr.default-on-default':
scope = ''
self.scope = scope
else:
warn('unexpected token: {}'.format(t))
count += 1
if count > 100:
# give other events a chance to run
sublime.set_timeout(self.handle_output, 0)
break
else:
self.terminal_closed()
if update_preview:
self.push()
if all(region.empty() for region in view.sel()):
view.run_command('gidterm_cursor', {'position': self.cursor})
def terminal_closed(self):
# type: () -> None
assert self.terminal is not None
self.terminal.stop()
self.terminal = None
self.display_status('DISCONNECTED')
view = self.view
self.home_row, col = view.rowcol(view.size())
assert col == 0, col
self.push()
self.view = self.reset_view(self.display_panel, self.panel_name, self.pwd)
def update_elapsed(self):
# type: () -> None
if self.out_start_time is None:
self.update_running = False
else:
now = datetime.now(timezone.utc)
elapsed = (now - self.out_start_time).total_seconds()
self.set_title(str(timedelta_seconds(elapsed)))
sublime.set_timeout(self.update_elapsed, 1000)
def display_status(self, status):
# type: (str) -> None
# finished displaying output of command
view = self.view
output_end = view.size()
col = view.rowcol(output_end)[1]
if self.cursor == output_end:
if col == 0:
# cursor at end, with final newline
ret_scope = 'sgr.green-on-default'
else:
# cursor at end, but no final newline
ret_scope = 'sgr.yellow-on-default'
else:
# cursor not at end
ret_scope = 'sgr.red-on-default'
if col != 0:
self.append_text('\n')
if status == '0':
self.scope = 'sgr.green-on-default'
else:
self.scope = 'sgr.red-on-default'
self.append_text(status)
info = _exit_status_info.get(status)
if info:
self.scope = 'sgr.yellow-on-default'
self.append_text(info)
self.scope = ret_scope
self.append_text('\u23ce')
self.scope = ''
if self.out_start_time is None:
self.append_text('\n')
else:
elapsed = timedelta_seconds((datetime.now(timezone.utc) - self.out_start_time).total_seconds())
self.append_text(' {}\n'.format(elapsed))
self.out_start_time = None
def _insert(self, view, start, text):
# type: (sublime.View, int, str) -> int
view.set_read_only(False)
try:
view.run_command('gidterm_insert_text', {'point': start, 'characters': text})
finally:
view.set_read_only(True)
return start + len(text)
def insert_text(self, text):
# type: (str) -> None
self._write(text, self._insert)
def _overwrite(self, view, start, text):
# type: (sublime.View, int, str) -> int
# Overwrite text to end of line, then insert additional text
end = start + len(text)
classification = view.classify(start)
if classification & sublime.CLASS_LINE_END:
replace_end = start
else:
replace_end = view.find_by_class(start, forward=True, classes=sublime.CLASS_LINE_END)
if end < replace_end:
replace_end = end
view.set_read_only(False)
try:
view.run_command('gidterm_replace_text', {'begin': start, 'end': replace_end, 'characters': text})
finally:
view.set_read_only(True)
return end
def overwrite(self, text):
# type: (str) -> None
self._write(text, self._overwrite)
def append_text(self, text):
# type: (str) -> None
view = self.view
start = view.size()
end = start + len(text)
view.run_command('append', {'characters': text, 'force': True, 'scroll_to_end': True})
if end != view.size():
warn('cursor not at end after writing {!r} {} {}'.format(text, end, view.size()))
end = view.size()
if self.scope:
regions = view.get_regions(self.scope)
if regions and regions[-1].end() == start:
prev = regions.pop()
region = sublime.Region(prev.begin(), end)
else:
region = sublime.Region(start, end)
regions.append(region)
view.add_regions(self.scope, regions, self.scope, flags=sublime.DRAW_NO_OUTLINE | sublime.PERSISTENT)
self.cursor = end
def _write(self, text, add_text):
# (str, Callable[[sublime.View, int, str], None]) -> None
view = self.view
start = self.cursor
if start == view.size():
self.append_text(text)
else:
end = add_text(view, start, text)
if self.scope:
regions = view.get_regions(self.scope)
if regions and regions[-1].end() == start:
prev = regions.pop()
region = sublime.Region(prev.begin(), end)
else:
region = sublime.Region(start, end)
regions.append(region)
view.add_regions(self.scope, regions, self.scope, flags=sublime.DRAW_NO_OUTLINE | sublime.PERSISTENT)
self.cursor = end
def erase(self, begin, end):
# type: (int, int) -> None
# Erase the region without shifting characters after the region. This
# may require replacing the erased characters with placeholders.
view = self.view
classification = view.classify(begin)
if classification & sublime.CLASS_LINE_END:
eol = begin
else:
eol = view.find_by_class(begin, forward=True, classes=sublime.CLASS_LINE_END)
if eol <= end:
self.delete(begin, eol)
else:
length = end - begin
if length > 0:
view.set_read_only(False)
try:
view.run_command(
'gidterm_replace_text', {'begin': begin, 'end': end, 'characters': '\ufffd' * length}
)
finally:
view.set_read_only(True)
def delete(self, begin, end):
# type: (int, int) -> None
# Delete the region, shifting any later characters into the space.
if begin < end:
view = self.view
assert begin >= view.text_point(self.home_row, 0)
view.set_read_only(False)
try:
view.run_command('gidterm_erase_text', {'begin': begin, 'end': end})
finally:
view.set_read_only(True)
if self.cursor > end:
self.cursor -= (end - begin)
elif self.cursor > begin:
self.cursor = begin
def follow(self):
# type: () -> None
# move prompt panel cursor to current position
self.view.run_command('gidterm_cursor', {'position': self.cursor})
# move display panel cursor to end, causing it to follow output
self.display_panel.follow()
def create_init_file(contents):
# type: (str) -> str
cachedir = os.path.expanduser('~/.cache/sublime-gidterm/profile')
os.makedirs(cachedir, exist_ok=True)
fd, name = tempfile.mkstemp(dir=cachedir)
try:
contents += 'declare -- GIDTERM_CACHE="%s"\n' % name
os.write(fd, contents.encode('utf-8'))
finally:
os.close(fd)
return name
class GidtermCommand(sublime_plugin.TextCommand):
def run(self, edit, pwd=None):
# type: (...) -> None
init_script = None
view = self.view
settings = view.settings()
if settings.get('is_gidterm'):
# If the current view is a GidTerm, use the same
# pwd, configuration, and environment
if pwd is None:
pwd = settings.get('current_working_directory')
init_file = settings.get('gidterm_init_file')
with open(init_file, encoding='utf-8') as f:
init_script = f.read()
if pwd is None:
# If the current view has a filename, use the same
# pwd. Use the initial configuration and environment.
filename = view.file_name()
if filename is not None:
pwd = os.path.dirname(filename)
if init_script is None:
init_script = _initial_profile
window = view.window()
winvar = window.extract_variables()
if pwd is None:
pwd = winvar.get('folder', os.environ.get('HOME', '/'))
package = _get_package_location(winvar)
color_scheme = os.path.join(package, 'gidterm.sublime-color-scheme')
display_view = window.new_file()
display_view.set_read_only(True)
display_view.set_scratch(True)
display_view.set_line_endings('Unix')
settings = display_view.settings()
settings.set('color_scheme', color_scheme)
# prevent ST doing work that doesn't help here
settings.set('mini_diff', False)
settings.set('spell_check', False)
settings.set('is_gidterm', True)
settings.set('is_gidterm_display', True)
settings.set('current_working_directory', pwd)
settings.set('gidterm_init_script', init_script)
settings.set('gidterm_init_file', create_init_file(init_script))
display_panel = DisplayPanel(display_view)
cache_panel(display_view, display_panel)
window.focus_view(display_view)
class GidtermInsertTextCommand(sublime_plugin.TextCommand):
def run(self, edit, point, characters):
# type: (...) -> None
self.view.insert(edit, point, characters)
class GidtermReplaceTextCommand(sublime_plugin.TextCommand):
def run(self, edit, begin, end, characters):
# type: (...) -> None
region = sublime.Region(begin, end)
self.view.replace(edit, region, characters)
class GidtermEraseTextCommand(sublime_plugin.TextCommand):
def run(self, edit, begin, end):
# type: (...) -> None
region = sublime.Region(begin, end)
self.view.erase(edit, region)
class GidtermCursorCommand(sublime_plugin.TextCommand):
# https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388
def run(self, edit, position):
# type: (...) -> None
sel = self.view.sel()
sel.clear()
sel.add(position)
self.view.show(position)
class GidtermFollowCommand(sublime_plugin.TextCommand):
def run(self, edit):
# type: (...) -> None
panel = get_panel(self.view)
if panel:
panel.follow()
class GidtermFocusDisplay(sublime_plugin.TextCommand):
def run(self, edit):
# type: (...) -> None
panel = get_panel(self.view)
if panel:
panel.focus_display()
class GidtermFocusLive(sublime_plugin.TextCommand):
def run(self, edit):
# type: (...) -> None
get_display_panel(self.view).focus_live()
class GidtermSendCommand(sublime_plugin.TextCommand):
def run(self, edit, characters):
# type: (...) -> None
panel = get_panel(self.view)
if panel:
panel.handle_input(characters)
_terminal_capability_map = {
'cr': '\r', # CR
'esc': '\x1b\x1b', # escape
'ht': '\t', # tab
'kbs': '\b', # backspace
'kcbt': '\x1b[Z', # shift-tab
'kcuu1': '\x1b[A', # cursor-up
'kcud1': '\x1b[B', # cursor-down
'kcuf1': '\x1b[C', # cursor-right
'kcub1': '\x1b[D', # cursor-left
'kDC': '\x1b[P', # shift-delete
'kdch1': '\x1b[3~', # delete
'kEND': '', # shift-end
'kich1': '\x1b[L', # insert
'kHOM': '', # shift-home
'khome': '\x1b[H', # home
'kLFT': '', # shift-cursor-left
'knp': '', # next-page
'kpp': '', # previous-page
'kRIT': '', # shift-cursor-right
'nel': '\r\x1b[S', # newline
'kDC2': '\x1b[P', # shift-delete
'kDN2': '', # shift-cursor-down
'kEND2': '', # shift-End
'kHOM2': '', # shift-Home
'kLFT2': '', # shift-cursor-left
'kNXT2': '', # shift-Page-Down
'kPRV2': '', # shift-Page-Up
'kRIT2': '', # shift-cursor-right
'kUP2': '', # shift-cursor-up
}
class GidtermCapabilityCommand(sublime_plugin.TextCommand):
def run(self, edit, cap):
# type: (...) -> None
characters = _terminal_capability_map.get(cap)
if characters is None:
warn('unexpected terminal capability: {}'.format(cap))
return
panel = get_panel(self.view)
if panel:
panel.handle_input(characters)
class GidtermInsertCommand(sublime_plugin.TextCommand):
def run(self, edit, strip):
# type: (...) -> None
panel = get_panel(self.view)
if panel is not None:
buf = sublime.get_clipboard()
if strip:
buf = buf.strip()
panel.handle_input(buf)
class GidtermSelectCommand(sublime_plugin.TextCommand):
def run(self, edit, forward):
# type: (...) -> None
view = self.view
panel = get_panel(view)
if panel:
display_panel = panel.get_display_panel()
display_panel.focus_display()
if forward:
display_panel.next_command()
else:
display_panel.prev_command()
class GidtermDisplayListener(sublime_plugin.ViewEventListener):
@classmethod
def is_applicable(cls, settings):
# type: (...) -> bool
return settings.get('is_gidterm_display', False)
@classmethod
def applies_to_primary_view_only(cls):
# type: (...) -> bool
return False
def on_pre_close(self):
# type: () -> None
view = self.view
get_display_panel(view).close()
uncache_panel(view)
# Panels do not trigger `ViewEventListener` so use `EventListener`
class GidtermLiveListener(sublime_plugin.EventListener):
def on_activated(self, view):
# type: (sublime.View) -> None
if view.settings().get('is_gidterm_live', False):
panel = get_panel(view)
if panel is not None:
assert isinstance(panel, LivePanel)
panel.set_active(True)
def on_deactivated(self, view):
# type: (sublime.View) -> None
if view.settings().get('is_gidterm_live', False):
panel = get_panel(view)
if panel is not None:
assert isinstance(panel, LivePanel)
panel.set_active(False)
```
{
"source": "jongillies/quickstart-heptio",
"score": 2
} |
#### File: quickstart-heptio/bin/update-bastion-amis.py
```python
import urllib.request as request
import cfn_tools
import yaml
# update-bastion-amis.py is a helper script to get the newer amis from the quickstart-linux-bastion upstream.
AMI_TYPE='US1604HVM'
QUICKSTART_LINUX_BASTION_CF_TEMPLATE = 'https://raw.githubusercontent.com/aws-quickstart/quickstart-linux-bastion/master/templates/linux-bastion.template'
def main():
print(yaml.dump(get_actual_amis(), Dumper=cfn_tools.CfnYamlDumper, default_flow_style = False))
def get_actual_amis():
resp = request.urlopen(QUICKSTART_LINUX_BASTION_CF_TEMPLATE)
cf = yaml.load(resp.read())
mappings = cf['Mappings']['AWSAMIRegionMap']
clean_mappings = {}
for key, value in mappings.items():
if key == 'AMI':
continue
for ami_type, ami_number in value.items():
if ami_type == AMI_TYPE:
# this is our formatting
clean_mappings[key] = {
'64': ami_number
}
return {'Mappings': {'RegionMap': clean_mappings}}
if __name__ == "__main__":
main()
```
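For reference, the mapping shape that `get_actual_amis` returns and `main` prints looks like the sketch below; the region keys and AMI ids are placeholders, not values taken from the upstream template.

```python
# Illustrative output shape only; region keys and AMI ids are made up.
expected_shape = {
    'Mappings': {
        'RegionMap': {
            'us-east-1': {'64': 'ami-0000000000000example'},
            'eu-west-1': {'64': 'ami-1111111111111example'},
        }
    }
}
```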
{
"source": "jongio/azure-iot-cli-extension",
"score": 2
} |
#### File: central/models/__init__.py
```python
from knack.util import CLIError
from abc import abstractmethod, ABCMeta
class BaseTemplate(metaclass=ABCMeta):
def __init__(self, template: dict):
self.id = None
self.schema_names = None
self.raw_template = template
try:
self.name = template.get("displayName")
self.components = self._extract_components(template)
if self.components:
self.component_schema_names = self._extract_schema_names(
self.components
)
except Exception:
raise CLIError("Could not parse iot central device template.")
def _get_schema_name(self, schema) -> str:
return "name" if "name" in schema else "@id"
def _extract_schemas(self, entity: dict) -> dict:
schema = entity.get("schema")
if schema is None:
return {
schema[self._get_schema_name(schema)]: schema
for schema in entity["contents"]
}
elif schema.get("contents"):
return {
content[self._get_schema_name(content)]: content
for content in schema["contents"]
}
else:
return entity
def _extract_schema_names(self, entity: dict) -> dict:
return {
entity_name: list(entity_schemas.keys())
for entity_name, entity_schemas in entity.items()
}
def _get_interface_list_property(self, property_name) -> list:
# returns the list of interfaces where property with property_name is defined
return [
interface
for interface, schema in self.schema_names.items()
if property_name in schema
]
def _extract_components(self, template: dict) -> dict:
try:
dcm = template.get("capabilityModel", {})
if dcm.get("contents"):
rootContents = dcm.get("contents", {})
components = [
entity
for entity in rootContents
if entity.get("@type") == "Component"
]
if components:
return {
component[
self._get_schema_name(component)
]: self._extract_schemas(component)
for component in components
}
return {}
return {}
except Exception:
details = (
"Unable to extract schema for component from template '{}'.".format(
self.id
)
)
raise CLIError(details)
@abstractmethod
def get_id_key(self):
"""Get the Id property name depending on the API Version."""
@abstractmethod
def get_type_key(self):
"""Get the Type property name depending on the API Version."""
```
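A version-specific subclass only has to supply the two key names above. The following is a minimal sketch for illustration; the class name, key values and sample payload are assumptions, not the extension's actual preview models.
```python
# Hypothetical sketch: a minimal concrete subclass of BaseTemplate (names are illustrative).
from azext_iot.central.models import BaseTemplate
class ExampleTemplate(BaseTemplate):
    def get_id_key(self):
        # assume this API version exposes the template id under "@id"
        return "@id"
    def get_type_key(self):
        return "@type"
# Usage sketch: parsing a trivial template payload with no components.
example = ExampleTemplate({"displayName": "thermostat", "capabilityModel": {}})
```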
#### File: central/services/file_upload.py
```python
import requests
from typing import Union
from knack.log import get_logger
from azext_iot.constants import CENTRAL_ENDPOINT
from azext_iot.central.services import _utility
from azext_iot.central.models.v1_1_preview import FileUploadV1_1_preview
from azure.cli.core.util import should_disable_connection_verify
logger = get_logger(__name__)
BASE_PATH = "api/fileUploads"
MODEL = "FileUpload"
def _make_call(
cmd,
app_id: str,
method: str,
token: str,
api_version: str,
central_dns_suffix=CENTRAL_ENDPOINT,
) -> Union[dict, FileUploadV1_1_preview]:
url = "https://{}.{}/{}".format(app_id, central_dns_suffix, BASE_PATH)
headers = _utility.get_headers(token, cmd)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = api_version
response = requests.request(
url=url,
method=method.upper(),
headers=headers,
params=query_parameters,
verify=not should_disable_connection_verify(),
)
return _utility.try_extract_result(response)
def get_fileupload(
cmd,
app_id: str,
token: str,
api_version: str,
central_dns_suffix=CENTRAL_ENDPOINT,
) -> FileUploadV1_1_preview:
"""
Get fileupload info
Args:
cmd: command passed into az
app_id: name of app (used for forming request URL)
token: (OPTIONAL) authorization token to fetch fileupload details from IoTC.
MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
central_dns_suffix: {centralDnsSuffixInPath} as found in docs
Returns:
fileupload: dict
"""
result = _make_call(
cmd,
app_id,
"get",
token=token,
api_version=api_version,
central_dns_suffix=central_dns_suffix,
)
return _utility.get_object(result, MODEL, api_version)
def delete_fileupload(
cmd, app_id: str, token: str, api_version: str, central_dns_suffix=CENTRAL_ENDPOINT
) -> FileUploadV1_1_preview:
"""
Delete file upload storage configuration
Args:
cmd: command passed into az
app_id: name of app (used for forming request URL)
token: (OPTIONAL) authorization token to fetch fileupload details from IoTC.
MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
central_dns_suffix: {centralDnsSuffixInPath} as found in docs
Returns:
fileupload: dict
"""
result = _make_call(
cmd,
app_id,
"delete",
token=token,
api_version=api_version,
central_dns_suffix=central_dns_suffix,
)
return result
def createorupdate_fileupload(
cmd,
app_id: str,
connection_string: str,
container: str,
account: str,
    sasTtl: str,
token: str,
api_version: str,
update=False,
central_dns_suffix=CENTRAL_ENDPOINT,
) -> FileUploadV1_1_preview:
"""
Create the file upload storage account configuration.
Args:
cmd: command passed into az
app_id: name of app (used for forming request URL)
connection_string: The connection string used to configure the storage account
container: The name of the container inside the storage account
account: (optional) The storage account name where to upload the file to
sasTtl: (optional) ISO 8601 duration standard,
The amount of time the device’s request to upload a file is valid before it expires.
token: (OPTIONAL) authorization token to fetch file upload details from IoTC.
MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
central_dns_suffix: {centralDnsSuffixInPath} as found in docs
Returns:
fileupload: dict
"""
url = "https://{}.{}/{}".format(app_id, central_dns_suffix, BASE_PATH)
headers = _utility.get_headers(token, cmd, has_json_payload=True)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = api_version
payload = {}
if connection_string:
payload["connectionString"] = connection_string
if container:
payload["container"] = container
if account:
payload["account"] = account
if sasTtl:
payload["sasTtl"] = sasTtl
if update:
response = requests.patch(
url, headers=headers, json=payload, params=query_parameters
)
else:
response = requests.put(
url, headers=headers, json=payload, params=query_parameters
)
result = _utility.try_extract_result(response)
return _utility.get_object(result, MODEL, api_version)
```
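As a usage sketch (not part of the extension), the three helpers above can be composed into a configure/read/delete round trip; `cmd`, `token` and `api_version` are assumed to come from the az CLI invocation context.
```python
# Hypothetical sketch: compose the file upload service helpers above (not part of the extension).
from azext_iot.central.services import file_upload
def roundtrip_fileupload(cmd, app_id, token, api_version, connection_string, container):
    # Configure file upload storage, read the configuration back, then remove it again.
    created = file_upload.createorupdate_fileupload(
        cmd, app_id, connection_string, container,
        account=None, sasTtl=None, token=token, api_version=api_version,
    )
    fetched = file_upload.get_fileupload(cmd, app_id, token=token, api_version=api_version)
    file_upload.delete_fileupload(cmd, app_id, token=token, api_version=api_version)
    return created, fetched
```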
#### File: service/operations/enrollment_group_operations.py
```python
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class EnrollmentGroupOperations(object):
"""EnrollmentGroupOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Supported versions include: 2021-10-01. Constant value: "2021-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2021-10-01"
self.config = config
def get(
self, id, custom_headers=None, raw=False, **operation_config):
"""Get a device enrollment group.
:param id: Enrollment group ID.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EnrollmentGroup or ClientRawResponse if raw=true
:rtype: ~dps.models.EnrollmentGroup or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('EnrollmentGroup', response)
header_dict = {
'x-ms-error-code': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get.metadata = {'url': '/enrollmentGroups/{id}'}
def create_or_update(
self, id, enrollment_group, if_match=None, custom_headers=None, raw=False, **operation_config):
"""Create or update a device enrollment group.
:param id: Enrollment group ID.
:type id: str
:param enrollment_group: The device enrollment group.
:type enrollment_group: ~dps.models.EnrollmentGroup
:param if_match: The ETag of the enrollment record.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EnrollmentGroup or ClientRawResponse if raw=true
:rtype: ~dps.models.EnrollmentGroup or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(enrollment_group, 'EnrollmentGroup')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('EnrollmentGroup', response)
header_dict = {
'x-ms-error-code': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/enrollmentGroups/{id}'}
def delete(
self, id, if_match=None, custom_headers=None, raw=False, **operation_config):
"""Delete a device enrollment group.
:param id: Enrollment group ID.
:type id: str
:param if_match: The ETag of the enrollment group record.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'x-ms-error-code': 'str',
})
return client_raw_response
delete.metadata = {'url': '/enrollmentGroups/{id}'}
def query(
self, query, x_ms_max_item_count=None, x_ms_continuation=None, custom_headers=None, raw=False, **operation_config):
"""Query the device enrollment groups.
:param query:
:type query: str
:param x_ms_max_item_count: Page size
:type x_ms_max_item_count: int
:param x_ms_continuation: Continuation token
:type x_ms_continuation: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~dps.models.EnrollmentGroup] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
query_specification = models.QuerySpecification(query=query)
# Construct URL
url = self.query.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if x_ms_max_item_count is not None:
header_parameters['x-ms-max-item-count'] = self._serialize.header("x_ms_max_item_count", x_ms_max_item_count, 'int')
if x_ms_continuation is not None:
header_parameters['x-ms-continuation'] = self._serialize.header("x_ms_continuation", x_ms_continuation, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(query_specification, 'QuerySpecification')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('[EnrollmentGroup]', response)
header_dict = {
'x-ms-continuation': 'str',
'x-ms-max-item-count': 'int',
'x-ms-item-type': 'str',
'x-ms-error-code': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
query.metadata = {'url': '/enrollmentGroups/query'}
def get_attestation_mechanism(
self, id, custom_headers=None, raw=False, **operation_config):
"""Get the attestation mechanism in the device enrollment group record.
:param id: Enrollment group ID
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AttestationMechanism or ClientRawResponse if raw=true
:rtype: ~dps.models.AttestationMechanism or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.get_attestation_mechanism.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('AttestationMechanism', response)
header_dict = {
'x-ms-error-code': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get_attestation_mechanism.metadata = {'url': '/enrollmentGroups/{id}/attestationmechanism'}
def run_bulk_operation(
self, enrollment_groups, mode, custom_headers=None, raw=False, **operation_config):
"""Bulk device enrollment group operation with maximum of 10 groups.
:param enrollment_groups: Enrollment items
:type enrollment_groups: list[~dps.models.EnrollmentGroup]
:param mode: Operation mode. Possible values include: 'create',
'update', 'updateIfMatchETag', 'delete'
:type mode: str or ~dps.models.enum
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BulkEnrollmentGroupOperationResult or ClientRawResponse if
raw=true
:rtype: ~dps.models.BulkEnrollmentGroupOperationResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<dps.models.ProvisioningServiceErrorDetailsException>`
"""
bulk_operation = models.BulkEnrollmentGroupOperation(enrollment_groups=enrollment_groups, mode=mode)
# Construct URL
url = self.run_bulk_operation.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(bulk_operation, 'BulkEnrollmentGroupOperation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('BulkEnrollmentGroupOperationResult', response)
header_dict = {
'x-ms-error-code': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
run_bulk_operation.metadata = {'url': '/enrollmentGroups'}
```
#### File: tests/dps/test_iot_dps_int.py
```python
from azext_iot.common.shared import EntityStatusType, AttestationType, AllocationType, ReprovisionType
from azext_iot.common.utility import generate_key
from azext_iot.tests.dps import (
API_VERSION,
CERT_PATH,
DATAPLANE_AUTH_TYPES,
WEBHOOK_URL,
IoTDPSLiveScenarioTest
)
test_endorsement_key = (
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
)
class TestDPSEnrollments(IoTDPSLiveScenarioTest):
def __init__(self, test_method):
super(TestDPSEnrollments, self).__init__(test_method)
def test_dps_compute_device_key(self):
offline_device_key = self.cmd(
'az iot dps compute-device-key --key "{}" '
"--registration-id myarbitrarydeviceId".format(test_endorsement_key)
).output
offline_device_key = offline_device_key.strip("\"'\n")
assert offline_device_key == "<KEY>
def test_dps_enrollment_tpm_lifecycle(self):
attestation_type = AttestationType.tpm.value
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id = self.generate_enrollment_names()[0]
device_id = self.generate_device_names()[0]
enrollment = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment create --enrollment-id {} --attestation-type {}"
" -g {} --dps-name {} --endorsement-key {}"
" --provisioning-status {} --device-id {} --initial-twin-tags {}"
" --initial-twin-properties {} --device-information {} "
"--allocation-policy {} --iot-hubs {}".format(
enrollment_id,
attestation_type,
self.entity_rg,
self.entity_dps_name,
test_endorsement_key,
EntityStatusType.enabled.value,
device_id,
'"{generic_dict}"',
'"{generic_dict}"',
'"{generic_dict}"',
AllocationType.static.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.static.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["generic_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", True),
self.check("reprovisionPolicy.updateHubAssignment", True),
],
).get_output_in_json()
etag = enrollment["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment list -g {} --dps-name {}".format(
self.entity_rg, self.entity_dps_name
),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 1),
self.check("[0].registrationId", enrollment_id),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("registrationId", enrollment_id)],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment show -g {} --dps-name {} --enrollment-id {} --show-keys".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[
self.check("registrationId", enrollment_id),
self.check("attestation.type", attestation_type),
self.exists("attestation.{}".format(attestation_type)),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
" --provisioning-status {} --etag {} --info {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
EntityStatusType.disabled.value,
etag,
'""'
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.disabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.static.value),
self.check("iotHubs", self.hub_host_name.split()),
self.exists("initialTwin.tags"),
self.exists("initialTwin.properties.desired"),
self.exists("optionalDeviceInformation"),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
def test_dps_enrollment_x509_lifecycle(self):
attestation_type = AttestationType.x509.value
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id = self.generate_enrollment_names()[0]
device_id = self.generate_device_names()[0]
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment create --enrollment-id {} --attestation-type {}"
" -g {} --dps-name {} --cp {} --scp {}"
" --provisioning-status {} --device-id {}"
" --initial-twin-tags {} --initial-twin-properties {}"
" --allocation-policy {} --iot-hubs {}".format(
enrollment_id,
attestation_type,
self.entity_rg,
self.entity_dps_name,
CERT_PATH,
CERT_PATH,
EntityStatusType.enabled.value,
device_id,
'"{generic_dict}"',
'"{generic_dict}"',
AllocationType.hashed.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.hashed.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["generic_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", True),
self.check("reprovisionPolicy.updateHubAssignment", True),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 1),
self.check("[0].registrationId", enrollment_id),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("registrationId", enrollment_id)],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
" --provisioning-status {} --etag {} --info {} --rc".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
EntityStatusType.disabled.value,
etag,
'"{generic_dict}"',
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.disabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.hashed.value),
self.check("iotHubs", self.hub_host_name.split()),
self.exists("initialTwin.tags"),
self.exists("initialTwin.properties.desired"),
self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
self.check("attestation.type.x509.clientCertificates.primary", None),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
def test_dps_enrollment_symmetrickey_lifecycle(self):
attestation_type = AttestationType.symmetricKey.value
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id, enrollment_id2 = self.generate_enrollment_names(count=2)
primary_key = generate_key()
secondary_key = generate_key()
device_id = self.generate_enrollment_names()[0]
# Use provided keys
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment create --enrollment-id {} --attestation-type {}"
" -g {} --dps-name {} --pk {} --sk {}"
" --provisioning-status {} --device-id {}"
" --initial-twin-tags {} --initial-twin-properties {} --device-information {}"
" --allocation-policy {} --rp {} --iot-hubs {} --edge-enabled".format(
enrollment_id,
attestation_type,
self.entity_rg,
self.entity_dps_name,
primary_key,
secondary_key,
EntityStatusType.enabled.value,
device_id,
'"{generic_dict}"',
'"{generic_dict}"',
'"{generic_dict}"',
AllocationType.geolatency.value.lower(),
ReprovisionType.reprovisionandresetdata.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.geolatency.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["generic_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", False),
self.check("reprovisionPolicy.updateHubAssignment", True),
self.check("capabilities.iotEdge", True),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 1),
self.check("[0].registrationId", enrollment_id),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("registrationId", enrollment_id)],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
" --provisioning-status {} --etag {} --edge-enabled False"
" --allocation-policy {} --webhook-url {} --api-version {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
EntityStatusType.disabled.value,
etag,
AllocationType.custom.value,
WEBHOOK_URL,
API_VERSION,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.disabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", "custom"),
self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
self.check("customAllocationDefinition.apiVersion", API_VERSION),
self.check("iotHubs", None),
self.exists("initialTwin.tags"),
self.exists("initialTwin.properties.desired"),
self.check("attestation.symmetricKey.primaryKey", primary_key),
self.check("capabilities.iotEdge", False),
],
)
# Use service generated keys
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment create --enrollment-id {} --attestation-type {}"
" -g {} --dps-name {} --allocation-policy {} --webhook-url {} --api-version {}".format(
enrollment_id2,
attestation_type,
self.entity_rg,
self.entity_dps_name,
AllocationType.custom.value,
WEBHOOK_URL,
API_VERSION,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id2),
self.check("allocationPolicy", "custom"),
self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
self.check("customAllocationDefinition.apiVersion", API_VERSION),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
)
def test_dps_enrollment_group_x509_lifecycle(self):
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id = self.generate_enrollment_names(group=True)[0]
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group create --enrollment-id {} -g {} --dps-name {}"
" --cp {} --scp {} --provisioning-status {} --allocation-policy {}"
" --iot-hubs {} --edge-enabled".format(
enrollment_id,
self.entity_rg,
self.entity_dps_name,
CERT_PATH,
CERT_PATH,
EntityStatusType.enabled.value,
AllocationType.geolatency.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.exists("reprovisionPolicy"),
self.check("allocationPolicy", AllocationType.geolatency.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("reprovisionPolicy.migrateDeviceData", True),
self.check("reprovisionPolicy.updateHubAssignment", True),
self.check("capabilities.iotEdge", True),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 1),
self.check("[0].enrollmentGroupId", enrollment_id),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("enrollmentGroupId", enrollment_id)],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {} --show-keys".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id),
self.exists("attestation.x509"),
],
)
# Compute Device Key only works for symmetric key enrollment groups
self.cmd(
self.set_cmd_auth_type(
'az iot dps compute-device-key -g {} --dps-name {} --enrollment-id {} '
"--registration-id myarbitrarydeviceId".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
expect_failure=True
)
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
" --provisioning-status {} --rsc --etag {} --rp {} --allocation-policy {}"
" --edge-enabled False --scp {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
EntityStatusType.disabled.value,
etag,
ReprovisionType.never.value,
AllocationType.hashed.value,
CERT_PATH,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", AttestationType.x509.value),
self.check("enrollmentGroupId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.disabled.value),
self.check("attestation.type.x509.clientCertificates.secondary", None),
self.exists("reprovisionPolicy"),
self.check("allocationPolicy", AllocationType.hashed.value),
self.check("reprovisionPolicy.migrateDeviceData", False),
self.check("reprovisionPolicy.updateHubAssignment", False),
self.check("capabilities.iotEdge", False),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps registration list -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("length(@)", 0)],
)
cert_name = self.create_random_name("certificate-for-test", length=48)
cert_etag = self.cmd(
"iot dps certificate create -g {} --dps-name {} --name {} --p {}".format(
self.entity_rg, self.entity_dps_name, cert_name, CERT_PATH
),
checks=[self.check("name", cert_name)],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
" --cn {} --etag {} --allocation-policy {} --webhook-url {} --api-version {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
cert_name,
etag,
AllocationType.custom.value,
WEBHOOK_URL,
API_VERSION,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", AttestationType.x509.value),
self.check("enrollmentGroupId", enrollment_id),
self.check("allocationPolicy", "custom"),
self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
self.check("customAllocationDefinition.apiVersion", API_VERSION),
self.check("attestation.x509.caReferences.primary", cert_name),
self.check("attestation.x509.caReferences.secondary", None),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
self.cmd(
"iot dps certificate delete -g {} --dps-name {} --name {} --etag {}".format(
self.entity_rg, self.entity_dps_name, cert_name, cert_etag
),
)
def test_dps_enrollment_group_symmetrickey_lifecycle(self):
attestation_type = AttestationType.symmetricKey.value
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id, enrollment_id2 = self.generate_enrollment_names(count=2, group=True)
primary_key = generate_key()
secondary_key = generate_key()
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group create --enrollment-id {}"
" -g {} --dps-name {} --pk {} --sk {} --provisioning-status {}"
" --initial-twin-tags {} --initial-twin-properties {}"
" --allocation-policy {} --rp {} --iot-hubs {} --edge-enabled".format(
enrollment_id,
self.entity_rg,
self.entity_dps_name,
primary_key,
secondary_key,
EntityStatusType.enabled.value,
'"{generic_dict}"',
'"{generic_dict}"',
AllocationType.geolatency.value,
ReprovisionType.reprovisionandresetdata.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("allocationPolicy", AllocationType.geolatency.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["generic_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", False),
self.check("reprovisionPolicy.updateHubAssignment", True),
self.check("capabilities.iotEdge", True),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 1),
self.check("[0].enrollmentGroupId", enrollment_id),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
checks=[self.check("enrollmentGroupId", enrollment_id)],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
" --provisioning-status {} --etag {} --edge-enabled False"
" --allocation-policy {} --webhook-url {} --api-version {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id,
EntityStatusType.disabled.value,
etag,
AllocationType.custom.value,
WEBHOOK_URL,
API_VERSION,
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.disabled.value),
self.check("allocationPolicy", "custom"),
self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
self.check("customAllocationDefinition.apiVersion", API_VERSION),
self.check("iotHubs", None),
self.exists("initialTwin.tags"),
self.exists("initialTwin.properties.desired"),
self.check("attestation.symmetricKey.primaryKey", primary_key),
self.check("capabilities.iotEdge", False),
],
)
# Use service generated keys
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group create -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id2),
self.check("attestation.type", attestation_type),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
auth_type=auth_phase
),
checks=[
self.check("length(@)", 2)
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
checks=[self.check("enrollmentGroupId", enrollment_id2)],
)
keys = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {} --show-keys".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id2),
self.exists("attestation.symmetricKey"),
],
).get_output_in_json()["attestation"]["symmetricKey"]
# Compute Device Key tests
online_device_key = self.cmd(
self.set_cmd_auth_type(
'az iot dps compute-device-key -g {} --dps-name {} --enrollment-id {} '
"--registration-id myarbitrarydeviceId".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
).output
offline_device_key = self.cmd(
'az iot dps compute-device-key --key "{}" '
"--registration-id myarbitrarydeviceId".format(keys["primaryKey"])
).output
assert offline_device_key == online_device_key
# Compute Device Key uses primary key
offline_device_key = self.cmd(
'az iot dps compute-device-key --key "{}" '
"--registration-id myarbitrarydeviceId".format(keys["secondaryKey"])
).output
assert offline_device_key != online_device_key
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
" --pk {} --sk {} --etag {}".format(
self.entity_rg,
self.entity_dps_name,
enrollment_id2,
keys["secondaryKey"],
keys["primaryKey"],
etag
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id2),
self.check("attestation.type", attestation_type),
],
).get_output_in_json()["etag"]
online_device_key = self.cmd(
self.set_cmd_auth_type(
'az iot dps compute-device-key -g {} --dps-name {} --enrollment-id {} '
"--registration-id myarbitrarydeviceId".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
).output
assert offline_device_key == online_device_key
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id2
),
auth_type=auth_phase
),
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
def test_dps_enrollment_twin_array(self):
attestation_type = AttestationType.x509.value
for auth_phase in DATAPLANE_AUTH_TYPES:
# test twin array in enrollment
device_id = self.generate_device_names()[0]
enrollment_id = self.generate_enrollment_names()[0]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment create --enrollment-id {} --attestation-type {}"
" -g {} --dps-name {} --cp {} --scp {}"
" --provisioning-status {} --device-id {}"
" --initial-twin-tags {} --initial-twin-properties {} --device-information {}"
" --allocation-policy {} --iot-hubs {}".format(
enrollment_id,
attestation_type,
self.entity_rg,
self.entity_dps_name,
CERT_PATH,
CERT_PATH,
EntityStatusType.enabled.value,
device_id,
'"{generic_dict}"',
'"{twin_array_dict}"',
'"{generic_dict}"',
AllocationType.hashed.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("attestation.type", attestation_type),
self.check("registrationId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("deviceId", device_id),
self.check("allocationPolicy", AllocationType.hashed.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["twin_array_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", True),
self.check("reprovisionPolicy.updateHubAssignment", True),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_id
),
auth_type=auth_phase
),
)
# test twin array in enrollment group
enrollment_group_id = self.generate_enrollment_names(group=True)[0]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group create --enrollment-id {} -g {} --dps-name {}"
" --cp {} --scp {} --provisioning-status {} --allocation-policy {}"
" --iot-hubs {} --edge-enabled --props {}".format(
enrollment_group_id,
self.entity_rg,
self.entity_dps_name,
CERT_PATH,
CERT_PATH,
EntityStatusType.enabled.value,
AllocationType.geolatency.value,
self.hub_host_name,
'"{twin_array_dict}"',
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_group_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.exists("reprovisionPolicy"),
self.check("allocationPolicy", AllocationType.geolatency.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check(
"initialTwin.properties.desired", self.kwargs["twin_array_dict"]
),
self.check("reprovisionPolicy.migrateDeviceData", True),
self.check("reprovisionPolicy.updateHubAssignment", True),
self.check("capabilities.iotEdge", True),
],
)
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group delete -g {} --dps-name {} --enrollment-id {}".format(
self.entity_rg, self.entity_dps_name, enrollment_group_id
),
auth_type=auth_phase
),
)
``` |
{
"source": "jongio/azure-iot-rest",
"score": 3
} |
#### File: data-plane/devices/device-conf.py
```python
import requests
import uuid
import sys
from base64 import b64encode, b64decode
from hashlib import sha256
from time import time
from hmac import HMAC
import argparse
if sys.version_info.major >= 3:
from urllib.parse import quote, urlencode
else:
from urllib import quote, urlencode
parser = argparse.ArgumentParser(description="")
parser.add_argument("--name", help="IoT Hub Name", required=True)
parser.add_argument("--key", help="IoT Hub (iothubowner) primary key", required=True)
parser.add_argument("--device-id", help="IoT Edge device Id", required=True)
parser.add_argument("--config-file", help="Full path to module config file", required=True)
parser.add_argument("--key-name", help="IoT Hub policy key name, defaults to %(default)s", default="iothubowner")
parser.add_argument("--api-version", help="IoT Hub REST API version, defaults to %(default)s", default="2017-11-08-preview")
args = parser.parse_args()
name = args.name # IoT Hub name
key = args.key # IoT Hub primary key
deviceId = args.device_id # IoT Hub device id
configFile = args.config_file # Path to the configuration file
resourceURI = name + '.azure-devices.net'
tokenExpirationPeriod = 60
policyKeyName = args.key_name
apiVersion = args.api_version
applyConfigurationURI = 'https://%s/devices/%s/applyConfigurationContent?api-version=%s' % (resourceURI, deviceId, apiVersion)
def get_iot_hub_sas_token(uri, key, policy_name, expiry=3600):
ttl = time() + expiry
sign_key = "%s\n%d" % ((quote(uri)), int(ttl))
signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
rawtoken = {
'sr' : uri,
'sig': signature,
'se' : str(int(ttl))
}
if policy_name is not None:
rawtoken['skn'] = policy_name
return 'SharedAccessSignature ' + urlencode(rawtoken)
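# For reference, the token returned above is the standard IoT Hub SAS string:
# 'SharedAccessSignature ' followed by the URL-encoded sr (resource URI),
# sig (base64 HMAC-SHA256 signature), se (expiry) and, when a policy name is given, skn fields.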
def get_config_file_contents():
with open(args.config_file, 'r') as configFile:
return configFile.read()
def apply_configuration():
applyConfigurationResponse = requests.post(applyConfigurationURI,
headers={
'Authorization': iotHubSasToken,
'Content-Type': 'application/json'
},
data = get_config_file_contents()
)
print(applyConfigurationURI)
print(applyConfigurationResponse.status_code)
print(applyConfigurationResponse.text)
if applyConfigurationResponse.status_code == 204:
print("Configuration successfully applied. Please run `docker logs edgeAgent -f` to see the change applied.")
else:
print("There was an error applying the configuration. You should see an error message above that indicates the issue.")
iotHubSasToken = get_iot_hub_sas_token(resourceURI, key, policyKeyName, tokenExpirationPeriod)
apply_configuration()
``` |
{
"source": "jongio/azure-script",
"score": 2
} |
#### File: handlers/az/CosmosDB.py
```python
import sys
from azsc.handlers.Handler import Handler
from azsc.handlers.az.Generic import GenericHandler
class CosmosDBHandler(GenericHandler):
azure_object = "cosmosdb"
def get_from_context(self, name):
if name in self.context:
return self.context[name]
else:
print("***** MISSING '{}' in context".format(name))
sys.exit(1)
def execute(self):
fqn = self.get_full_resource_name()
if fqn == "cosmosdb":
self.add_context_parameter("resource-group", "group")
if fqn == "cosmosdb database":
db_name = self.name
self.params["db-name"] = db_name
self.name = self.get_from_context("cosmosdb")
if fqn == "cosmosdb collection":
collection_name = self.name
self.params["collection-name"] = collection_name
self.name = self.get_from_context("cosmosdb")
self.add_context_parameter("db-name", "cosmosdb database")
cmd = super(CosmosDBHandler, self).execute()
self.save_to_context()
return cmd
```
#### File: handlers/az/FunctionApp.py
```python
from azsc.handlers.Handler import Handler
from azsc.handlers.az.Generic import GenericHandler
class FunctionAppHandler(GenericHandler):
azure_object = "functionapp"
def execute(self):
fqn = self.get_full_resource_name()
if fqn == "functionapp":
self.add_context_parameter("resource-group", "group")
self.add_context_parameter("plan", "appservice plan")
self.add_context_parameter("storage-account", "storage account")
if fqn == "functionapp config":
self.add_context_parameter("resource-group", "group")
if fqn == "functionapp deployment":
self.add_context_parameter("plan", "appservice plan")
self.add_context_parameter("resource-group", "group")
if fqn == "eventhubs namespace authorization-rule keys list":
self.add_context_parameter("namespace-name", "eventhubs namespace")
self.add_context_parameter("resource-group", "group")
if fqn == "eventhubs namespace":
self.add_context_parameter("resource-group", "group")
self.add_context_parameter("location", "location")
cmd = super(FunctionAppHandler, self).execute()
self.save_to_context()
return cmd
```
#### File: handlers/az/Generic.py
```python
import sys
from azsc.handlers.Handler import Handler
class ContextParameter:
name = None
context = None
def __init__(self, name, context):
self.name = name
self.context = context
def __eq__(self, other):
if isinstance(other, ContextParameter):
return self.name == other.name
return self.name == str(other)
def __str__(self):
return "{0} => {1}".format(self.name, self.context)
class GenericHandler(Handler):
azure_object = "*"
context_parameters = None
def __init__(self, context, resources, action, name, params):
super(GenericHandler, self).__init__(context, resources, action, name, params)
self.context_parameters = []
def execute(self):
cmd = u"az"
cmd += u" {0}".format(' '.join(self.resources))
cmd += u" {0}".format(self.action)
if (self.name != None):
cmd += u" --name {0}".format(self.name)
#print("-> {0} {1} {2}".format(self.resources, self.action, self.name))
#print("-> CONTEXT: {0}".format(self.context))
#print("-> PARAM_CONTEXT: {0}".format(self.context_parameters))
for cp in self.context_parameters:
self._param_from_context(cp.name, cp.context)
if (len(self.params)>0):
for param in self.params:
cmd += u" --{0} {1}".format(param, self.params[param])
return cmd
def add_context_parameter(self, parameter_name, context_name):
self.context_parameters.append(ContextParameter(parameter_name, context_name))
def _param_from_context(self, param_name, context_name):
if not param_name in self.params:
if context_name in self.context:
self.params[param_name] = self.context[context_name]
else:
print("-> CONTEXT: {0}".format(self.context))
print("-> PARAM_CONTEXT: {0}".format(self.context_parameters))
sys.exit("Missing '{0}' parameter and not suitable context value '{1}' found.".format(param_name, context_name))
```
#### File: handlers/az/IoT.py
```python
from azsc.handlers.Handler import Handler
from azsc.handlers.az.Generic import GenericHandler
class IoTHubHandler(GenericHandler):
azure_object = "iot hub"
def execute(self):
fqn = self.get_full_resource_name()
if fqn == "iot hub":
self.add_context_parameter("resource-group", "group")
cmd = super(IoTHubHandler, self).execute()
self.save_to_context()
return cmd
class IotHubDeviceIdentity(GenericHandler):
azure_object = "iot hub device-identity"
def execute(self):
device_id = self.name
self.params["device-id"] = device_id
self.name = None
self.add_context_parameter("hub-name", "iot hub")
cmd = super(IotHubDeviceIdentity, self).execute()
self.save_to_context(value=device_id)
return cmd
```
#### File: azsc/handlers/Handler.py
```python
class Handler(object):
context = None
resources = None
action = None
params = None
name = None
def __init__(self, context, resources, action, name, params):
self.context = context
self.resources = resources
self.action = action
self.name = name
self.params = params
def get_full_resource_name(self):
"""
Return full resource name
"""
return ' '.join(self.resources)
def save_to_context(self, key=None, value=None):
"""
Push value into context.
If 'key' is None, the full resource name will be used a key.
If 'value' is None, the object name will be used a value.
"""
if (key == None):
key = self.get_full_resource_name()
if (value == None):
value = self.name
#print("[{0}:{1}]".format(key, value))
self.context[key] = value
```
#### File: azsc/script_parser/__init__.py
```python
import sys
import logging
import os
from lark import Lark
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def run_parser(script, target, debug):
if (debug == True):
logging.getLogger().setLevel(logging.DEBUG)
logging.info("AZ Script Compiler v 0.1")
logging.info("loading grammar")
with open(os.path.join(__location__, '../grammar/azsc.lark'), 'r') as f:
grammar = f.read()
logging.info("loading script file")
try:
with open(script, 'r') as f:
text = f.read()
except IOError:
error_message = "script {0} file not found".format(script)
logging.error(error_message)
return "ERROR: " + error_message
logging.info("setting up parser")
lark = Lark(grammar)
logging.info("generating parse tree")
tree = lark.parse(text)
logging.debug("parse tree:\n" + tree.pretty())
logging.info("importing parse tree transformer")
from azsc.transformers.AZSTransformer import AZSTransformer
logging.info("compiling")
t = AZSTransformer()
t.transform(tree)
cmd = t.get_command()
if (debug==True):
logging.debug("context:")
ctx = t.get_context()
for c in ctx:
logging.debug("\t[%s]=%s", str(c), str(ctx[c]))
logging.info("done")
return cmd
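# Hypothetical usage sketch (the script file name and target value are assumptions):
#     from azsc.script_parser import run_parser
#     print(run_parser("example.azsc", target="azcli", debug=False))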
``` |
{
"source": "jongio/storage-python-getting-started",
"score": 2
} |
#### File: storage-python-getting-started/AzureStoragePythonGettingStarted/Blobs.py
```python
import os
import config
import random
import string
from random import randint
from azure.storage import CloudStorageAccount
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
#
# Azure Storage Blob Sample - Demonstrate how to use the Blob Storage service.
# Blob storage stores unstructured data such as text, binary data, documents or media files.
# Blobs can be accessed from anywhere in the world via HTTP or HTTPS.
#
# Documentation References:
# - What is a Storage Account - http://azure.microsoft.com/en-us/documentation/articles/storage-whatis-account/
# - Getting Started with Blobs - https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/
# - Blob Service Concepts - http://msdn.microsoft.com/en-us/library/dd179376.aspx
# - Blob Service REST API - http://msdn.microsoft.com/en-us/library/dd135733.aspx
# - Blob Service Python API - http://azure.github.io/azure-storage-python/ref/azure.storage.blob.html
# - Storage Emulator - http://azure.microsoft.com/en-us/documentation/articles/storage-use-emulator/
#
class blob_samples():
# Runs all samples for Azure Storage Blob service.
# Input Arguments:
# account - CloudStorageAccount to use for running the samples
def run_all_samples(account):
print('\n\nAzure Storage Blob sample - Starting.')
try:
# Block blob basics
print('\n\n* Basic block blob operations *\n')
blob_samples.basic_blockblob_operations(account)
# Page blob basics
print('\n\n* Basic page blob operations *\n')
blob_samples.basic_pageblob_operations(account)
if (config.IS_EMULATED == False):
# Append blob basics
# Append blob is not yet supported in the Emulator
print('\n\n* Basic append blob operations *\n')
blob_samples.basic_appendblob_operations(account)
except Exception as e:
if (config.IS_EMULATED):
print(
'Error occurred in the sample. If you are using the emulator, please make sure the emulator is running.', e)
else:
print(
'Error occurred in the sample. Please make sure the account name and key are correct.', e)
finally:
print('\n\nAzure Storage Blob sample - Completed.')
# Runs basic block blob samples for Azure Storage Blob service.
# Input Arguments:
# container_name - Container name to use for running the samples
def basic_blockblob_operations(account):
file_to_upload = "HelloWorld.png"
# Create a Block Blob Service object
blockblob_service = account.create_block_blob_service()
#blockblob_service = BlockBlobService(account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY)
container_name = 'blockblobbasicscontainer' + \
blob_samples.randomcontainername(6)
# Create a new container
print('1. Create a container with name - ' + container_name)
blockblob_service.create_container(container_name)
# Upload file as a block blob
print('2. Uploading BlockBlob')
# Get full path on drive to file_to_upload by joining the fully qualified directory name and file name on the local drive
full_path_to_file = os.path.join(
os.path.dirname(__file__), file_to_upload)
blockblob_service.create_blob_from_path(
container_name, file_to_upload, full_path_to_file)
# List all the blobs in the container
print('3. List Blobs in Container')
generator = blockblob_service.list_blobs(container_name)
for blob in generator:
print('\tBlob Name: ' + blob.name)
# Download the blob
print('4. Download the blob')
blockblob_service.get_blob_to_path(container_name, file_to_upload, os.path.join(
os.path.dirname(__file__), file_to_upload + '.copy.png'))
# Clean up after the sample
print('5. Delete block Blob')
blockblob_service.delete_blob(container_name, file_to_upload)
# Delete the container
print("6. Delete Container")
blockblob_service.delete_container(container_name)
# Runs basic page blob samples for Azure Storage Blob service.
# Input Arguments:
# account - CloudStorageAccount to use for running the samples
def basic_pageblob_operations(account):
file_to_upload = "HelloPageBlobWorld.txt"
# Create a block blob service object
pageblob_service = account.create_page_blob_service()
container_name = 'pageblobbasicscontainer' + \
blob_samples.randomcontainername(6)
# Create a new container
print('1. Create a container with name - ' + container_name)
pageblob_service.create_container(container_name)
# Create a page blob
print('2. Creating Page Blob')
pageblob_service.create_blob_from_bytes(
container_name, file_to_upload, blob_samples.get_random_bytes(512))
# List all the blobs in the container
print('3. List Blobs in Container')
blob_list = pageblob_service.list_blobs(container_name)
for blob in blob_list:
print('\tBlob Name: ' + blob.name)
# Read a page blob
print('4. Reading a Page Blob')
readblob = pageblob_service.get_blob_to_bytes(container_name, # name of the container
file_to_upload, # name of blob to read
start_range=3, # byte offset to start reading from
end_range=10) # byte offset to stop reading at
# Clean up after the sample
print('5. Delete Page Blob')
pageblob_service.delete_blob(container_name, file_to_upload)
# Delete the container
print("6. Delete Container")
pageblob_service.delete_container(container_name)
# Runs basic append blob samples for Azure Storage Blob service.
# Input Arguments:
# container_name - Container name to use for running the samples
def basic_appendblob_operations(account):
file_to_upload = "HelloAppendBlobWorld.txt"
# Create an append blob service object
appendblob_service = account.create_append_blob_service()
container_name = 'appendblobbasicscontainer' + \
blob_samples.randomcontainername(6)
# Create a new container
print('1. Create a container with name - ' + container_name)
appendblob_service.create_container(container_name)
# Create an append blob
print('2. Create Append Blob')
appendblob_service.create_blob(container_name, file_to_upload)
# Write to an append blob
print('3. Write to Append Blob')
appendblob_service.append_blob_from_text(
container_name, file_to_upload, '\tHello Append Blob world!\n')
appendblob_service.append_blob_from_text(
container_name, file_to_upload, '\tHello Again Append Blob world!')
# List all the blobs in the container
print('4. List Blobs in Container')
generator = appendblob_service.list_blobs(container_name)
for blob in generator:
print('\tBlob Name: ' + blob.name)
# Read the blob
print('5. Read Append blob')
append_blob = appendblob_service.get_blob_to_text(
container_name, file_to_upload)
print(append_blob.content)
# Clean up after the sample
print('6. Delete Append Blob')
appendblob_service.delete_blob(container_name, file_to_upload)
# Delete the container
print("7. Delete Container")
appendblob_service.delete_container(container_name)
# Gets Random Bytes of specified size for use in samples.
# Input Arguments:
# size - size of random bytes to get
def get_random_bytes(size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = rand.randint(0, 255)
return bytes(result)
# Gets random lowercase characters of the given length to append to the container name.
def randomcontainername(length):
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
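# Hypothetical driver (not part of the original sample): run_all_samples expects a
# CloudStorageAccount, so a minimal entry point could look like this, assuming
# config defines STORAGE_ACCOUNT_NAME and STORAGE_ACCOUNT_KEY as referenced above.
if __name__ == '__main__':
    account = CloudStorageAccount(account_name=config.STORAGE_ACCOUNT_NAME,
                                  account_key=config.STORAGE_ACCOUNT_KEY)
    blob_samples.run_all_samples(account)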
``` |
{
"source": "jongkweh/onnxruntime",
"score": 2
} |
#### File: python/util/check_onnx_model_mobile_usability.py
```python
import argparse
import logging
import pathlib
# need this before the mobile helper imports for some reason
logging.basicConfig(format='%(levelname)s: %(message)s')
from .mobile_helpers import check_model_can_use_ort_mobile_pkg, usability_checker # noqa
def check_usability():
parser = argparse.ArgumentParser(
description='''Analyze an ONNX model to determine how well it will work in mobile scenarios, and whether
it is likely to be able to use the pre-built ONNX Runtime Mobile Android or iOS package.''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config_path',
help='Path to required operators and types configuration used to build '
'the pre-built ORT mobile package.',
required=False,
type=pathlib.Path,
default=check_model_can_use_ort_mobile_pkg.get_default_config_path())
parser.add_argument('--log_level', choices=['debug', 'info', 'warning', 'error'],
default='info', help='Logging level')
parser.add_argument('model_path', help='Path to ONNX model to check', type=pathlib.Path)
args = parser.parse_args()
logger = logging.getLogger('check_usability')
if args.log_level == 'debug':
logger.setLevel(logging.DEBUG)
elif args.log_level == 'info':
logger.setLevel(logging.INFO)
elif args.log_level == 'warning':
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
try_eps = usability_checker.analyze_model(args.model_path, skip_optimize=False, logger=logger)
check_model_can_use_ort_mobile_pkg.run_check(args.model_path, args.config_path, logger)
logger.info("Run `python -m onnxruntime.tools.convert_onnx_models_to_ort ...` to convert the ONNX model to "
"ORT format. By default, the conversion tool will create an ORT format model optimized to "
"'basic' level (with a .basic.ort file extension) for use with NNAPI or CoreML, "
"and an ORT format model optimized to 'all' level (with a .all.ort file extension) for use with "
"the CPU EP.")
if try_eps:
logger.info("As NNAPI or CoreML may provide benefits with this model it is recommended to compare the "
"performance of the <model>.basic.ort model using the NNAPI EP on Android, and the "
"CoreML EP on iOS, against the performance of the <model>.all.ort model using the CPU EP.")
else:
logger.info("For optimal performance the <model>.all.ort model should be used with the CPU EP. ")
if __name__ == '__main__':
check_usability()
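# Illustrative invocation (the module path is an assumption about how the tool is packaged):
#   python -m onnxruntime.tools.check_onnx_model_mobile_usability --log_level debug /path/to/model.onnx
# which runs usability_checker.analyze_model plus the pre-built package op/type check above.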
```
#### File: util/ort_format_model/utils.py
```python
import os
import typing
from .operator_type_usage_processors import OperatorTypeUsageManager
from .ort_model_processor import OrtFormatModelProcessor
from ..logger import get_logger
log = get_logger("ort_format_model.utils")
def _extract_ops_and_types_from_ort_models(model_path_or_dir: str, enable_type_reduction: bool,
optimization_level: str = None):
if not os.path.exists(model_path_or_dir):
raise ValueError('Path to model/s does not exist: {}'.format(model_path_or_dir))
required_ops = {}
op_type_usage_manager = OperatorTypeUsageManager() if enable_type_reduction else None
suffix = f'.{optimization_level}.ort' if optimization_level else '.ort'
if os.path.isfile(model_path_or_dir):
if model_path_or_dir.lower().endswith(suffix):
model_processor = OrtFormatModelProcessor(model_path_or_dir, required_ops, op_type_usage_manager)
model_processor.process() # this updates required_ops and op_type_processors
log.info('Processed {}'.format(model_path_or_dir))
else:
log.debug('Skipped {}'.format(model_path_or_dir))
else:
for root, _, files in os.walk(model_path_or_dir):
for file in files:
model_path = os.path.join(root, file)
if file.lower().endswith(suffix):
model_processor = OrtFormatModelProcessor(model_path, required_ops, op_type_usage_manager)
model_processor.process() # this updates required_ops and op_type_processors
log.info('Processed {}'.format(model_path))
else:
log.debug('Skipped {}'.format(model_path))
return required_ops, op_type_usage_manager
def create_config_from_models(model_path_or_dir: str, output_file: str = None, enable_type_reduction: bool = True,
optimization_level: typing.Optional[str] = None):
'''
Create a configuration file with required operators and optionally required types.
:param model_path_or_dir: Path to recursively search for ORT format models, or to a single ORT format model.
:param output_file: File to write configuration to.
Defaults to creating required_operators[_and_types].config in the model_path_or_dir directory.
:param enable_type_reduction: Include required type information for individual operators in the configuration.
:param optimization_level: Filter files and adjust default output_file based on the optimization level. If set,
looks for '.<optimization_level>.ort' as the file suffix. Uses '.<optimization_level>.config' as the config
file suffix.
When we convert models we include the optimization level in the filename. When creating the configuration
we only want to create it for the specific optimization level so that we don't include irrelevant operators.
'''
required_ops, op_type_processors = _extract_ops_and_types_from_ort_models(model_path_or_dir, enable_type_reduction,
optimization_level)
if output_file:
directory, filename = os.path.split(output_file)
if not filename:
raise RuntimeError("Invalid output path for configuration: {}".format(output_file))
if directory and not os.path.exists(directory):
os.makedirs(directory)
else:
dir = model_path_or_dir
if os.path.isfile(model_path_or_dir):
dir = os.path.dirname(model_path_or_dir)
suffix = f'.{optimization_level}.config' if optimization_level else '.config'
output_file = os.path.join(
dir, ('required_operators_and_types' if enable_type_reduction else 'required_operators') + suffix)
with open(output_file, 'w') as out:
out.write("# Generated from model/s in {}\n".format(model_path_or_dir))
for domain in sorted(required_ops.keys()):
for opset in sorted(required_ops[domain].keys()):
ops = required_ops[domain][opset]
if ops:
out.write("{};{};".format(domain, opset))
if enable_type_reduction:
# type string is empty if op hasn't been seen
entries = ['{}{}'.format(op, op_type_processors.get_config_entry(domain, op) or '')
for op in sorted(ops)]
else:
entries = sorted(ops)
out.write("{}\n".format(','.join(entries)))
log.info("Created config in %s", output_file)
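# Illustrative call (placeholder paths; the import path depends on how this module is packaged):
#   create_config_from_models('ort_models/', enable_type_reduction=True, optimization_level='basic')
# scans ort_models/ for *.basic.ort files and writes
# ort_models/required_operators_and_types.basic.config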
``` |
{
"source": "jonglezb/spack",
"score": 2
} |
#### File: packages/wonton/package.py
```python
from spack import *
class Wonton(CMakePackage):
"""Wonton is a support package for the Portage
(https://github.com/laristra/portage) and Tangram
(https://github.com/laristra/tangram) libraries. It contains some
mesh/state classes, wrappers for other mesh/state libraries and
some utilities required by Portage and Tangram.
"""
homepage = "https://portage.lanl.gov"
git = "https://github.com/laristra/wonton.git"
url = "https://github.com/laristra/wonton/releases/download/1.2.10/wonton-1.2.10.tar.gz"
maintainers = ['raovgarimella']
version('1.2.10', sha256='1367b9d3294d1c8a15e3cefa2670102de25baf456d5ed0e2fb70863f062e96b0')
version('1.2.1', sha256='4f00513d1abe86f256214d2b5171b1575b2cd464df8609307c24cbc4c595c305')
variant('lapacke', default=True, description='Use LAPACKE solvers')
# Variants for controlling parallelism
variant('mpi', default=False, description='Enable distributed meshes with MPI')
variant('thrust', default=False, description='Enable on-node parallelism using NVidia Thrust library')
variant('kokkos', default=False, description='Enable on-node or device parallelism with Kokkos')
variant('openmp', default=False, description="Enable on-node parallelism using OpenMP")
variant('cuda', default=False, description="Enable GPU parallelism using CUDA")
# wrappers to external mesh/state libraries
variant('jali', default=False, description='Enable Jali mesh wrappers')
conflicts('+jali ~mpi') # Jali needs MPI
conflicts('+thrust +cuda') # Thrust with CUDA does not work as yet
conflicts('+thrust +kokkos') # Don't enable Kokkos, Thrust simultaneously
# dependencies
depends_on('[email protected]:', type='build')
depends_on('netlib-lapack +lapacke', when='+lapacke')
depends_on('mpi', when='+mpi')
depends_on('jali +mstk', when='+jali')
depends_on('mpi', when='+jali')
# We need boost only when no thrust option
depends_on('boost', when='~thrust')
# NVidia thrust library
depends_on('[email protected]', when='+thrust')
# CUDA library
depends_on('cuda', when='+cuda')
# Kokkos with appropriate option
depends_on('kokkos +openmp', when='+kokkos +openmp')
depends_on('kokkos +cuda', when='+kokkos +cuda')
def cmake_args(self):
options = []
if '+mpi' in self.spec:
options.append('-DWONTON_ENABLE_MPI=ON')
else:
options.append('-DWONTON_ENABLE_MPI=OFF')
if '+lapacke' in self.spec:
options.append('-DWONTON_ENABLE_LAPACKE=ON')
options.append('-DBLA_VENDOR=' + self.spec['blas'].name.upper())
options.append(
'-DBLAS_LIBRARIES=' + self.spec['blas'].libs.joined()
)
else:
options.append('-DWONTON_ENABLE_LAPACKE=OFF')
if '+thrust' in self.spec:
options.append('-DWONTON_ENABLE_THRUST=ON')
if '+cuda' in self.spec:
options.append(
'-DTHRUST_HOST_BACKEND:STRING=THRUST_HOST_SYSTEM_CPP'
)
options.append(
'-DTHRUST_DEVICE_BACKEND:STRING=THRUST_DEVICE_SYSTEM_CUDA'
)
else:
options.append(
'-DTHRUST_HOST_BACKEND:STRING=THRUST_HOST_SYSTEM_CPP'
)
options.append(
'-DTHRUST_DEVICE_BACKEND:STRING=THRUST_DEVICE_SYSTEM_OMP'
)
else:
options.append('-DWONTON_ENABLE_THRUST=OFF')
if '+kokkos' in self.spec:
options.append('-DWONTON_ENABLE_Kokkos=ON')
if '+cuda' in self.spec:
options.append('-DWONTON_ENABLE_Kokkos_CUDA=ON')
elif '+openmp' in self.spec:
options.append('-DWONTON_ENABLE_Kokkos_OpenMP=ON')
else:
options.append('-DWONTON_ENABLE_Kokkos=OFF')
if '+jali' in self.spec:
options.append('-DWONTON_ENABLE_Jali=ON')
else:
options.append('-DWONTON_ENABLE_Jali=OFF')
if '+flecsi' in self.spec:
options.append('-DWONTON_ENABLE_FleCSI=ON')
else:
options.append('-DWONTON_ENABLE_FleCSI=OFF')
# Unit test variant
if self.run_tests:
options.append('-DENABLE_UNIT_TESTS=ON')
options.append('-DENABLE_APP_TESTS=ON')
else:
options.append('-DENABLE_UNIT_TESTS=OFF')
options.append('-DENABLE_APP_TESTS=OFF')
return options
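    # Illustrative spec (Spack CLI syntax only; exact variant availability depends on your setup):
    #   spack install wonton +mpi +lapacke
    # would make cmake_args() above emit -DWONTON_ENABLE_MPI=ON plus the LAPACKE/BLAS options.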
``` |
{
"source": "Jongmassey/ubuc-dev",
"score": 2
} |
#### File: ubuc-dev/equipmentdb/model_base.py
```python
from django.contrib.auth.models import User
from django.db import models
from django.urls.base import reverse_lazy
import re
# abstract base class with common auditing fields
class UbucModel(models.Model):
created_on = models.DateTimeField(
auto_now_add=True, null=False, editable=False
)
updated_on = models.DateTimeField(
auto_now=True, null=False, editable=False
)
created_by = models.ForeignKey(
User,
null=False,
blank=False,
editable=False,
on_delete=models.RESTRICT,
related_name="%(class)s_created_by",
)
updated_by = models.ForeignKey(
User,
null=False,
blank=False,
editable=False,
on_delete=models.RESTRICT,
related_name="%(class)s_updated_by",
)
def get_absolute_url(self):
return reverse_lazy(f"{classToURL(self.__class__.__name__)}-list")
def save_with_user(self, user) -> None:
self.updated_by = user
if self.created_by_id is None:
self.created_by = user
return super().save()
class Meta:
abstract = True
def classToURL(class_name: str) -> str:
exp = re.compile("([a-z])([A-Z])")
return exp.sub(r"\1-\2", class_name).lower()
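# e.g. classToURL("FaultReport") -> "fault-report" (hypothetical class name), so
# get_absolute_url() on such a model would reverse the "fault-report-list" URL name.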
# Status code enums
class FaultStatus(models.IntegerChoices):
NEW = 0
IN_PROGRESS = 1
FIXED = 2
UNFIXABLE = 3
NO_FAULT = 4
class ServiceStatus(models.IntegerChoices):
UNKNOWN = 0
IN_SERVICE = 1
OUT_OF_SERVICE = 2
class TestStatus(models.IntegerChoices):
UNKNOWN = 0
IN_TEST = 1
OUT_OF_TEST = 2
``` |
{
"source": "jongn/car-vision",
"score": 3
} |
#### File: jongn/car-vision/Cars.py
```python
from skimage import data
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import cv2
import socket
import Utils
def main():
#classifier_test()
mog = cv2.bgsegm.createBackgroundSubtractorMOG()
gmg = cv2.bgsegm.createBackgroundSubtractorGMG()
mog2 = cv2.createBackgroundSubtractorMOG2()
background_test(mog, 0.8)
#background_test(mog2, 0.8)
def classifier_test():
image_sequence = 'Data/Camera3/image_%05d.jpg'
car_cascade = cv2.CascadeClassifier('car_classifier.xml')
cap = cv2.VideoCapture(image_sequence)
frame_id = 0
while(1):
ret, frame = cap.read()
if ret:
cars = car_cascade.detectMultiScale(frame)
for (x,y,w,h) in cars:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
print('Processing %d : cars detected : [%d]' % (frame_id, len(cars)))
cv2.imshow('frame', frame)
cv2.waitKey(300)
frame_id += 1
else:
break
cap.release()
cv2.destroyAllWindows()
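# background_test pipeline (summary of the steps below): (1) build a running-average
# background of the whole clip with cv2.accumulateWeighted, (2) prime the subtractor
# with that background at the given `initial_learning` rate, then (3) for each frame
# apply the subtractor, clean the mask with close/open/dilate, keep contours of at
# least 10x10 px, and write the original/mask/dilation images to Output/.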
def background_test(bg, initial_learning):
image_sequence = 'Data/Camera3/image_%05d.jpg'
image_sequence = 'Images/%02d_TV388_N1PRESIDIO.jpg'
cap = cv2.VideoCapture(image_sequence)
back = cv2.VideoCapture(image_sequence)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
ret, frame = back.read()
avg = np.float32(frame)
frame_count = 0
while(1):
ret, frame = back.read()
if ret:
avg = cv2.accumulateWeighted(frame, avg, 0.05)
frame_count = frame_count + 1
else:
break
back.release()
background = cv2.convertScaleAbs(avg)
cv2.imshow('background',background)
bg.apply(background, None, initial_learning)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
frame_id = 0
while(1):
ret, frame = cap.read()
if ret:
fgmask = bg.apply(frame)
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
dilation = cv2.dilate(opening, kernel, iterations=1)
(_, contours, _) = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#matches = []
for (i, contour) in enumerate(contours):
(x, y, w, h) = cv2.boundingRect(contour)
contour_valid = (w >= 10) and (h >= 10)
if not contour_valid:
continue
#centroid = get_centroid(x, y, w, h)
#cv2.rectangle(fgmask, (x,y), (x+w,y+h),(255,0,0),2)
#cv2.rectangle(frame, (x,y), (x+w,y+h),(255,0,0),2)
#matches.append(((x, y, w, h), centroid))
cv2.imshow('mask', fgmask)
cv2.imshow('dilation', dilation)
cv2.imshow('original', frame)
orig_string = 'Output/' + str(frame_id) + '_orig.jpg'
mask_string = 'Output/' + str(frame_id) + '_mask.jpg'
dil_string = 'Output/' + str(frame_id) + '_dilation.jpg'
cv2.imwrite(orig_string, frame)
cv2.imwrite(mask_string, fgmask)
cv2.imwrite(dil_string, dilation)
cv2.waitKey(10)
frame_id += 1
else:
break
cap.release()
cv2.destroyAllWindows()
def background_tests():
image_sequence = 'Data/Camera3/image_%05d.jpg'
cap = cv2.VideoCapture(image_sequence)
back = cv2.VideoCapture(image_sequence)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
mog = cv2.bgsegm.createBackgroundSubtractorMOG()
gmg = cv2.bgsegm.createBackgroundSubtractorGMG()
mog2 = cv2.createBackgroundSubtractorMOG2()
ret, frame = back.read()
avg = np.float32(frame)
frame_count = 0
while(1):
ret, frame = back.read()
if ret:
avg = cv2.accumulateWeighted(frame, avg, 0.05)
frame_count = frame_count + 1
else:
break
back.release()
background = cv2.convertScaleAbs(avg)
cv2.imshow('background',background)
if frame_count < 120:
print('Not enough frames for accurate GMG background setup')
mog.apply(background, None, 0.8)
mog2.apply(background, None, 0.8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
frame_id = 0
while(1):
ret, frame = cap.read()
if ret:
fgmas_mog = mog.apply(frame)
fgmas_mog2 = mog2.apply(frame)
closing_mog = cv2.morphologyEx(fgmas_mog, cv2.MORPH_CLOSE, kernel)
opening_mog = cv2.morphologyEx(closing_mog, cv2.MORPH_OPEN, kernel)
dilation_mog = cv2.dilate(opening_mog, kernel, iterations=1)
closing_mog2 = cv2.morphologyEx(fgmas_mog2, cv2.MORPH_CLOSE, kernel)
opening_mog2 = cv2.morphologyEx(closing_mog2, cv2.MORPH_OPEN, kernel)
dilation_mog2 = cv2.dilate(opening_mog2, kernel, iterations=1)
(_, contours_mog, _) = cv2.findContours(dilation_mog, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
(_, contours_mog2, _) = cv2.findContours(dilation_mog2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for (i, contour) in enumerate(contours_mog):
(x, y, w, h) = cv2.boundingRect(contour)
contour_valid = (w >= 10) and (h >= 10)
if not contour_valid:
continue
centroid = get_centroid(x, y, w, h)
cv2.rectangle(fgmas_mog, (x,y), (x+w,y+h),(255,0,0),2)
for (i, contour) in enumerate(contours_mog2):
(x, y, w, h) = cv2.boundingRect(contour)
contour_valid = (w >= 10) and (h >= 10)
if not contour_valid:
continue
cv2.rectangle(fgmas_mog2, (x,y), (x+w,y+h),(255,0,0),2)
cv2.imshow('mog', fgmas_mog)
cv2.imshow('mog2', fgmas_mog2)
cv2.waitKey(300)
frame_id += 1
else:
break
cap.release()
cv2.destroyAllWindows()
def get_centroid(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)
cx = x + x1
cy = y + y1
return (cx, cy)
if __name__ == "__main__":
main()
```
#### File: jongn/car-vision/Utils.py
```python
import time
import requests
def grab_image(url, img_name):
path = "Images/" + img_name
img_data = requests.get(url).content
with open(path, 'wb') as handler:
handler.write(img_data)
def grab_image_1min(url, img_name):
frame = 0
while True:
image_name = str(frame) + "_" + img_name
grab_image(url, image_name)
time.sleep(60)
frame = frame + 1
def main():
url = "http://www.dot.ca.gov/cwwp2/data/d4/cctv/image/TV388_N1PRESIDIO.jpg?1378316522948"
img_name = "TV388_N1PRESIDIO.jpg"
grab_image_1min(url, img_name)
if __name__ == "__main__":
main()
``` |
{
"source": "jongodinez/Vulnerous",
"score": 3
} |
#### File: jongodinez/Vulnerous/Vulweb.py
```python
import sys
import socket
import subprocess
import os
import time
import signal
import random
import string
import threading
import re
from urllib.parse import urlsplit
# Scan Time Elapser
intervals = (
('h', 3600),
('m', 60),
('s', 1),
)
def display_time(seconds, granularity=3):
result = []
seconds = seconds + 1
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
result.append("{}{}".format(value, name))
return ' '.join(result[:granularity])
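# e.g. display_time(3725) -> '1h 2m 6s' (the extra second comes from the +1 adjustment above)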
def url_maker(url):
if not re.match(r'http(s?)\:', url):
url = 'http://' + url
parsed = urlsplit(url)
host = parsed.netloc
if host.startswith('www.'):
host = host[4:]
return host
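# e.g. url_maker('https://www.example.com/some/path') -> 'example.com'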
def check_internet():
os.system('ping -c1 github.com > rs_net 2>&1')
if "0% packet loss" in open('rs_net').read():
val = 1
else:
val = 0
os.system('rm rs_net > /dev/null 2>&1')
return val
# Initializing the color module class
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
BADFAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BG_ERR_TXT = '\033[41m' # For critical errors and crashes
BG_HEAD_TXT = '\033[100m'
BG_ENDL_TXT = '\033[46m'
BG_CRIT_TXT = '\033[45m'
BG_HIGH_TXT = '\033[41m'
BG_MED_TXT = '\033[43m'
BG_LOW_TXT = '\033[44m'
BG_INFO_TXT = '\033[42m'
# Classifies the Vulnerability's Severity
def vul_info(val):
result =''
if val == 'c':
result = bcolors.BG_CRIT_TXT+" critical "+bcolors.ENDC
elif val == 'h':
result = bcolors.BG_HIGH_TXT+" high "+bcolors.ENDC
elif val == 'm':
result = bcolors.BG_MED_TXT+" medium "+bcolors.ENDC
elif val == 'l':
result = bcolors.BG_LOW_TXT+" low "+bcolors.ENDC
else:
result = bcolors.BG_INFO_TXT+" info "+bcolors.ENDC
return result
# Legends
proc_high = bcolors.BADFAIL + "●" + bcolors.ENDC
proc_med = bcolors.WARNING + "●" + bcolors.ENDC
proc_low = bcolors.OKGREEN + "●" + bcolors.ENDC
# Links the vulnerability with threat level and remediation database
def vul_remed_info(v1,v2,v3):
print(bcolors.BOLD+"Vulnerability Threat Level"+bcolors.ENDC)
print("\t"+vul_info(v2)+" "+bcolors.WARNING+str(tool_resp[v1][0])+bcolors.ENDC)
print(bcolors.BOLD+"Vulnerability Definition"+bcolors.ENDC)
print("\t"+bcolors.BADFAIL+str(tools_fix[v3-1][1])+bcolors.ENDC)
print(bcolors.BOLD+"Vulnerability Remediation"+bcolors.ENDC)
print("\t"+bcolors.OKGREEN+str(tools_fix[v3-1][2])+bcolors.ENDC)
# Source Help Context
def helper():
print(bcolors.OKBLUE+"Information:"+bcolors.ENDC)
print("------------")
print("\t./Source.py example.com: Scans the domain example.com")
print( "\t./Source.py --update : Updates the scanner to the latest version.")
print( "\t./Source.py --help : Displays this help context.")
print( bcolors.OKBLUE+"Interactive:"+bcolors.ENDC)
print( "------------")
print( "\tCtrl+C: Skips current test.")
print( "\tCtrl+Z: Quits Source.")
print( bcolors.OKBLUE+"Legends:"+bcolors.ENDC)
print( "--------")
print( "\t["+proc_high+"]: Scan process may take longer times (not predictable).")
print( "\t["+proc_med+"]: Scan process may take less than 10 minutes.")
print( "\t["+proc_low+"]: Scan process may take less than a minute or two.")
print( bcolors.OKBLUE+"Vulnerability Information:"+bcolors.ENDC)
print( "--------------------------")
print( "\t"+vul_info('c')+": Requires immediate attention as it may lead to compromise or service unavailability.")
print( "\t"+vul_info('h')+" : May not lead to an immediate compromise, but there are high chances of probability.")
print( "\t"+vul_info('m')+" : Attacker may correlate multiple vulnerabilities of this type to launch a sophisticated attack.")
print( "\t"+vul_info('l')+" : Not a serious issue, but it is recommended to attend the finding.")
print( "\t"+vul_info('i')+" : Not classified as a vulnerability, simply an useful informational alert to be considered.\n")
# Clears Line
def clear():
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
# Source Logo
def logo():
print(bcolors.WARNING)
print("VULNEROUS RAPID WEB APP ANALYSER")
print (bcolors.ENDC)
# Initiliazing the idle loader/spinner class
class Spinner:
busy = False
delay = 0.05
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/\\': yield cursor #←↑↓→
#for cursor in '←↑↓→': yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
try:
while self.busy:
#sys.stdout.write(next(self.spinner_generator))
print(bcolors.BG_ERR_TXT + next(self.spinner_generator) + bcolors.ENDC, end='')
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
except (KeyboardInterrupt, SystemExit):
#clear()
print( "\n\t"+ bcolors.BG_ERR_TXT+"Source received a series of Ctrl+C hits. Quitting..." +bcolors.ENDC)
sys.exit(1)
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
try:
self.busy = False
time.sleep(self.delay)
except (KeyboardInterrupt, SystemExit):
#clear()
print( "\n\t"+ bcolors.BG_ERR_TXT+"Source received a series of Ctrl+C hits. Quitting..." +bcolors.ENDC)
sys.exit(1)
# End ofloader/spinner class
# Instantiating the spinner/loader class
spinner = Spinner()
# Scanners that will be used and filename rotation (default: enabled (1))
tool_names = [
["host","Host - Checks for existence of IPV6 address.","host",1],
["aspnet_config_err","ASP.Net Misconfiguration - Checks for ASP.Net Misconfiguration.","wget",1],
["wp_check","WordPress Checker - Checks for WordPress Installation.","wget",1],
["drp_check", "Drupal Checker - Checks for Drupal Installation.","wget",1],
["joom_check", "Joomla Checker - Checks for Joomla Installation.","wget",1],
["uniscan","Uniscan - Checks for robots.txt & sitemap.xml","uniscan",1],
["wafw00f","Wafw00f - Checks for Application Firewalls.","wafw00f",1],
["nmap","Nmap - Fast Scan [Only Few Port Checks]","nmap",1],
["theharvester","The Harvester - Scans for emails using Google's passive search.","theharvester",1],
["dnsrecon","DNSRecon - Attempts Multiple Zone Transfers on Nameservers.","dnsrecon",1],
["fierce","Fierce - Attempts Zone Transfer [No Brute Forcing]","fierce",1],
["dnswalk","DNSWalk - Attempts Zone Transfer.","dnswalk",1],
["whois","WHOis - Checks for Administrator's Contact Information.","whois",1],
["nmap_header","Nmap [XSS Filter Check] - Checks if XSS Protection Header is present.","nmap",1],
["nmap_sloris","Nmap [Slowloris DoS] - Checks for Slowloris Denial of Service Vulnerability.","nmap",1],
["sslyze_hbleed","SSLyze - Checks only for Heartbleed Vulnerability.","sslyze",1],
["nmap_hbleed","Nmap [Heartbleed] - Checks only for Heartbleed Vulnerability.","nmap",1],
["nmap_poodle","Nmap [POODLE] - Checks only for Poodle Vulnerability.","nmap",1],
["nmap_ccs","Nmap [OpenSSL CCS Injection] - Checks only for CCS Injection.","nmap",1],
["nmap_freak","Nmap [FREAK] - Checks only for FREAK Vulnerability.","nmap",1],
["nmap_logjam","Nmap [LOGJAM] - Checks for LOGJAM Vulnerability.","nmap",1],
["sslyze_ocsp","SSLyze - Checks for OCSP Stapling.","sslyze",1],
["sslyze_zlib","SSLyze - Checks for ZLib Deflate Compression.","sslyze",1],
["sslyze_reneg","SSLyze - Checks for Secure Renegotiation Support and Client Renegotiation.","sslyze",1],
["sslyze_resum","SSLyze - Checks for Session Resumption Support with [Session IDs/TLS Tickets].","sslyze",1],
["lbd","LBD - Checks for DNS/HTTP Load Balancers.","lbd",1],
["golismero_dns_malware","Golismero - Checks if the domain is spoofed or hijacked.","golismero",1],
["golismero_heartbleed","Golismero - Checks only for Heartbleed Vulnerability.","golismero",1],
["golismero_brute_url_predictables","Golismero - BruteForces for certain files on the Domain.","golismero",1],
["golismero_brute_directories","Golismero - BruteForces for certain directories on the Domain.","golismero",1],
["golismero_sqlmap","Golismero - SQLMap [Retrieves only the DB Banner]","golismero",1],
["dirb","DirB - Brutes the target for Open Directories.","dirb",1],
["xsser","XSSer - Checks for Cross-Site Scripting [XSS] Attacks.","xsser",1],
["golismero_ssl_scan","Golismero SSL Scans - Performs SSL related Scans.","golismero",1],
["golismero_zone_transfer","Golismero Zone Transfer - Attempts Zone Transfer.","golismero",1],
["golismero_nikto","Golismero Nikto Scans - Uses Nikto Plugin to detect vulnerabilities.","golismero",1],
["golismero_brute_subdomains","Golismero Subdomains Bruter - Brute Forces Subdomain Discovery.","golismero",1],
["dnsenum_zone_transfer","DNSEnum - Attempts Zone Transfer.","dnsenum",1],
["fierce_brute_subdomains","Fierce Subdomains Bruter - Brute Forces Subdomain Discovery.","fierce",1],
["dmitry_email","DMitry - Passively Harvests Emails from the Domain.","dmitry",1],
["dmitry_subdomains","DMitry - Passively Harvests Subdomains from the Domain.","dmitry",1],
["nmap_telnet","Nmap [TELNET] - Checks if TELNET service is running.","nmap",1],
["nmap_ftp","Nmap [FTP] - Checks if FTP service is running.","nmap",1],
["nmap_stuxnet","Nmap [STUXNET] - Checks if the host is affected by STUXNET Worm.","nmap",1],
["webdav","WebDAV - Checks if WEBDAV enabled on Home directory.","davtest",1],
["golismero_finger","Golismero - Does a fingerprint on the Domain.","golismero",1],
["uniscan_filebrute","Uniscan - Brutes for Filenames on the Domain.","uniscan",1],
["uniscan_dirbrute", "Uniscan - Brutes Directories on the Domain.","uniscan",1],
["uniscan_ministresser", "Uniscan - Stress Tests the Domain.","uniscan",1],
["uniscan_rfi","Uniscan - Checks for LFI, RFI and RCE.","uniscan",1],#50
["uniscan_xss","Uniscan - Checks for XSS, SQLi, BSQLi & Other Checks.","uniscan",1],
["nikto_xss","Nikto - Checks for Apache Expect XSS Header.","nikto",1],
["nikto_subrute","Nikto - Brutes Subdomains.","nikto",1],
["nikto_shellshock","Nikto - Checks for Shellshock Bug.","nikto",1],
["nikto_internalip","Nikto - Checks for Internal IP Leak.","nikto",1],
["nikto_putdel","Nikto - Checks for HTTP PUT DEL.","nikto",1],
["nikto_headers","Nikto - Checks the Domain Headers.","nikto",1],
["nikto_ms01070","Nikto - Checks for MS10-070 Vulnerability.","nikto",1],
["nikto_servermsgs","Nikto - Checks for Server Issues.","nikto",1],
["nikto_outdated","Nikto - Checks if Server is Outdated.","nikto",1],
["nikto_httpoptions","Nikto - Checks for HTTP Options on the Domain.","nikto",1],
["nikto_cgi","Nikto - Enumerates CGI Directories.","nikto",1],
["nikto_ssl","Nikto - Performs SSL Checks.","nikto",1],
["nikto_sitefiles","Nikto - Checks for any interesting files on the Domain.","nikto",1],
["nikto_paths","Nikto - Checks for Injectable Paths.","nikto",1],
["dnsmap_brute","DNSMap - Brutes Subdomains.","dnsmap",1],
["nmap_sqlserver","Nmap - Checks for MS-SQL Server DB","nmap",1],
["nmap_mysql", "Nmap - Checks for MySQL DB","nmap",1],
["nmap_oracle", "Nmap - Checks for ORACLE DB","nmap",1],
["nmap_rdp_udp","Nmap - Checks for Remote Desktop Service over UDP","nmap",1],
["nmap_rdp_tcp","Nmap - Checks for Remote Desktop Service over TCP","nmap",1],
["nmap_full_ps_tcp","Nmap - Performs a Full TCP Port Scan","nmap",1],
["nmap_full_ps_udp","Nmap - Performs a Full UDP Port Scan","nmap",1],
["nmap_snmp","Nmap - Checks for SNMP Service","nmap",1],
["aspnet_elmah_axd","Checks for ASP.net Elmah Logger","wget",1],
["nmap_tcp_smb","Checks for SMB Service over TCP","nmap",1],
["nmap_udp_smb","Checks for SMB Service over UDP","nmap",1],
["wapiti","Wapiti - Checks for SQLi, RCE, XSS and Other Vulnerabilities","wapiti",1],
["nmap_iis","Nmap - Checks for IIS WebDAV","nmap",1],
["whatweb","WhatWeb - Checks for X-XSS Protection Header","whatweb",1]
]
# Command that is used to initiate the tool (with parameters and extra params)
tool_cmd = [
["host ",""],
["wget -O temp_aspnet_config_err --tries=1 ","/%7C~.aspx"],
["wget -O temp_wp_check --tries=1 ","/wp-admin"],
["wget -O temp_drp_check --tries=1 ","/user"],
["wget -O temp_joom_check --tries=1 ","/administrator"],
["uniscan -e -u ",""],
["wafw00f ",""],
["nmap -F --open -Pn ",""],
["theharvester -l 50 -b google -d ",""],
["dnsrecon -d ",""],
["fierce -wordlist xxx -dns ",""],
["dnswalk -d ","."],
["whois ",""],
["nmap -p80 --script http-security-headers -Pn ",""],
["nmap -p80,443 --script http-slowloris --max-parallelism 500 -Pn ",""],
["sslyze --heartbleed ",""],
["nmap -p443 --script ssl-heartbleed -Pn ",""],
["nmap -p443 --script ssl-poodle -Pn ",""],
["nmap -p443 --script ssl-ccs-injection -Pn ",""],
["nmap -p443 --script ssl-enum-ciphers -Pn ",""],
["nmap -p443 --script ssl-dh-params -Pn ",""],
["sslyze --certinfo=basic ",""],
["sslyze --compression ",""],
["sslyze --reneg ",""],
["sslyze --resum ",""],
["lbd ",""],
["golismero -e dns_malware scan ",""],
["golismero -e heartbleed scan ",""],
["golismero -e brute_url_predictables scan ",""],
["golismero -e brute_directories scan ",""],
["golismero -e sqlmap scan ",""],
["dirb http://"," -fi"],
["xsser --all=http://",""],
["golismero -e sslscan scan ",""],
["golismero -e zone_transfer scan ",""],
["golismero -e nikto scan ",""],
["golismero -e brute_dns scan ",""],
["dnsenum ",""],
["fierce -dns ",""],
["dmitry -e ",""],
["dmitry -s ",""],
["nmap -p23 --open -Pn ",""],
["nmap -p21 --open -Pn ",""],
["nmap --script stuxnet-detect -p445 -Pn ",""],
["davtest -url http://",""],
["golismero -e fingerprint_web scan ",""],
["uniscan -w -u ",""],
["uniscan -q -u ",""],
["uniscan -r -u ",""],
["uniscan -s -u ",""],
["uniscan -d -u ",""],
["nikto -Plugins 'apache_expect_xss' -host ",""],
["nikto -Plugins 'subdomain' -host ",""],
["nikto -Plugins 'shellshock' -host ",""],
["nikto -Plugins 'cookies' -host ",""],
["nikto -Plugins 'put_del_test' -host ",""],
["nikto -Plugins 'headers' -host ",""],
["nikto -Plugins 'ms10-070' -host ",""],
["nikto -Plugins 'msgs' -host ",""],
["nikto -Plugins 'outdated' -host ",""],
["nikto -Plugins 'httpoptions' -host ",""],
["nikto -Plugins 'cgi' -host ",""],
["nikto -Plugins 'ssl' -host ",""],
["nikto -Plugins 'sitefiles' -host ",""],
["nikto -Plugins 'paths' -host ",""],
["dnsmap ",""],
["nmap -p1433 --open -Pn ",""],
["nmap -p3306 --open -Pn ",""],
["nmap -p1521 --open -Pn ",""],
["nmap -p3389 --open -sU -Pn ",""],
["nmap -p3389 --open -sT -Pn ",""],
["nmap -p1-65535 --open -Pn ",""],
["nmap -p1-65535 -sU --open -Pn ",""],
["nmap -p161 -sU --open -Pn ",""],
["wget -O temp_aspnet_elmah_axd --tries=1 ","/elmah.axd"],
["nmap -p445,137-139 --open -Pn ",""],
["nmap -p137,138 --open -Pn ",""],
["wapiti "," -f txt -o temp_wapiti"],
["nmap -p80 --script=http-iis-webdav-vuln -Pn ",""],
["whatweb "," -a 1"]
]
# Tool Responses (Begins) [Responses + Severity (c - critical | h - high | m - medium | l - low | i - informational) + Reference for Vuln Definition and Remediation]
tool_resp = [
["Does not have an IPv6 Address. It is good to have one.","i",1],
["ASP.Net is misconfigured to throw server stack errors on screen.","m",2],
["WordPress Installation Found. Check for vulnerabilities corresponds to that version.","i",3],
["Drupal Installation Found. Check for vulnerabilities corresponds to that version.","i",4],
["Joomla Installation Found. Check for vulnerabilities corresponds to that version.","i",5],
["robots.txt/sitemap.xml found. Check those files for any information.","i",6],
["No Web Application Firewall Detected","m",7],
["Some ports are open. Perform a full-scan manually.","l",8],
["Email Addresses Found.","l",9],
["Zone Transfer Successful using DNSRecon. Reconfigure DNS immediately.","h",10],
["Zone Transfer Successful using fierce. Reconfigure DNS immediately.","h",10],
["Zone Transfer Successful using dnswalk. Reconfigure DNS immediately.","h",10],
["Whois Information Publicly Available.","i",11],
["XSS Protection Filter is Disabled.","m",12],
["Vulnerable to Slowloris Denial of Service.","c",13],
["HEARTBLEED Vulnerability Found with SSLyze.","h",14],
["HEARTBLEED Vulnerability Found with Nmap.","h",14],
["POODLE Vulnerability Detected.","h",15],
["OpenSSL CCS Injection Detected.","h",16],
["FREAK Vulnerability Detected.","h",17],
["LOGJAM Vulnerability Detected.","h",18],
["Unsuccessful OCSP Response.","m",19],
["Server supports Deflate Compression.","m",20],
["Secure Renegotiation is unsupported.","m",21],
["Secure Resumption unsupported with (Sessions IDs/TLS Tickets).","m",22],
["No DNS/HTTP based Load Balancers Found.","l",23],
["Domain is spoofed/hijacked.","h",24],
["HEARTBLEED Vulnerability Found with Golismero.","h",14],
["Open Files Found with Golismero BruteForce.","m",25],
["Open Directories Found with Golismero BruteForce.","m",26],
["DB Banner retrieved with SQLMap.","l",27],
["Open Directories Found with DirB.","m",26],
["XSSer found XSS vulnerabilities.","c",28],
["Found SSL related vulnerabilities with Golismero.","m",29],
["Zone Transfer Successful with Golismero. Reconfigure DNS immediately.","h",10],
["Golismero Nikto Plugin found vulnerabilities.","m",30],
["Found Subdomains with Golismero.","m",31],
["Zone Transfer Successful using DNSEnum. Reconfigure DNS immediately.","h",10],
["Found Subdomains with Fierce.","m",31],
["Email Addresses discovered with DMitry.","l",9],
["Subdomains discovered with DMitry.","m",31],
["Telnet Service Detected.","h",32],
["FTP Service Detected.","c",33],
["Vulnerable to STUXNET.","c",34],
["WebDAV Enabled.","m",35],
["Found some information through Fingerprint(ing.","l",36],
["Open Files Found with Uniscan.","m",25],
["Open Directories Found with Uniscan.","m",26],
["Vulnerable to Stress Tests.","h",37],
["Uniscan detected possible LFI, RFI or RCE.","h",38],
["Uniscan detected possible XSS, SQLi, BSQLi.","h",39],
["Apache Expect XSS Header not present.","m",12],
["Found Subdomains with Nikto.","m",31],
["Webserver vulnerable to Shellshock Bug.","c",40],
["Webserver leaks Internal IP.","l",41],
["HTTP PUT DEL Methods Enabled.","m",42],
["Some vulnerable headers exposed.","m",43],
["Webserver vulnerable to MS10-070.","h",44],
["Some issues found on the Webserver.","m",30],
["Webserver is Outdated.","h",45],
["Some issues found with HTTP Options.","l",42],
["CGI Directories Enumerated.","l",26],
["Vulnerabilities reported in SSL Scans.","m",29],
["Interesting Files Detected.","m",25],
["Injectable Paths Detected.","l",46],
["Found Subdomains with DNSMap.","m",31],
["MS-SQL DB Service Detected.","l",47],
["MySQL DB Service Detected.","l",47],
["ORACLE DB Service Detected.","l",47],
["RDP Server Detected over UDP.","h",48],
["RDP Server Detected over TCP.","h",48],
["TCP Ports are Open","l",8],
["UDP Ports are Open","l",8],
["SNMP Service Detected.","m",49],
["Elmah is Configured.","m",50],
["SMB Ports are Open over TCP","m",51],
["SMB Ports are Open over UDP","m",51],
["Wapiti discovered a range of vulnerabilities","h",30],
["IIS WebDAV is Enabled","m",35],
["X-XSS Protection is not Present","m",12]
]
# Tool Responses (Ends)
# Tool Status (Response Data + Response Code (if the status check fails and the result still has to be pushed) + Legends + Approx Time + Tool Identification + Bad Responses)
tool_status = [
["has IPv6",1,proc_low," < 15s","ipv6",["not found","has IPv6"]],
["Server Error",0,proc_low," < 30s","asp.netmisconf",["unable to resolve host address","Connection timed out"]],
["wp-login",0,proc_low," < 30s","wpcheck",["unable to resolve host address","Connection timed out"]],
["drupal",0,proc_low," < 30s","drupalcheck",["unable to resolve host address","Connection timed out"]],
["joomla",0,proc_low," < 30s","joomlacheck",["unable to resolve host address","Connection timed out"]],
["[+]",0,proc_low," < 40s","robotscheck",["Use of uninitialized value in unpack at"]],
["No WAF",0,proc_low," < 45s","wafcheck",["appears to be down"]],
["tcp open",0,proc_med," < 2m","nmapopen",["Failed to resolve"]],
["No emails found",1,proc_med," < 3m","harvester",["No hosts found","No emails found"]],
["[+] Zone Transfer was successful!!",0,proc_low," < 20s","dnsreconzt",["Could not resolve domain"]],
["Whoah, it worked",0,proc_low," < 30s","fiercezt",["none"]],
["0 errors",0,proc_low," < 35s","dnswalkzt",["!!!0 failures, 0 warnings, 3 errors."]],
["Admin Email:",0,proc_low," < 25s","whois",["No match for domain"]],
["XSS filter is disabled",0,proc_low," < 20s","nmapxssh",["Failed to resolve"]],
["VULNERABLE",0,proc_high," < 45m","nmapdos",["Failed to resolve"]],
["Server is vulnerable to Heartbleed",0,proc_low," < 40s","sslyzehb",["Could not resolve hostname"]],
["VULNERABLE",0,proc_low," < 30s","nmap1",["Failed to resolve"]],
["VULNERABLE",0,proc_low," < 35s","nmap2",["Failed to resolve"]],
["VULNERABLE",0,proc_low," < 35s","nmap3",["Failed to resolve"]],
["VULNERABLE",0,proc_low," < 30s","nmap4",["Failed to resolve"]],
["VULNERABLE",0,proc_low," < 35s","nmap5",["Failed to resolve"]],
["ERROR - OCSP response status is not successful",0,proc_low," < 25s","sslyze1",["Could not resolve hostname"]],
["VULNERABLE",0,proc_low," < 30s","sslyze2",["Could not resolve hostname"]],
["VULNERABLE",0,proc_low," < 25s","sslyze3",["Could not resolve hostname"]],
["VULNERABLE",0,proc_low," < 30s","sslyze4",["Could not resolve hostname"]],
["does NOT use Load-balancing",0,proc_med," < 4m","lbd",["NOT FOUND"]],
["No vulnerabilities found",1,proc_low," < 45s","golism1",["Cannot resolve domain name","No vulnerabilities found"]],
["No vulnerabilities found",1,proc_low," < 40s","golism2",["Cannot resolve domain name","No vulnerabilities found"]],
["No vulnerabilities found",1,proc_low," < 45s","golism3",["Cannot resolve domain name","No vulnerabilities found"]],
["No vulnerabilities found",1,proc_low," < 40s","golism4",["Cannot resolve domain name","No vulnerabilities found"]],
["No vulnerabilities found",1,proc_low," < 45s","golism5",["Cannot resolve domain name","No vulnerabilities found"]],
["FOUND: 0",1,proc_high," < 35m","dirb",["COULDNT RESOLVE HOST","FOUND: 0"]],
["Could not find any vulnerability!",1,proc_med," < 4m","xsser",["XSSer is not working propertly!","Could not find any vulnerability!"]],
["Occurrence ID",0,proc_low," < 45s","golism6",["Cannot resolve domain name"]],
["DNS zone transfer successful",0,proc_low," < 30s","golism7",["Cannot resolve domain name"]],
["Nikto found 0 vulnerabilities",1,proc_med," < 4m","golism8",["Cannot resolve domain name","Nikto found 0 vulnerabilities"]],
["Possible subdomain leak",0,proc_high," < 30m","golism9",["Cannot resolve domain name"]],
["AXFR record query failed:",1,proc_low," < 45s","dnsenumzt",["NS record query failed:","AXFR record query failed","no NS record for"]],
["Found 0 entries",1,proc_high," < 75m","fierce2",["Found 0 entries","is gimp"]],
["Found 0 E-Mail(s)",1,proc_low," < 30s","dmitry1",["Unable to locate Host IP addr","Found 0 E-Mail(s)"]],
["Found 0 possible subdomain(s)",1,proc_low," < 35s","dmitry2",["Unable to locate Host IP addr","Found 0 possible subdomain(s)"]],
["open",0,proc_low," < 15s","nmaptelnet",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmapftp",["Failed to resolve"]],
["open",0,proc_low," < 20s","nmapstux",["Failed to resolve"]],
["SUCCEED",0,proc_low," < 30s","webdav",["is not DAV enabled or not accessible."]],
["No vulnerabilities found",1,proc_low," < 15s","golism10",["Cannot resolve domain name","No vulnerabilities found"]],
["[+]",0,proc_med," < 2m","uniscan2",["Use of uninitialized value in unpack at"]],
["[+]",0,proc_med," < 5m","uniscan3",["Use of uninitialized value in unpack at"]],
["[+]",0,proc_med," < 9m","uniscan4",["Use of uninitialized value in unpack at"]],
["[+]",0,proc_med," < 8m","uniscan5",["Use of uninitialized value in unpack at"]],
["[+]",0,proc_med," < 9m","uniscan6",["Use of uninitialized value in unpack at"]],
["0 item(s) reported",1,proc_low," < 35s","nikto1",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto2",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto3",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto4",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto5",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto6",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto7",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto8",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto9",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto10",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto11",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto12",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto13",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto14","ERROR: Cannot resolve hostname , 0 item(s) reported"],
["#1",0,proc_high," < 30m","dnsmap_brute",["[+] 0 (sub)domains and 0 IP address(es) found"]],
["open",0,proc_low," < 15s","nmapmssql",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmapmysql",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmaporacle",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmapudprdp",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmaptcprdp",["Failed to resolve"]],
["open",0,proc_high," > 50m","nmapfulltcp",["Failed to resolve"]],
["open",0,proc_high," > 75m","nmapfulludp",["Failed to resolve"]],
["open",0,proc_low," < 30s","nmapsnmp",["Failed to resolve"]],
["Microsoft SQL Server Error Log",0,proc_low," < 30s","elmahxd",["unable to resolve host address","Connection timed out"]],
["open",0,proc_low," < 20s","nmaptcpsmb",["Failed to resolve"]],
["open",0,proc_low," < 20s","nmapudpsmb",["Failed to resolve"]],
["Host:",0,proc_med," < 5m","wapiti",["none"]],
["WebDAV is ENABLED",0,proc_low," < 40s","nmapwebdaviis",["Failed to resolve"]],
["X-XSS-Protection[1",1,proc_med," < 3m","whatweb",["Timed out","Socket error","X-XSS-Protection[1"]]
]
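# Example row, read against the field order described in the comment above:
#   ["has IPv6", 1, proc_low, " < 15s", "ipv6", ["not found", "has IPv6"]]
# i.e. match string, response code, process-time legend, approximate duration,
# tool identifier, and the strings that mark a failed/bad run.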
# Vulnerabilities and Remediation
tools_fix = [
[1, "Not a vulnerability, just an informational alert. The host does not have IPv6 support. IPv6 provides more security as IPSec (responsible for CIA - Confidentiality, Integrity and Availablity) is incorporated into this model. So it is good to have IPv6 Support.",
"It is recommended to implement IPv6. More information on how to implement IPv6 can be found from this resource. https://www.cisco.com/c/en/us/solutions/collateral/enterprise/cisco-on-cisco/IPv6-Implementation_CS.html"],
[2, "Sensitive Information Leakage Detected. The ASP.Net application does not filter out illegal characters in the URL. The attacker injects a special character (%7C~.aspx) to make the application spit sensitive information about the server stack.",
"It is recommended to filter out special charaters in the URL and set a custom error page on such situations instead of showing default error messages. This resource helps you in setting up a custom error page on a Microsoft .Net Application. https://docs.microsoft.com/en-us/aspnet/web-forms/overview/older-versions-getting-started/deploying-web-site-projects/displaying-a-custom-error-page-cs"],
[3, "It is not bad to have a CMS in WordPress. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of WordPress. This resource contains more information on how to secure your WordPress Blog. https://codex.wordpress.org/Hardening_WordPress"],
[4, "It is not bad to have a CMS in Drupal. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of Drupal. This resource contains more information on how to secure your Drupal Blog. https://www.drupal.org/docs/7/site-building-best-practices/ensure-that-your-site-is-secure"],
[5, "It is not bad to have a CMS in Joomla. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of Joomla. This resource contains more information on how to secure your Joomla Blog. https://www.incapsula.com/blog/10-tips-to-improve-your-joomla-website-security.html"],
[6, "Sometimes robots.txt or sitemap.xml may contain rules such that certain links that are not supposed to be accessed/indexed by crawlers and search engines. Search engines may skip those links but attackers will be able to access it directly.",
"It is a good practice not to include sensitive links in the robots or sitemap files."],
[7, "Without a Web Application Firewall, An attacker may try to inject various attack patterns either manually or using automated scanners. An automated scanner may send hordes of attack vectors and patterns to validate an attack, there are also chances for the application to get DoS`ed (Denial of Service)",
"Web Application Firewalls offer great protection against common web attacks like XSS, SQLi, etc. They also provide an additional line of defense to your security infrastructure. This resource contains information on web application firewalls that could suit your application. https://www.gartner.com/reviews/market/web-application-firewall"],
[8, "Open Ports give attackers a hint to exploit the services. Attackers try to retrieve banner information through the ports and understand what type of service the host is running",
"It is recommended to close the ports of unused services and use a firewall to filter the ports wherever necessary. This resource may give more insights. https://security.stackexchange.com/a/145781/6137"],
[9, "Chances are very less to compromise a target with email addresses. However, attackers use this as a supporting data to gather information around the target. An attacker may make use of the username on the email address and perform brute-force attacks on not just email servers, but also on other legitimate panels like SSH, CMS, etc with a password list as they have a legitimate name. This is however a shoot in the dark scenario, the attacker may or may not be successful depending on the level of interest",
"Since the chances of exploitation is feeble there is no need to take action. Perfect remediation would be choosing different usernames for different services will be more thoughtful."],
[10, "Zone Transfer reveals critical topological information about the target. The attacker will be able to query all records and will have more or less complete knowledge about your host.",
"Good practice is to restrict the Zone Transfer by telling the Master which are the IPs of the slaves that can be given access for the query. This SANS resource provides more information. https://www.sans.org/reading-room/whitepapers/dns/securing-dns-zone-transfer-868"],
[11, "The email address of the administrator and other information (address, phone, etc) is available publicly. An attacker may use these information to leverage an attack. This may not be used to carry out a direct attack as this is not a vulnerability. However, an attacker makes use of these data to build information about the target.",
"Some administrators intentionally would have made this information public, in this case it can be ignored. If not, it is recommended to mask the information. This resource provides information on this fix. http://www.name.com/blog/how-tos/tutorial-2/2013/06/protect-your-personal-information-with-whois-privacy/"],
[12, "As the target is lacking this header, older browsers will be prone to Reflected XSS attacks.",
"Modern browsers does not face any issues with this vulnerability (missing headers). However, older browsers are strongly recommended to be upgraded."],
[13, "This attack works by opening multiple simultaneous connections to the web server and it keeps them alive as long as possible by continously sending partial HTTP requests, which never gets completed. They easily slip through IDS by sending partial requests.",
"If you are using Apache Module, `mod_antiloris` would help. For other setup you can find more detailed remediation on this resource. https://www.acunetix.com/blog/articles/slow-http-dos-attacks-mitigate-apache-http-server/"],
[14, "This vulnerability seriously leaks private information of your host. An attacker can keep the TLS connection alive and can retrieve a maximum of 64K of data per heartbeat.",
"PFS (Perfect Forward Secrecy) can be implemented to make decryption difficult. Complete remediation and resource information is available here. http://heartbleed.com/"],
[15, "By exploiting this vulnerability, an attacker will be able gain access to sensitive data in a n encrypted session such as session ids, cookies and with those data obtained, will be able to impersonate that particular user.",
"This is a flaw in the SSL 3.0 Protocol. A better remediation would be to disable using the SSL 3.0 protocol. For more information, check this resource. https://www.us-cert.gov/ncas/alerts/TA14-290A"],
[16, "This attacks takes place in the SSL Negotiation (Handshake) which makes the client unaware of the attack. By successfully altering the handshake, the attacker will be able to pry on all the information that is sent from the client to server and vice-versa",
"Upgrading OpenSSL to latest versions will mitigate this issue. This resource gives more information about the vulnerability and the associated remediation. http://ccsinjection.lepidum.co.jp/"],
[17, "With this vulnerability the attacker will be able to perform a MiTM attack and thus compromising the confidentiality factor.",
"Upgrading OpenSSL to latest version will mitigate this issue. Versions prior to 1.1.0 is prone to this vulnerability. More information can be found in this resource. https://bobcares.com/blog/how-to-fix-sweet32-birthday-attacks-vulnerability-cve-2016-2183/"],
[18, "With the LogJam attack, the attacker will be able to downgrade the TLS connection which allows the attacker to read and modify any data passed over the connection.",
"Make sure any TLS libraries you use are up-to-date, that servers you maintain use 2048-bit or larger primes, and that clients you maintain reject Diffie-Hellman primes smaller than 1024-bit. More information can be found in this resource. https://weakdh.org/"],
[19, "Allows remote attackers to cause a denial of service (crash), and possibly obtain sensitive information in applications that use OpenSSL, via a malformed ClientHello handshake message that triggers an out-of-bounds memory access.",
" OpenSSL versions 0.9.8h through 0.9.8q and 1.0.0 through 1.0.0c are vulnerable. It is recommended to upgrade the OpenSSL version. More resource and information can be found here. https://www.openssl.org/news/secadv/20110208.txt"],
[20, "Otherwise termed as BREACH atack, exploits the compression in the underlying HTTP protocol. An attacker will be able to obtain email addresses, session tokens, etc from the TLS encrypted web traffic.",
"Turning off TLS compression does not mitigate this vulnerability. First step to mitigation is to disable Zlib compression followed by other measures mentioned in this resource. http://breachattack.com/"],
[21, "Otherwise termed as Plain-Text Injection attack, which allows MiTM attackers to insert data into HTTPS sessions, and possibly other types of sessions protected by TLS or SSL, by sending an unauthenticated request that is processed retroactively by a server in a post-renegotiation context.",
"Detailed steps of remediation can be found from these resources. https://securingtomorrow.mcafee.com/technical-how-to/tips-securing-ssl-renegotiation/ https://www.digicert.com/news/2011-06-03-ssl-renego/ "],
[22, "This vulnerability allows attackers to steal existing TLS sessions from users.",
"Better advice is to disable session resumption. To harden session resumption, follow this resource that has some considerable information. https://wiki.crashtest-security.com/display/KB/Harden+TLS+Session+Resumption"],
[23, "This has nothing to do with security risks, however attackers may use this unavailability of load balancers as an advantage to leverage a denial of service attack on certain services or on the whole application itself.",
"Load-Balancers are highly encouraged for any web application. They improve performance times as well as data availability on during times of server outage. To know more information on load balancers and setup, check this resource. https://www.digitalocean.com/community/tutorials/what-is-load-balancing"],
[24, "An attacker can forwarded requests that comes to the legitimate URL or web application to a third party address or to the attacker's location that can serve malware and affect the end user's machine.",
"It is highly recommended to deploy DNSSec on the host target. Full deployment of DNSSEC will ensure the end user is connecting to the actual web site or other service corresponding to a particular domain name. For more information, check this resource. https://www.cloudflare.com/dns/dnssec/how-dnssec-works/"],
[25, "Attackers may find considerable amount of information from these files. There are even chances attackers may get access to critical information from these files.",
"It is recommended to block or restrict access to these files unless necessary."],
[26, "Attackers may find considerable amount of information from these directories. There are even chances attackers may get access to critical information from these directories.",
"It is recommended to block or restrict access to these directories unless necessary."],
[27, "May not be SQLi vulnerable. An attacker will be able to know that the host is using a backend for operation.",
"Banner Grabbing should be restricted and access to the services from outside would should be made minimum."],
[28, "An attacker will be able to steal cookies, deface web application or redirect to any third party address that can serve malware.",
"Input validation and Output Sanitization can completely prevent Cross Site Scripting (XSS) attacks. XSS attacks can be mitigated in future by properly following a secure coding methodology. The following comprehensive resource provides detailed information on fixing this vulnerability. https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet"],
[29, "SSL related vulnerabilities breaks the confidentiality factor. An attacker may perform a MiTM attack, intrepret and eavesdrop the communication.",
"Proper implementation and upgraded version of SSL and TLS libraries are very critical when it comes to blocking SSL related vulnerabilities."],
[30, "Particular Scanner found multiple vulnerabilities that an attacker may try to exploit the target.",
"Refer to Vulnerous-Vulnerability-Report to view the complete information of the vulnerability, once the scan gets completed."],
[31, "Attackers may gather more information from subdomains relating to the parent domain. Attackers may even find other services from the subdomains and try to learn the architecture of the target. There are even chances for the attacker to find vulnerabilities as the attack surface gets larger with more subdomains discovered.",
"It is sometimes wise to block sub domains like development, staging to the outside world, as it gives more information to the attacker about the tech stack. Complex naming practices also help in reducing the attack surface as attackers find hard to perform subdomain bruteforcing through dictionaries and wordlists."],
[32, "Through this deprecated protocol, an attacker may be able to perform MiTM and other complicated attacks.",
"It is highly recommended to stop using this service and it is far outdated. SSH can be used to replace TELNET. For more information, check this resource https://www.ssh.com/ssh/telnet"],
[33, "This protocol does not support secure communication and there are likely high chances for the attacker to eavesdrop the communication. Also, many FTP programs have exploits available in the web such that an attacker can directly crash the application or either get a SHELL access to that target.",
"Proper suggested fix is use an SSH protocol instead of FTP. It supports secure communication and chances for MiTM attacks are quite rare."],
[34, "The StuxNet is level-3 worm that exposes critical information of the target organization. It was a cyber weapon that was designed to thwart the nuclear intelligence of Iran. Seriously wonder how it got here? Hope this isn't a false positive Nmap ;)",
"It is highly recommended to perform a complete rootkit scan on the host. For more information refer to this resource. https://www.symantec.com/security_response/writeup.jsp?docid=2010-071400-3123-99&tabid=3"],
[35, "WebDAV is supposed to contain multiple vulnerabilities. In some case, an attacker may hide a malicious DLL file in the WebDAV share however, and upon convincing the user to open a perfectly harmless and legitimate file, execute code under the context of that user",
"It is recommended to disable WebDAV. Some critical resource regarding disbling WebDAV can be found on this URL. https://www.networkworld.com/article/2202909/network-security/-webdav-is-bad---says-security-researcher.html"],
[36, "Attackers always do a fingerprint( of any server before they launch an attack. Fingerprint(ing gives them information about the server type, content- they are serving, last modification times etc, this gives an attacker to learn more information about the target",
"A good practice is to obfuscate the information to outside world. Doing so, the attackers will have tough time understanding the server's tech stack and therefore leverage an attack."],
[37, "Attackers mostly try to render web applications or service useless by flooding the target, such that blocking access to legitimate users. This may affect the business of a company or organization as well as the reputation",
"By ensuring proper load balancers in place, configuring rate limits and multiple connection restrictions, such attacks can be drastically mitigated."],
[38, "Intruders will be able to remotely include shell files and will be able to access the core file system or they will be able to read all the files as well. There are even higher chances for the attacker to remote execute code on the file system.",
"Secure code practices will mostly prevent LFI, RFI and RCE attacks. The following resource gives a detailed insight on secure coding practices. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[39, "Hackers will be able to steal data from the backend and also they can authenticate themselves to the website and can impersonate as any user since they have total control over the backend. They can even wipe out the entire database. Attackers can also steal cookie information of an authenticated user and they can even redirect the target to any malicious address or totally deface the application.",
"Proper input validation has to be done prior to directly querying the database information. A developer should remember not to trust an end-user's input. By following a secure coding methodology attacks like SQLi, XSS and BSQLi. The following resource guides on how to implement secure coding methodology on application development. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[40, "Attackers exploit the vulnerability in BASH to perform remote code execution on the target. An experienced attacker can easily take over the target system and access the internal sources of the machine",
"This vulnerability can be mitigated by patching the version of BASH. The following resource gives an indepth analysis of the vulnerability and how to mitigate it. https://www.symantec.com/connect/blogs/shellshock-all-you-need-know-about-bash-bug-vulnerability https://www.digitalocean.com/community/tutorials/how-to-protect-your-server-against-the-shellshock-bash-vulnerability"],
[41, "Gives attacker an idea on how the address scheming is done internally on the organizational network. Discovering the private addresses used within an organization can help attackers in carrying out network-layer attacks aiming to penetrate the organization's internal infrastructure.",
"Restrict the banner information to the outside world from the disclosing service. More information on mitigating this vulnerability can be found here. https://portswigger.net/kb/issues/00600300_private-ip-addresses-disclosed"],
[42, "There are chances for an attacker to manipulate files on the webserver.",
"It is recommended to disable the HTTP PUT and DEL methods incase if you don't use any REST API Services. Following resources helps you how to disable these methods. http://www.techstacks.com/howto/disable-http-methods-in-tomcat.html https://docs.oracle.com/cd/E19857-01/820-5627/gghwc/index.html https://developer.ibm.com/answers/questions/321629/how-to-disable-http-methods-head-put-delete-option/"],
[43, "Attackers try to learn more about the target from the amount of information exposed in the headers. An attacker may know what type of tech stack a web application is emphasizing and many other information.",
"Banner Grabbing should be restricted and access to the services from outside would should be made minimum."],
[44, "An attacker who successfully exploited this vulnerability could read data, such as the view state, which was encrypted by the server. This vulnerability can also be used for data tampering, which, if successfully exploited, could be used to decrypt and tamper with the data encrypted by the server.",
"Microsoft has released a set of patches on their website to mitigate this issue. The information required to fix this vulnerability can be inferred from this resource. https://docs.microsoft.com/en-us/security-updates/securitybulletins/2010/ms10-070"],
[45, "Any outdated web server may contain multiple vulnerabilities as their support would've been ended. An attacker may make use of such an opportunity to leverage attacks.",
"It is highly recommended to upgrade the web server to the available latest version."],
[46, "Hackers will be able to manipulate the URLs easily through a GET/POST request. They will be able to inject multiple attack vectors in the URL with ease and able to monitor the response as well",
"By ensuring proper sanitization techniques and employing secure coding practices it will be impossible for the attacker to penetrate through. The following resource gives a detailed insight on secure coding practices. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[47, "Since the attacker has knowledge about the particular type of backend the target is running, they will be able to launch a targetted exploit for the particular version. They may also try to authenticate with default credentials to get themselves through.",
"Timely security patches for the backend has to be installed. Default credentials has to be changed. If possible, the banner information can be changed to mislead the attacker. The following resource gives more information on how to secure your backend. http://kb.bodhost.com/secure-database-server/"],
[48, "Attackers may launch remote exploits to either crash the service or tools like ncrack to try brute-forcing the password on the target.",
"It is recommended to block the service to outside world and made the service accessible only through the a set of allowed IPs only really neccessary. The following resource provides insights on the risks and as well as the steps to block the service. https://www.perspectiverisk.com/remote-desktop-service-vulnerabilities/"],
[49, "Hackers will be able to read community strings through the service and enumerate quite an information from the target. Also, there are multiple Remote Code Execution and Denial of Service vulnerabilities related to SNMP services.",
"Use a firewall to block the ports from the outside world. The following article gives wide insight on locking down SNMP service. https://www.techrepublic.com/article/lock-it-down-dont-allow-snmp-to-compromise-network-security/"],
[50, "Attackers will be able to find the logs and error information generated by the application. They will also be able to see the status codes that was generated on the application. By combining all these information, the attacker will be able to leverage an attack.",
"By restricting access to the logger application from the outside world will be more than enough to mitigate this weakness."],
[51, "Cyber Criminals mainly target this service as it is very easier for them to perform a remote attack by running exploits. WannaCry Ransomware is one such example.",
"Exposing SMB Service to the outside world is a bad idea, it is recommended to install latest patches for the service in order not to get compromised. The following resource provides a detailed information on SMB Hardening concepts. https://kb.iweb.com/hc/en-us/articles/115000274491-Securing-Windows-SMB-and-NetBios-NetBT-Services"]
]
#vul_remed_info('c',50)
#sys.exit(1)
# Tool Set
tools_precheck = [
["wapiti"], ["whatweb"], ["nmap"], ["golismero"], ["host"], ["wget"], ["uniscan"], ["wafw00f"], ["dirb"], ["davtest"], ["theharvester"], ["xsser"], ["dnsrecon"],["fierce"], ["dnswalk"], ["whois"], ["sslyze"], ["lbd"], ["golismero"], ["dnsenum"],["dmitry"], ["davtest"], ["nikto"], ["dnsmap"]
]
# Shuffling Scan Order (starts)
scan_shuffle = list(zip(tool_names, tool_cmd, tool_resp, tool_status))
random.shuffle(scan_shuffle)
tool_names, tool_cmd, tool_resp, tool_status = zip(*scan_shuffle)
tool_checks = (len(tool_names) + len(tool_resp) + len(tool_status)) / 3 # Cross verification in case it breaks.
# Shuffling Scan Order (ends)
# Tool Head Pointer: (can be increased but certain tools will be skipped)
tool = 0
# Run Test
runTest = 1
# For accessing list/dictionary elements
arg1 = 0
arg2 = 1
arg3 = 2
arg4 = 3
arg5 = 4
arg6 = 5
# Detected Vulnerabilities [will be dynamically populated]
rs_vul_list = list()
rs_vul_num = 0
rs_vul = 0
# Total Time Elapsed
rs_total_elapsed = 0
# Tool Pre Checker
rs_avail_tools = 0
# Checks Skipped
rs_skipped_checks = 0
if len(sys.argv) == 1 :
logo()
helper()
else:
target = sys.argv[1].lower()
if target == '--update' or target == '-u' or target == '--u':
logo()
print("Source is updating....Please wait.\n")
spinner.start()
# Checking internet connectivity first...
rs_internet_availability = check_internet()
if rs_internet_availability == 0:
print( "\t"+ bcolors.BG_ERR_TXT + "There seems to be some problem connecting to the internet. Please try again or later." +bcolors.ENDC)
spinner.stop()
sys.exit(1)
cmd = 'sha1sum Source.py | grep .... | cut -c 1-40'
oldversion_hash = subprocess.check_output(cmd, shell=True)
oldversion_hash = oldversion_hash.strip()
os.system('wget -N https://raw.githubusercontent.com/ScorchingShade/Vulnerous-web/master/Source.py -O Source.py > /dev/null 2>&1')
newversion_hash = subprocess.check_output(cmd, shell=True)
newversion_hash = newversion_hash.strip()
if oldversion_hash == newversion_hash :
clear()
print( "\t"+ bcolors.OKBLUE +"You already have the latest version of Source." + bcolors.ENDC)
else:
clear()
print( "\t"+ bcolors.OKGREEN +"Source successfully updated to the latest version." +bcolors.ENDC)
spinner.stop()
sys.exit(1)
elif target == '--help' or target == '-h' or target == '--h':
logo()
helper()
sys.exit(1)
else:
target = url_maker(target)
os.system('rm te* > /dev/null 2>&1') # Clearing previous scan files
os.system('clear')
os.system('setterm -cursor off')
logo()
print( bcolors.BG_HEAD_TXT+"[ Checking Available Security Scanning Tools Phase... Initiated. ]"+bcolors.ENDC)
unavail_tools = 0
unavail_tools_names = list()
while (rs_avail_tools < len(tools_precheck)):
precmd = str(tools_precheck[rs_avail_tools][arg1])
try:
p = subprocess.Popen([precmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
output, err = p.communicate()
val = output
val=val+err
except:
print( "\t"+bcolors.BG_ERR_TXT+"Source was terminated abruptly..."+bcolors.ENDC)
sys.exit(1)
if (b"not found" in val):
print( "\t"+bcolors.OKBLUE+tools_precheck[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.BADFAIL+"...unavailable."+bcolors.ENDC)
for scanner_index, scanner_val in enumerate(tool_names):
if scanner_val[2] == tools_precheck[rs_avail_tools][arg1]:
scanner_val[3] = 0 # disabling scanner as it's not available.
unavail_tools_names.append(tools_precheck[rs_avail_tools][arg1])
unavail_tools = unavail_tools + 1
else:
print( "\t"+bcolors.OKBLUE+tools_precheck[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.OKGREEN+"...available."+bcolors.ENDC)
rs_avail_tools = rs_avail_tools + 1
clear()
unavail_tools_names = list(set(unavail_tools_names))
if unavail_tools == 0:
print( "\t"+bcolors.OKGREEN+"All Scanning Tools are available. All vulnerability checks will be performed by Source."+bcolors.ENDC)
else:
print( "\t"+bcolors.WARNING+"Some of these tools "+bcolors.BADFAIL+str(unavail_tools_names)+bcolors.ENDC+bcolors.WARNING+" are unavailable. Source can still perform tests by excluding these tools from the tests. Please install these tools to fully utilize the functionality of Source."+bcolors.ENDC)
print( bcolors.BG_ENDL_TXT+"[ Checking Available Security Scanning Tools Phase... Completed. ]"+bcolors.ENDC)
print( "\n")
print( bcolors.BG_HEAD_TXT+"[ Preliminary Scan Phase Initiated... Loaded "+str(tool_checks)+" vulnerability checks. ]"+bcolors.ENDC)
#while (tool < 1):
while(tool < len(tool_names)):
print( "["+tool_status[tool][arg3]+tool_status[tool][arg4]+"] Deploying "+str(tool+1)+"/"+str(tool_checks)+" | "+bcolors.OKBLUE+tool_names[tool][arg2]+bcolors.ENDC),
if tool_names[tool][arg4] == 0:
print( bcolors.WARNING+"...Scanning Tool Unavailable. Auto-Skipping Test..."+bcolors.ENDC)
rs_skipped_checks = rs_skipped_checks + 1
tool = tool + 1
continue
spinner.start()
scan_start = time.time()
temp_file = "temp_"+tool_names[tool][arg1]
cmd = tool_cmd[tool][arg1]+target+tool_cmd[tool][arg2]+" > "+temp_file+" 2>&1"
try:
subprocess.check_output(cmd, shell=True)
except KeyboardInterrupt:
runTest = 0
except:
runTest = 1
if runTest == 1:
spinner.stop()
scan_stop = time.time()
elapsed = scan_stop - scan_start
rs_total_elapsed = rs_total_elapsed + elapsed
print( bcolors.OKBLUE+"\b...Completed in "+display_time(int(elapsed))+bcolors.ENDC+"\n")
clear()
rs_tool_output_file = open(temp_file).read()
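# How a finding is decided below: when the tool's status flag
# (tool_status[tool][arg2]) is 0, the target is flagged as vulnerable if the
# match string (tool_status[tool][arg1]) appears in the tool output; otherwise
# the check is inverted and the target is flagged only when none of the
# expected strings in tool_status[tool][arg6] appear in the output.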
if tool_status[tool][arg2] == 0:
if tool_status[tool][arg1].lower() in rs_tool_output_file.lower():
#print( "\t"+ vul_info(tool_resp[tool][arg2]) + bcolors.BADFAIL +" "+ tool_resp[tool][arg1] + bcolors.ENDC)
vul_remed_info(tool,tool_resp[tool][arg2],tool_resp[tool][arg3])
rs_vul_list.append(tool_names[tool][arg1]+"*"+tool_names[tool][arg2])
else:
if any(i in rs_tool_output_file for i in tool_status[tool][arg6]):
m = 1 # This does nothing.
else:
#print( "\t"+ vul_info(tool_resp[tool][arg2]) + bcolors.BADFAIL +" "+ tool_resp[tool][arg1] + bcolors.ENDC)
vul_remed_info(tool,tool_resp[tool][arg2],tool_resp[tool][arg3])
rs_vul_list.append(tool_names[tool][arg1]+"*"+tool_names[tool][arg2])
else:
runTest = 1
spinner.stop()
scan_stop = time.time()
elapsed = scan_stop - scan_start
rs_total_elapsed = rs_total_elapsed + elapsed
print( bcolors.OKBLUE+"\b\b\b\b...Interrupted in "+display_time(int(elapsed))+bcolors.ENDC+"\n")
clear()
print( "\t"+bcolors.WARNING + "Test Skipped. Performing Next. Press Ctrl+Z to Quit Source." + bcolors.ENDC)
rs_skipped_checks = rs_skipped_checks + 1
tool=tool+1
print( bcolors.BG_ENDL_TXT+"[ Preliminary Scan Phase Completed. ]"+bcolors.ENDC)
print( "\n")
#################### Report & Documentation Phase ###########################
print(bcolors.BG_HEAD_TXT+"[ Report Generation Phase Initiated. ]"+bcolors.ENDC)
if len(rs_vul_list)==0:
print( "\t"+bcolors.OKGREEN+"No Vulnerabilities Detected."+bcolors.ENDC)
else:
with open("Vulnerous-Vulnerability-Report", "a") as report:
while(rs_vul < len(rs_vul_list)):
vuln_info = rs_vul_list[rs_vul].split('*')
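# Each rs_vul_list entry has the form "<tool key>*<human-readable check name>",
# so vuln_info[arg1] selects the matching temp output file and vuln_info[arg2]
# is used as the report heading.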
report.write(vuln_info[arg2])
report.write("\n------------------------\n\n")
temp_report_name = "temp_"+vuln_info[arg1]
with open(temp_report_name, 'r') as temp_report:
data = temp_report.read()
report.write(data)
report.write("\n\n")
temp_report.close()
rs_vul = rs_vul + 1
print( "\tComplete Vulnerability Report for "+bcolors.OKBLUE+target+bcolors.ENDC+" named "+bcolors.OKGREEN+"`Vulnerous-Vulnerability-Report`"+bcolors.ENDC+" is available under the same directory Source resides.")
report.close()
# Writing all scan files output into Vulnerous-Debug-ScanLog for debugging purposes.
for file_index, file_name in enumerate(tool_names):
with open("Vulnerous-Debug-ScanLog", "a") as report:
try:
with open("temp_"+file_name[arg1], 'r') as temp_report:
data = temp_report.read()
report.write(file_name[arg2])
report.write("\n------------------------\n\n")
report.write(data)
report.write("\n\n")
temp_report.close()
except:
break
report.close()
print( "\tTotal Number of Vulnerability Checks : "+bcolors.BOLD+bcolors.OKGREEN+str(len(tool_names))+bcolors.ENDC)
print( "\tTotal Number of Vulnerability Checks Skipped: "+bcolors.BOLD+bcolors.WARNING+str(rs_skipped_checks)+bcolors.ENDC)
print( "\tTotal Number of Vulnerabilities Detected : "+bcolors.BOLD+bcolors.BADFAIL+str(len(rs_vul_list))+bcolors.ENDC)
print( "\tTotal Time Elapsed for the Scan : "+bcolors.BOLD+bcolors.OKBLUE+display_time(int(rs_total_elapsed))+bcolors.ENDC)
print( "\n")
print( "\tFor Debugging Purposes, You can view the complete output generated by all the tools named "+bcolors.OKBLUE+"`Vulnerous-Debug-ScanLog`"+bcolors.ENDC+" under the same directory.")
print( bcolors.BG_ENDL_TXT+"[ Report Generation Phase Completed. ]"+bcolors.ENDC)
os.system('setterm -cursor on')
os.system('rm te* > /dev/null 2>&1') # Clearing previous scan files
``` |
{
"source": "jongold/autoencoders",
"score": 3
} |
#### File: autoencoders/models/ConvolutionalAutoencoder.py
```python
import torch
from torch import nn
from autoencoders.models.utils import xavier_weights_init
class ConvolutionalAutoencoder(nn.Module):
def __init__(self, in_channels=1):
super(ConvolutionalAutoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(in_channels, 16, (3, 3), padding=1),
nn.ReLU(True),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 8, (3, 3), padding=1),
nn.ReLU(True),
nn.MaxPool2d((2, 2)),
nn.Conv2d(8, 8, (3, 3), padding=1),
nn.ReLU(True),
nn.MaxPool2d((2, 2), padding=1),
)
self.decoder = nn.Sequential(
nn.Conv2d(8, 8, (3, 3), padding=1),
nn.ReLU(True),
nn.Upsample(scale_factor=2),
nn.Conv2d(8, 8, (3, 3), padding=1),
nn.ReLU(True),
nn.Upsample(scale_factor=2),
nn.Conv2d(8, 16, (3, 3), padding=0),
nn.ReLU(True),
nn.Upsample(scale_factor=2),
nn.Conv2d(16, in_channels, kernel_size=(3, 3), padding=1),
nn.Sigmoid(),
)
[xavier_weights_init(m) for m in self.modules()]
def forward(self, x):
return self.decoder(self.encoder(x))
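
# A minimal shape-check sketch (illustrative, not part of the original module);
# the 1x28x28 MNIST-sized input is an assumption based on the mnist loader used
# in train() below. The decoder roughly mirrors the encoder, so the output is
# expected to match the input size:
#   model = ConvolutionalAutoencoder(in_channels=1)
#   x = torch.randn(4, 1, 28, 28)
#   assert model(x).shape == x.shape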
def train(batch_size=512, epochs=100):
from torch.autograd import Variable
import torchnet as tnt
from torchnet.engine import Engine
from tensorboardX import SummaryWriter
from autoencoders.models.sampling import sample
import autoencoders.data.mnist as mnist
from autoencoders.utils.tensorboard import run_path
use_gpu = torch.cuda.is_available()
writer = SummaryWriter(run_path('conv'))
engine = Engine()
meter_loss = tnt.meter.AverageValueMeter()
model = ConvolutionalAutoencoder()
optimizer = torch.optim.Adam(model.parameters(), 3e-4)
criterion = nn.BCELoss()
dataloader = mnist(batch_size=batch_size)
if use_gpu:
model.cuda()
criterion.cuda()
def h(sample):
inputs, _ = sample
inputs = Variable(inputs)
if use_gpu:
inputs = inputs.cuda()
output = model(inputs)
loss = criterion(output, inputs)
return loss, output
def on_forward(state):
meter_loss.add(state['loss'].data[0])
def on_start_epoch(state):
meter_loss.reset()
def on_end_epoch(state):
writer.add_scalar('loss', meter_loss.value()[0], state['epoch'])
writer.add_image('image', sample(model, dataloader), state['epoch'])
meter_loss.reset()
# engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.train(h, dataloader, maxepoch=epochs, optimizer=optimizer)
if __name__ == '__main__':
train()
```
#### File: autoencoders/models/sampling.py
```python
import torch
from torch.autograd import Variable
import torchvision.utils as vutils
from autoencoders.models.utils import flatten, to_one_hot
use_gpu = torch.cuda.is_available()
def sample(model, loader, n=10):
imgs = next(iter(loader))[0][:n]
imgs = Variable(imgs)
if use_gpu:
imgs = imgs.cuda()
reconstructions = model(imgs)
reconstructions = reconstructions.view(reconstructions.size(0), 1, 28, 28)
return vutils.make_grid(torch.cat([imgs.data, reconstructions.data]), n)
def vae_reconstructions(model, loader, n=10):
model.eval()
imgs = next(iter(loader))[0][:n]
imgs = Variable(imgs)
if use_gpu:
imgs = imgs.cuda()
reconstructions, _, _ = model(imgs)
reconstructions = reconstructions.view(reconstructions.size(0), 1, 28, 28)
return vutils.make_grid(torch.cat([imgs.data, reconstructions.data]), n)
def cvae_reconstructions(model, loader, n=10):
model.eval()
inputs, targets = next(iter(loader))
batch_size = inputs.size(0)
n = min(n, batch_size)
inputs = Variable(inputs)[:n]
# TODO: remove hardcoded n_classes
targets = Variable(to_one_hot(
targets[:n], batch_size=n, n_classes=10))
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
reconstructions, _, _ = model(flatten(inputs), targets)
reconstructions = reconstructions.view(reconstructions.size(0), 1, 28, 28)
return vutils.make_grid(torch.cat([inputs.data, reconstructions.data]), n)
``` |
{
"source": "jongoodnow/gitime",
"score": 3
} |
#### File: gitime/gitime/commands.py
```python
from __future__ import unicode_literals, print_function
from gitime import fprintf
from gitime.user import User
from gitime.commit import Commit, parse_hours_flag, parse_commit_message
from gitime.invoice import Invoice, InvoiceNotFound
import gitime.database as db
import sys
import textwrap
import os
import csv
from datetime import datetime
def settings_main(args):
u = User()
if hasattr(args, 'rate'):
u.set_rate(args.rate)
fprintf("The rate of $%s per hour will be applied to all new invoices."
%args.rate)
if hasattr(args, 'round'):
u.set_rounding(args.round)
fprintf("Hours will be rounded to the nearest %s on all new invoices"
%args.round)
if args.list:
fprintf(textwrap.dedent("""\
Your default values for all invoices created in the future:
Hourly Rate: $%.2f
Round hours to the nearest %g"""
%(u.rate, u.rounding)))
def invoice_main(args):
u = User()
if hasattr(args, 'name'):
if args.new:
kwargs = {'new': True}
if hasattr(args, 'rate'):
kwargs['rate'] = args.rate
if hasattr(args, 'round'):
kwargs['rounding'] = args.round
inv = Invoice(args.name, **kwargs)
inv.set_active()
fprintf("Future commits will now be sent to the invoice %s."
%inv.name)
else:
try:
inv = Invoice(args.name)
except InvoiceNotFound:
if raw_input(
"That invoice doesn't exist. Make a new one? [Y/n] "
) == 'n':
sys.exit()
inv = Invoice(args.name, new=True)
if hasattr(args, 'rate'):
inv.set_rate(args.rate)
if hasattr(args, 'round'):
inv.set_rounding(args.round)
if u.active_invoice_rowid != inv.rowid:
inv.set_active()
fprintf("Future commits will now be sent to the invoice %s."
%inv.name)
else:
if db.invoice_count() == 0:
fprintf("You do not have any invoices yet! Create one with `gitime "
"invoice -n <invoice name>`.")
else:
inv = Invoice(u.active_invoice_rowid)
if hasattr(args, 'rate'):
inv.set_rate(args.rate)
if hasattr(args, 'round'):
inv.set_rounding(args.round)
if args.list:
count = db.invoice_count()
noun = 'invoice' if count == 1 else 'invoices'
fprintf("You have %d %s:" %(count, noun))
for invoice in db.query_all_invoices():
if invoice[3] == u.active_invoice_rowid:
active = " (active)"
else:
active = ""
fprintf(invoice[0], active)
def status_main(args):
if hasattr(args, 'invoice'):
inv = Invoice(args.invoice)
else:
u = User()
invid = u.active_invoice_rowid
if invid == 0:
fprintf("You do not have any invoices yet! Create one with `gitime "
"invoice -n <invoice name>`.")
sys.exit()
inv = Invoice(u.active_invoice_rowid)
total_hours = inv.total_hours()
hourstr = 'hour' if total_hours == 1 else 'hours'
print(textwrap.dedent("""\
On invoice %s
Total Time Worked: %g %s
Total Charges: $%.2f
Charges:"""
%(inv.name, total_hours, hourstr, inv.total_earnings())))
commits = inv.get_commit_meta()
if not commits:
fprintf("No charges yet!")
else:
for com in commits:
date = (datetime.fromtimestamp(com[1])).strftime('%m-%d-%Y')
wspace1 = (17 - len(date)) * " "
hourstr = 'hour' if com[2] == 1 else 'hours'
hours = "%g %s" %(com[2], hourstr)
wspace2 = (14 - len(hours)) * " "
message = com[0]
fprintf(date, wspace1, hours, wspace2, message)
def timer_main(args):
u = User()
if not args.force:
if u.active_invoice_rowid == 0:
fprintf(
"WARNING: You do not have an active invoice set. "
"You won't be able to record your hours without one. "
"Create an invoice with the command: `gitime invoice -n "
"<invoice name>` first, or suppress this warning by running "
"the timer with the --force flag.",
file=sys.stderr)
sys.exit()
if args.action == 'start':
u.start_timer()
fprintf('Timer started at %s' %str(datetime.now()))
elif args.action == 'pause':
u.pause_timer()
fprintf('Timer paused at %s' %str(datetime.now()))
elif args.action == 'reset':
u.reset_timer()
elif args.action == 'status':
inv = Invoice(u.active_invoice_rowid)
if u.timer_running:
status = 'has been running since %s.' %str(
datetime.fromtimestamp(u.timer_start))
else:
status = 'is not running.'
fprintf('The timer %s' %status)
fprintf('Total hours tracked: %.2f' %(u.time_tracked(inv)))
def commit_main(args):
# commits are NOT handled by argparse. `args` are passed to this function
# as they are from sys.argv.
u = User()
invid = u.active_invoice_rowid
if invid == 0:
fprintf(
"GITIME ERROR: You do not have an active invoice set. "
"You won't be able to record your hours without one. "
"Create an invoice with the command: `gitime invoice -n <invoice "
"name>` first. Your commit has NOT been made.", file=sys.stderr)
sys.exit()
inv = Invoice(invid)
raw_hours = parse_hours_flag(args)
if raw_hours is not False:
hours = round(raw_hours / inv.rounding) * inv.rounding
else:
hours = u.time_tracked(inv)
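# Worked example of the rounding above: with inv.rounding == 0.25, a raw value
# of 1.6 hours becomes round(1.6 / 0.25) * 0.25 == 1.5.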
if hours <= 0:
fprintf(
"GITIME ERROR: You didn't specify a number of hours, and the "
"timer hasn't recorded anything. Run this command with the "
"`--hours <hour count>` flag, or use the timer to track your "
"time. Your commit has NOT been made."), file=sys.stderr)
sys.exit()
u.reset_timer()
message = parse_commit_message(args)
if not message:
fprintf("GITIME ERROR: Could not find a message in your commit.",
file=sys.stderr)
sys.exit()
com = Commit(message=message,
hours=hours,
invoice=u.active_invoice_rowid)
fprintf("GITIME: Your commit has been logged in invoice %s." %inv.name)
if '--fake' not in args:
fprintf("GITIME: Running your commit now...")
args[0] = 'git'
os.system(" ".join(args))
def export_invoice_main(args):
if hasattr(args, 'invoice'):
try:
inv = Invoice(args.invoice)
except InvoiceNotFound:
fprintf("That invoice does not exist.", file=sys.stderr)
sys.exit()
else:
u = User()
if u.active_invoice_rowid == 0:
fprintf("You do not have an active invoice set. Create one with "
"`gitime invoice -n <invoice name> first.", file=sys.stderr)
sys.exit()
inv = Invoice(u.active_invoice_rowid)
if hasattr(args, 'file'):
filename = args.file
else:
filename = inv.name
commits = inv.get_commit_meta()
if args.format == 'csv':
if filename[-4:] != '.csv':
filename += '.csv'
with open(filename, 'wb') as fi:
writer = csv.writer(fi)
writer.writerow(['Date', 'Hours', 'Task'])
for com in commits:
writer.writerow([
(datetime.fromtimestamp(com[1])).strftime('%m-%d-%Y'),
com[2], com[0]])
writer.writerow([])
writer.writerow(['Total Time Worked:', "%s" %inv.total_hours()])
writer.writerow(['Total Charges:', "$%.2f" %inv.total_earnings()])
elif args.format == 'xlsx':
try:
import xlsxwriter
except ImportError:
fprintf("You appear to be missing the xlsxwriter module required "
"for Excel workbook export. You can install it with the "
"command `pip install xlsxwriter`.", file=sys.stderr)
sys.exit()
if filename[-5:] != '.xlsx':
filename += '.xlsx'
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.set_column('A:A', 18)
worksheet.set_column('C:C', 80)
worksheet.write_string(0, 0, 'Date')
worksheet.write_string(0, 1, 'Hours')
worksheet.write_string(0, 2, 'Task')
row = 1
for com in commits:
worksheet.write_string(row, 0,
(datetime.fromtimestamp(com[1])).strftime('%m-%d-%Y'))
worksheet.write_number(row, 1, com[2])
worksheet.write_string(row, 2, com[0])
row += 1
row += 1
worksheet.write_string(row, 0, 'Total Time Worked:')
worksheet.write_number(row, 1, inv.total_hours())
row += 1
worksheet.write_string(row, 0, 'Total Charges:')
worksheet.write_string(row, 1, '$%.2f' %inv.total_earnings())
workbook.close()
else:
fprintf("The format you specified is not supported at this time. "
"Current allowed formats are: `csv`, `xlsx`.", file=sys.stderr)
def reset_main(args):
if not args.force:
if raw_input(textwrap.fill((
"WARNING: This will delete all invoices, commit logs, and user "
"preferences. Your git repos won't be affected. You should export "
"your invoices first. Are you sure? [y/N] "), 80)
) not in ('y', 'Y'):
sys.exit()
else:
db.first_time_setup()
```
#### File: jongoodnow/gitime/setup.py
```python
import os
import sys
import codecs
from setuptools import setup
from setuptools.command.install import install as _install
import gitime.database as db
long_description = """
Keep track of your billable hours along with your commits. Gitime lets you build an invoice with your tasks and hours worked from your commit messages.
`Read the docs <http://gitime.readthedocs.org/en/latest/>`_ for more details
Simple Usage
------------
Set your hourly rate::
$ gitime set -r 50
Start a new invoice::
$ gitime invoice -n "Awesome Secret Project"
Time how long you've been working::
$ gitime timer start
Make a commit as you would normally, but on the commit step, use :code:`gitime` instead of :code:`git`::
$ git add feature.c
$ gitime commit -m "Added a really cool thing"
$ git push
Or, if you don't like timers, record the time yourself with the :code:`--hours` flag::
$ gitime commit -m "Proved Riemann Hypothesis" --hours 2
Look at your invoice::
$ gitime status
On invoice Awesome Secret Project
Total time worked: 3 hours
Total charges: $150.00
Charges:
07-21-2014 1 hour Added a really cool thing
07-22-2014 2 hours Proved Riemann Hypothesis
When it's time to bill, export your invoice to a spreadsheet. Currently, the only format available is csv. More formats are coming soon::
$ gitime export
Installation
------------
You'll need two things installed to use gitime:
- `Git <http://git-scm.com/downloads>`_, and an executable called :code:`git` on your path
- `Python 2.7 <https://www.python.org/downloads/>`_ (or Python 3.4)
Install the latest release with::
$ pip install gitime
Or install the development version with::
$ git clone https://github.com/jongoodnow/gitime.git
$ cd gitime
$ pip install -r requirements.txt
$ python setup.py install
"""
class install(_install):
def run(self):
_install.run(self)
if not db.db_exists():
DB_DIR = os.path.expanduser('~/.gitime')
if not os.path.exists(DB_DIR):
os.makedirs(DB_DIR)
if os.name in ('posix', 'mac'):
db.set_unix_permissions(DB_DIR)
db.first_time_setup()
setup(
name="gitime",
version="1.0.1",
description="Build an invoice with your tasks and hours worked from your commit messages",
long_description=long_description,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/jongoodnow/gitime",
keywords=['git', 'invoice', 'timer'],
license="MIT",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities'
],
packages=['gitime'],
entry_points={
'console_scripts': [
'gitime = gitime.cli:main'
]
},
cmdclass={
'install': install
},
test_suite='tests',
install_requires=['xlsxwriter', 'six']
)
``` |
{
"source": "jongpie/advanced-SFDX-Jenkins-deployment",
"score": 2
} |
#### File: scripts/deployment/delete-old-flow-versions.py
```python
import subprocess, json, os, platform, sys, getopt
import xml.dom.minidom
API_VERSION = '51.0'
NAMESPACE = {
'salesforce' : 'http://soap.sforce.com/2006/04/metadata',
'xsi' : 'http://www.w3.org/2001/XMLSchema-instance'
}
SFDX_EXECUTABLE = '/usr/local/bin/sfdx' if platform.system() == 'Linux' else 'sfdx'
def getParameters(argv):
parameters = {}
parameters['checkonly'] = False
parameters['outputdirectory'] = 'flow-destructive-changes/'
parameters['targetusername'] = None
try:
opts, args = getopt.getopt(argv, 'hco:t:', ['help','checkonly','outputdirectory=','targetusername='])
except getopt.GetoptError:
print 'delete-old-flows.py --checkonly --outputdir <folder_name> --targetusername <my_sfdx_alias>'
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print '''
-c, --checkonly\tRuns the SFDX deploy command as a validation-only deployment. Defaults to False.
-o, --outputdirectory\tSubdirectory to use for destructiveChanges.xml and package.xml. Defaults to flow-destructive-changes/.
-t, --targetusername\tSFDX alias of the username for the desired environment. Your default username is used if not specified.
'''
sys.exit()
elif opt in ('-c', '--checkonly'):
parameters['checkonly'] = True
elif opt in ('-o', '--outputdirectory'):
if arg.endswith('/') == False:
arg = arg + '/'
parameters['outputdirectory'] = arg
elif opt in ('-t', '--targetusername'):
parameters['targetusername'] = arg
print parameters
return parameters
def getObsoleteFlowVersions(target_username):
print 'Retrieving the list of obsolete flow versions'
target_username_parameter = ' --targetusername ' + target_username + ' ' if target_username is not None else ''
query = "SELECT Id, Status, Definition.DeveloperName, VersionNumber FROM Flow WHERE Definition.NamespacePrefix = null AND Status = 'Obsolete' ORDER BY Definition.DeveloperName, VersionNumber"
query_command = SFDX_EXECUTABLE + ' force:data:soql:query --usetoolingapi --json --query "' + query + '"' + target_username_parameter
print 'query_command=' + query_command
response = json.loads(subprocess.check_output(query_command, shell=True))
print response
return response['result']['records']
def createPackageXml(output_directory):
doc = xml.dom.minidom.Document()
root = doc.createElementNS(NAMESPACE.get('salesforce'), 'Package')
root.setAttribute('xmlns', NAMESPACE.get('salesforce'))
doc.appendChild(root)
version = doc.createElement('version')
versionName = doc.createTextNode(API_VERSION)
version.appendChild(versionName)
root.appendChild(version)
print(doc.toprettyxml())
doc.writexml(
open(output_directory + 'package.xml', 'w'),
addindent = ' ',
newl = '\n'
)
def createFlowDestructiveChangesXml(output_directory, obsolete_flow_versions):
doc = xml.dom.minidom.Document()
root = doc.createElementNS(NAMESPACE.get('salesforce'), 'Package')
root.setAttribute('xmlns', NAMESPACE.get('salesforce'))
doc.appendChild(root)
typesNode = doc.createElement('types')
root.appendChild(typesNode)
for flow in obsolete_flow_versions:
print('flow==', flow)
versionName = flow['Definition']['DeveloperName'] + '-' + str(flow['VersionNumber'])
flowMemberNode = doc.createElement('members')
flowNameNode = doc.createTextNode(versionName)
flowMemberNode.appendChild(flowNameNode)
typesNode.appendChild(flowMemberNode)
metadataTypeNode = doc.createElement('name')
metadataTypeName = doc.createTextNode('Flow')
metadataTypeNode.appendChild(metadataTypeName)
typesNode.appendChild(metadataTypeNode)
version = doc.createElement('version')
versionName = doc.createTextNode(API_VERSION)
version.appendChild(versionName)
root.appendChild(version)
print(doc.toprettyxml())
doc.writexml(
open(output_directory + 'destructiveChanges.xml', 'w'),
addindent = " ",
newl = '\n'
)
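
# Illustrative only (the flow name and version numbers below are hypothetical,
# not taken from any actual run): with two obsolete versions of a flow named
# My_Flow, the destructiveChanges.xml written above would look roughly like:
#   <?xml version="1.0" ?>
#   <Package xmlns="http://soap.sforce.com/2006/04/metadata">
#     <types>
#       <members>My_Flow-1</members>
#       <members>My_Flow-2</members>
#       <name>Flow</name>
#     </types>
#     <version>51.0</version>
#   </Package>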
def deployDestructiveChanges(output_directory, target_username, check_only):
target_username_parameter = ' --targetusername ' + target_username + ' ' if target_username is not None else ''
check_only_parameter = ' --checkonly ' if check_only else ''
deploy_command = (
SFDX_EXECUTABLE + ' force:mdapi:deploy '
+ ' --deploydir ' + output_directory
+ target_username_parameter
+ check_only_parameter
)
print('deploy_command=' + deploy_command)
result = subprocess.check_output(deploy_command, shell=True)
if __name__ == '__main__':
print 'Starting flow cleanup job'
# Get the parameters
parameters = getParameters(sys.argv[1:])
output_directory = parameters['outputdirectory']
check_only = parameters['checkonly']
target_username = parameters['targetusername']
# Make sure the output directory exists
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Get the flows and generate the XML files
flows = getObsoleteFlowVersions(target_username)
createPackageXml(output_directory)
createFlowDestructiveChangesXml(output_directory, flows)
# Delete!
deployDestructiveChanges(output_directory, target_username, check_only)
print 'Finished flow cleanup job'
``` |
{
"source": "jongpillee/deep-content-user",
"score": 2
} |
#### File: jongpillee/deep-content-user/encoding.py
```python
from __future__ import print_function
import numpy as np
import random
import os
import argparse
from model import *
from load_label import *
from keras.models import Model
def main(args):
# load models
model_str = '(args, inference=True)'
if args.model == 'basic':
audio_model_1, user_model_1 = eval('model_' + args.model + model_str)
audio_model = [audio_model_1]
user_model = [user_model_1]
elif args.model == 'multi':
audio_model_1,audio_model_2,audio_model_3,audio_model_4,audio_model_5,user_model_1,user_model_2,user_model_3,user_model_4,user_model_5 = eval('model_' + args.model + model_str)
audio_model = [audio_model_1,audio_model_2,audio_model_3,audio_model_4,audio_model_5]
user_model = [user_model_1,user_model_2,user_model_3,user_model_4,user_model_5]
save_path = './embeddings/%s/' % args.model
# user embedding
for model_iter in range(len(user_model)):
user_embedding = user_model[model_iter].predict(np.arange(args.num_user))
user_save_str = 'user_embedding_%d.npy' % (model_iter+1)
np.save(save_path + user_save_str, user_embedding)
print('user embedding saved')
# load label
sorted_coo_train, sorted_coo_valid, songs, user_to_item_train, user_to_item_valid, all_items, D7id_to_path, Tid_to_D7id, Sid_to_Tid, item_to_user_train, train_idx, valid_idx, test_idx = load_label(args)
# item embedding
for model_iter in range(len(audio_model)):
item_embedding = np.zeros((args.num_song,args.dim_embedding))
for iter in range(len(songs)):
try:
file_path = args.feature_path + D7id_to_path[Tid_to_D7id[Sid_to_Tid[songs[iter]]]].replace('.mp3','.npy')
tmp = np.load(file_path)
tmp = tmp.T
tmp -= args.mel_mean
tmp /= args.mel_std
# segmentation
input_seg = []
num_seg = int(tmp.shape[0]/args.num_frame)
for iter2 in range(num_seg):
input_seg.append(tmp[iter2*args.num_frame:(iter2+1)*args.num_frame,:])
input_seg = np.array(input_seg)
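# Shape note (illustrative): with num_frame=130 and melBins=128, a
# mel-spectrogram of 1300 frames yields input_seg of shape (10, 130, 128);
# the item embedding below is the mean of the per-segment predictions.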
predicted = audio_model[model_iter].predict(input_seg)
item_embedding[iter] = np.mean(predicted,axis=0)
except Exception:
continue
if np.remainder(iter,1000) == 0:
print(iter)
print(iter+1)
item_save_str = 'item_embedding_%d.npy' % (model_iter+1)
np.save(save_path + item_save_str,item_embedding)
print('item embedding saved')
if __name__ == '__main__':
# options
parser = argparse.ArgumentParser(description='feature vector extraction')
parser.add_argument('model', type=str, default='basic', help='choose between basic model and multi model')
parser.add_argument('weight_name', type=str, help='weight path')
parser.add_argument('--N-negs', type=int, default=20, help='negative sampling size')
parser.add_argument('--margin', type=float, default=0.2, help='margin value for hinge loss')
parser.add_argument('--dim-embedding', type=int, default=256, help='feature vector dimension')
parser.add_argument('--num-frame', type=int, default=130, help='frame size of input')
parser.add_argument('--feature-path', type=str, default='/home1/irteam/users/jongpil/data/msd/mels/', help='mel-spectrogram path')
parser.add_argument('--mel-mean', type=float, default=0.2262, help='mean value calculated from training set')
parser.add_argument('--mel-std', type=float, default=0.2579, help='std value calculated from training set')
parser.add_argument('--num-user', type=int, default=20000, help='the number of users')
parser.add_argument('--num-song', type=int, default=10000, help='the number of items')
parser.add_argument('--melBins', type=int, default=128, help='mel bin size')
parser.add_argument('--lr', type=float, default=0.2, help='learning rate')
parser.add_argument('--lrdecay', type=float, default=1e-6, help='learning rate decaying')
args = parser.parse_args()
main(args)
``` |
{
"source": "jongpillee/musicTagging_MSD",
"score": 2
} |
#### File: jongpillee/musicTagging_MSD/encoding_cnn.py
```python
import os
import numpy as np
import time
from keras.optimizers import SGD
from keras.models import model_from_json,Model
from keras import backend as K
from keras.callbacks import Callback,ModelCheckpoint,EarlyStopping
from keras.layers import Input
from keras.layers.core import Dense
import sys
import librosa
# load model
model_path = './models/'
architecture_name = model_path + 'architecture_msdTag.json'
weight_name = model_path + 'weight_msdTag.hdf5'
nst = 0
partition = 1
save_path = sys.argv[1]
train_arg = sys.argv[2]
fs = 22050
def load_melspec(file_name_from,num_segment,sample_length):
#file_name = file_name_from.replace('.wav','.au')
file_name = file_name_from
tmp,sr = librosa.load(file_name,sr=fs,mono=True)
tmp = tmp.astype(np.float32)
y_length = len(tmp)
tmp_segmentized = np.zeros((num_segment,sample_length,1))
for iter2 in range(0,num_segment):
hopping = (y_length-sample_length)/(num_segment-1)
count_tmp = 0
if hopping < 0:
if count_tmp == 0:
tmp_tmp = np.repeat(tmp,10)
count_tmp += 1
y_length_tmp = len(tmp_tmp)
hopping = (y_length_tmp - sample_length)/(num_segment-1)
tmp_segmentized[iter2,:,0] = tmp_tmp[iter2*hopping:iter2*hopping+sample_length]
else:
tmp_segmentized[iter2,:,0] = tmp[iter2*hopping:iter2*hopping+sample_length]
return tmp_segmentized
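
# Note: load_melspec above loads the raw waveform with librosa and slices it
# into num_segment windows of sample_length samples; when the clip is shorter
# than one window, np.repeat stretches it (each sample repeated 10 times)
# before slicing.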
# load data
with open(train_arg) as f:
train_list = [x.split('\t')[0] for x in f.read().splitlines()]
print len(train_list)
all_list = train_list
print len(all_list)
model = model_from_json(open(architecture_name).read())
model.load_weights(weight_name)
print 'model loaded!!!'
# compile & optimizer
sgd = SGD(lr=0.001,decay=1e-6,momentum=0.9,nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
# print model summary
model.summary()
sample_length = model.input_shape[1]
print sample_length
num_segment = int(22050*30/sample_length)+1
print 'Number of segments per song: ' + str(num_segment)
# define activation layer
layer_dict = dict([(layer.name,layer) for layer in model.layers[1:]])
layer_num = (len(layer_dict)-1)/4
# msd doesn't have dropout so +1 for capturing last hidden layer
activation_layer = 'activation_%d' % (layer_num+1)
print activation_layer
layer_output = layer_dict[activation_layer].output
get_last_hidden_output = K.function([model.layers[0].input, K.learning_phase()], [layer_output])
# encoding
all_size = len(all_list)
for iter2 in range(int(nst*all_size/partition),int((nst+1)*all_size/partition)):
# check existence
save_name = save_path + '/' + model_select + all_list[iter2].replace('.wav','.npy')
if not os.path.exists(os.path.dirname(save_name)):
os.makedirs(os.path.dirname(save_name))
if os.path.isfile(save_name) == 1:
print iter2, save_name + '_file_exist!!!!!!!'
continue
# load melgram
x_sample_tmp = load_melspec(all_list[iter2],num_segment,sample_length)
print x_sample_tmp.shape
# prediction
weight = get_last_hidden_output([x_sample_tmp,0])[0]
maxpooled = np.amax(weight,axis=1)
averagepooled = np.average(maxpooled,axis=0)
print averagepooled.shape,iter2
np.save(save_name,averagepooled)
``` |
{
"source": "jongrim/whats-the-weather",
"score": 3
} |
#### File: whats-the-weather/whats_the_weather/__main__.py
```python
import argparse
import pprint
from datetime import datetime
from whats_the_weather.helpers import (convert_kelvin_to_f, convert_temps,
convert_timestamp_to_datetime,
convert_timestamp_to_string)
from whats_the_weather.weather import Weather
def get_weather(city_name, forecast=None, indent=2, show_json=False, dt=None):
'''Makes a Weather object'''
w = Weather(dt)
cur_city = w.get_the_weather(city_name, forecast)
visual_space = ' ' * indent
if forecast:
display_forecast_weather(cur_city, visual_space, show_json)
else:
display_current_weather(cur_city, visual_space, show_json)
def display_forecast_weather(data_dict, space, show_json):
# Get data out of forecast dict
weather_dict = data_dict['forecast'].get('json', None)
if not weather_dict:
print('No cached forecast weather information \
for this location')
return
if show_json:
pprint.pprint(weather_dict)
return
frcst_wthr = process_forecast_data(weather_dict)
# pprint.pprint(frcst_wthr)
max_temp = 'max_temp'
min_temp = 'min_temp'
wthr_conds = 'wthr_conds'
for (day, conds) in frcst_wthr.items():
print(f"Weather for {conds['month']}-{day}:")
print(f'{space}', f'High: {conds[max_temp]}')
print(f'{space}', f'Low: {conds[min_temp]}')
print(f'{space}', 'Weather conditions: ', ', '.join(conds[wthr_conds]))
def process_forecast_data(forecast_dict):
'''Loop through the forecast data and build up a summary'''
data_list = forecast_dict['list']
daily_weather = {}
# Dict keys
max_temp = 'max_temp'
min_temp = 'min_temp'
wthr_conds = 'wthr_conds'
for measure in data_list:
date = convert_timestamp_to_datetime(measure['dt'])
day = date.day
daily_weather.setdefault(day, {})
day_d = daily_weather[day]
day_d.setdefault('month', date.month)
# Search for maximum temp of the day
cur_max = convert_kelvin_to_f(measure['main']['temp_max'])
day_d[max_temp] = max(day_d.get(max_temp, 0), cur_max)
# Search for minimum temp of the day
cur_min = convert_kelvin_to_f(measure['main']['temp_min'])
day_d[min_temp] = min(day_d.get(min_temp, 150), cur_min)
# Set and add weather conditions
day_d.setdefault(wthr_conds, [])
cur_cond = measure['weather'][0]['description']
if cur_cond not in day_d[wthr_conds]:
day_d[wthr_conds].append(cur_cond)
return daily_weather
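
# Illustrative shape of the returned summary (values are made up):
#   {21: {'month': 7, 'max_temp': 88.2, 'min_temp': 71.6,
#         'wthr_conds': ['clear sky', 'few clouds']}}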
def display_current_weather(data_dict, space, show_json):
# Get data out of current dict
weather_dict = data_dict['current'].get('json', None)
if not weather_dict:
print('No cached current weather information \
for this location')
return
if show_json:
pprint.pprint(weather_dict)
return
result_city = weather_dict['name']
weather_set = weather_dict['weather']
Temps_F = convert_temps(weather_dict['main'])
sunrise = convert_timestamp_to_string(weather_dict['sys']['sunrise'])
sunset = convert_timestamp_to_string(weather_dict['sys']['sunset'])
rain = weather_dict.get('rain', None)
clouds = weather_dict.get('clouds', None)
wind = weather_dict.get('wind', None)
snow = weather_dict.get('snow', None)
print(f'Current weather for {result_city}:')
# print(f'{space}Weather description: ', end='')
for x in weather_set:
print(f"{space}{x['description'].capitalize()}")
# print('')
print(f'{space}Temperatures:')
print(f'{space}{space}Current: ', f'{Temps_F.current}'.rjust(5))
print(f'{space}{space}Max: ', f'{Temps_F.max}'.rjust(9))
print(f'{space}{space}Min: ', f'{Temps_F.min}'.rjust(9))
print(f"{space}Humidity: {weather_dict['main']['humidity']}%")
print(f'{space}Sunrise: {sunrise}')
print(f'{space}Sunset: {sunset}')
if rain:
if rain.get('3h'):
print(f"{space}Rain volume for last 3 hours: {rain['3h']}")
if clouds:
if clouds.get('all'):
print(f"{space}Cloudiness: {clouds['all']}%")
if wind:
if wind.get('speed'):
print(f"{space}Wind speed: {wind['speed']} meters/ssec")
if wind.get('deg'):
print(f"{space}Wind direction: {wind['deg']} degrees")
if wind.get('gust'):
print(f'{space}', end='')
print("Wind gust: {wind.get('gust')} meters/sec")
if snow:
if snow.get('3h'):
print(f"{space}Snow volume for last 3 hours: {snow['3h']}")
def main():
parser = argparse.ArgumentParser(
description='Get the weather in your terminal')
parser.add_argument('city', help='The city for which you would like the \
weather')
parser.add_argument('-f', '--forecast', dest='forecast',
action='store_true', help='Specifies that the forecast \
should be returned instead of current conditions.')
parser.add_argument('-i', '--indent', dest='indent', type=int, default=2,
help='The indentation setting for output.')
parser.add_argument('-d', '--datetime', dest='datetime',
action='store_true', help='For development purposes \
only.')
parser.add_argument('-j', '--json', dest='json',
action='store_true', help='Show me the JSON! (Pretty \
printed of course)')
# TODO add option to colorize output
# TODO add option for ascii art
args = parser.parse_args()
if args.datetime:
dt = datetime(1970, 1, 1)
else:
dt = None
get_weather(args.city, args.forecast, args.indent, args.json, dt)
if __name__ == '__main__':
main()
```
#### File: whats_the_weather/tests/test_weather.py
```python
import datetime
import unittest
from unittest.mock import patch
from whats_the_weather.helpers import (convert_temps,
convert_timestamp_to_string)
from whats_the_weather.weather import Weather
class TestWeather(unittest.TestCase):
def test_make_weather_object(self):
w = Weather()
self.assertTrue(isinstance(w, Weather))
def test_weather_has_API(self):
w = Weather()
self.assertTrue(w.api_key)
def test_weather_has_city_list(self):
w = Weather()
self.assertTrue(w.city_list)
def test_weather_has_last_call_time_of_datetime(self):
w = Weather()
self.assertTrue(isinstance(w.last_call_time, datetime.datetime))
def test_weather_has_api_rate_limit_of_timedetla(self):
w = Weather()
self.assertTrue(isinstance(w.api_limit, datetime.timedelta))
def test_api_limit_when_greater_than_10min(self):
'''
A Weather object with last call time greater than 10 mins should return
True
'''
w = Weather(datetime.datetime(1970, 1, 1))
self.assertTrue(w.check_if_within_limit())
def test_api_limit_when_less_than_10min(self):
'''
A Weather object with last call time less than 10 mins should return
False
'''
w = Weather(datetime.datetime.now())
self.assertFalse(w.check_if_within_limit())
@patch('whats_the_weather.weather.Weather.store_current_info')
@patch('requests.get')
def test_successful_request_to_api(self, mock_request, mock_store):
'''
A Weather object whose last call time is greater than the rate limit
should be able to make an api call
'''
w = Weather(datetime.datetime(1970, 1, 1))
id = 1
api_key = w.api_key
w.request_weather_with_id(id)
http = 'http://api.openweathermap.org/data/2.5/weather?id=1&APPID='
arg = f'{http}{api_key}'
assert mock_request.called_with(arg)
self.assertTrue(w.wthr_data_dict)
@patch('requests.get')
def test_unsuccessful_request_to_api(self, mock_request):
'''
A Weather object whose last call time should not be able to make an
api request
'''
w = Weather(datetime.datetime.now())
w.WeatherData = False
id = 1
# api_key = w.api_key
w.request_weather_with_id(id)
# http = 'http://api.openweathermap.org/data/2.5/weather?id=1&APPID='
# arg = f'{http}{api_key}'
self.assertFalse(w.WeatherData)
def test_unsuccessful_weather_request_with_id(self):
w = Weather(datetime.datetime.now())
with self.assertRaises(KeyError):
w.get_the_weather('zzzzzz')
# @patch('requests.get')
# def test_successful_weather_request_with_id(self, mock_request):
# w = Weather(datetime.datetime(1970, 1, 1))
# result = w.request_weather_with_id('Atlanta')
# self.assertTrue(result)
class TestWeatherCityList(unittest.TestCase):
def setUp(self):
self.w = Weather()
def test_get_id_by_city_name_where_name_is_included(self):
city = self.w.get_city_id_by_name('Atlanta')
self.assertEqual(city, 4180439)
def test_get_id_by_city_name_where_name_isnot_included(self):
city = self.w.get_city_id_by_name('ZZZZZ')
self.assertEqual(city, None)
class TestWeatherConversions(unittest.TestCase):
    def test_convert_kelvin_to_fahrenheit(self):
temps = {'temp': 280.32, 'temp_min': 279.15, 'temp_max': 281.15}
Temps_F = convert_temps(temps)
self.assertEqual(Temps_F.current, 44.91)
self.assertEqual(Temps_F.max, 46.4)
self.assertEqual(Temps_F.min, 42.8)
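        # Sanity check on the expected values: the standard Kelvin-to-Fahrenheit
        # formula is F = (K - 273.15) * 9 / 5 + 32, so 280.32 K gives
        # (280.32 - 273.15) * 9 / 5 + 32 = 44.906, i.e. 44.91 after rounding to
        # two decimals (and 281.15 K / 279.15 K give 46.4 / 42.8 respectively).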
def test_convert_UTC_timestamp(self):
time = convert_timestamp_to_string(1485762037)
self.assertEqual(time, '2017-01-30 02:40:37')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jongrover/procedural-task-list-in-many-languages",
"score": 4
} |
#### File: jongrover/procedural-task-list-in-many-languages/task-list.py
```python
exit = False
tasks = []
def display_tasks():
print "----------"
for i, task in enumerate(tasks):
print "%s.) %s" % (i+1, task)
print "----------"
def add_task():
print "Type a new task:"
task = raw_input()
tasks.append(task)
display_tasks()
def remove_task():
display_tasks()
print "Type task number to remove:"
index = int(raw_input()) - 1
tasks.pop(index)
display_tasks()
while (exit == False):
print "Please enter a number from the following choices: 1. Insert a new task, 2. Remove a task, 3. List all tasks, 4. exit this program"
choice = int(raw_input())
if (choice == 1):
add_task()
elif (choice == 2):
remove_task()
elif (choice == 3):
display_tasks()
elif (choice == 4):
print "Exiting..."
exit = True
else:
print "sorry didn't recognize that input"
``` |
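Since the snippet above targets Python 2 (`print` statements, `raw_input`), here is a minimal Python 3 rendition of the same loop for comparison; the prompts and behaviour are kept as close to the original as possible.
```python
exit_flag = False
tasks = []

def display_tasks():
    print("----------")
    for i, task in enumerate(tasks, start=1):
        print(f"{i}.) {task}")
    print("----------")

def add_task():
    tasks.append(input("Type a new task:\n"))
    display_tasks()

def remove_task():
    display_tasks()
    index = int(input("Type task number to remove:\n")) - 1
    tasks.pop(index)
    display_tasks()

while not exit_flag:
    print("Please enter a number from the following choices: 1. Insert a new task, "
          "2. Remove a task, 3. List all tasks, 4. exit this program")
    choice = int(input())
    if choice == 1:
        add_task()
    elif choice == 2:
        remove_task()
    elif choice == 3:
        display_tasks()
    elif choice == 4:
        print("Exiting...")
        exit_flag = True
    else:
        print("sorry didn't recognize that input")
```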
{
"source": "jongs3030-datascience/DONTDO_KKJH",
"score": 3
} |
#### File: jongs3030-datascience/DONTDO_KKJH/kkjh.py
```python
from mtcnn import MTCNN
from skimage import io
from cv2 import cv2
import glob
from PIL import Image
from matplotlib import pyplot as plt
from skimage import io
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from kkjh_test import depth_test
model = load_model('MobilenetV2_batch64.h5')
from mtcnn import MTCNN
def mtcnn_face_crop(raw_frame):
    # raw_frame image => face_detection => crop => return
    # raw_frame must be passed as a numpy array
# face detector model
detector = MTCNN()
    # only proceed with 3-channel images // otherwise pass through
if raw_frame.shape[2] == 4 :
raw_frame = cv2.cvtColor(raw_frame, cv2.COLOR_RGBA2RGB)
print("4차원을 3차원으로")
if raw_frame.shape[2] == 3 :
        face_box = detector.detect_faces(raw_frame)  # get face info from the detector model
        if face_box:  # only when a face was actually found!
            # face bounding-box coordinates
x,y = face_box[0]["box"][0], face_box[0]["box"][1]
w,h = face_box[0]["box"][2] , face_box[0]["box"][3]
para = 40 # padding_para
y_s, y_e, x_s, x_e = y-para, y+h+para, x-para, x+w+para
if y_s < 0 :
y_s = 0
            if y_e > raw_frame.shape[0]:  # raw_frame.shape[0]: height of the original image
y_e = raw_frame.shape[0]
if x_s < 0 :
x_s = 0
            if x_e > raw_frame.shape[1]:  # raw_frame.shape[1]: width of the original image
x_e = raw_frame.shape[1]
# Crop
dst = raw_frame.copy()
dst = dst[y_s : y_e, x_s : x_e]
            # return the cropped image
#plt.imshow(dst)
return dst
else:
return raw_frame
else:
return raw_frame
def depth_estimation(raw_frame,k):
    print('starting depth estimation')
    purpose_shape = (480, 640, 3)  # unify every input to this size
origin=mtcnn_face_crop(raw_frame)
savePath='./examples/%d.bmp' %k
    if origin.shape != purpose_shape:  # only resize and re-save images whose shape differs
        print('resizing input to the target shape')
resized = cv2.resize(origin,(purpose_shape[1],purpose_shape[0]), interpolation=cv2.INTER_AREA)
if origin.shape[2]==4:
resized = cv2.cvtColor(resized,cv2.COLOR_RGBA2RGB)
cv2.imwrite(savePath,resized)
    else:  # images already at the target size need no resizing
        print('input already at the target shape')
resized = origin
cv2.imwrite(savePath, resized)
outputs = depth_test(resized)
savePath2='./results/%d.bmp' %k
cv2.imwrite(savePath2, outputs)
center_point=(outputs.shape[1]//2,outputs.shape[0]//2)
#region=np.zeros((40,40))
region=outputs[center_point[1]-20:center_point[1]+20,center_point[0]-20:center_point[0]+20]
min_intensity=region.min()
# print(min_intensity)
outputs[outputs<=min_intensity]=0
# center_intensity=a[center_point[1],center_point[0]]
region=outputs[center_point[1]-20:center_point[1]+20,center_point[0]-20:center_point[0]+20]
min_intensity=region.min()
max_intensity=region.max()
diff = (max_intensity-min_intensity)
if diff < 100:
flag1 = True
else:
flag1 = False
return flag1
def mask_put_bool(raw_frame):
picture = raw_frame
copy_picture = picture.copy()
resized = cv2.resize(copy_picture, dsize=(224, 224), interpolation=cv2.INTER_AREA)
preprocessed = np.array(resized) / 255
xhat = preprocessed[None, ]
yhat = (model.predict(xhat) > 0).astype("int32")
if yhat==1:
return False
else:
return True
def warning_putText(raw_frame,flag1,flag2,k):
    if (flag1 == True) & (flag2 == True):  # wearing a mask but touched the face
final_frame = cv2.putText(raw_frame, "Dont touch ur face!!", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0),2)
    elif (flag1 == True) & (flag2 == False):  # no mask on and touched the face
final_frame = cv2.putText(raw_frame, "Put on mask &", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0),2)
final_frame = cv2.putText(raw_frame, "dont touch ur face!!", (10,35), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0),2)
    elif (flag1 == False) & (flag2 == True):  # wearing a mask and did not touch the face
final_frame = cv2.putText(raw_frame, "Well done", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0),2)
    else:  # no mask on and did not touch the face
final_frame = cv2.putText(raw_frame, "Put on mask!!", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0),2)
return final_frame
def kkjh_smooth(raw_frame,flag1,k):
    # flag1 = dont(raw_frame)  # flag1: whether the face was touched (True if touched)
    if flag1:  # the touch detector says the face was touched -> double-check with depth estimation
        touch_bool = depth_estimation(raw_frame, k)  # depth estimation: contact -> True, no contact -> False
        if not touch_bool:  # detector said touched, but depth estimation found no contact
            flag1 = touch_bool  # depth estimation found no contact, so flag1 becomes False
    flag2 = mask_put_bool(raw_frame)  # flag2: whether a mask is worn
final_frame = warning_putText(raw_frame,flag1,flag2,k)
return final_frame
#picture = io.imread('./examples/mine/train_00000008.jpg')
#flag1=True
#final_frame =bsmooth(picture,flag1)
#plt.imshow(final_frame)
#plt.show()
``` |
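A minimal sketch of how the helpers above could be driven frame-by-frame from a webcam. It assumes the file is importable as a module named `kkjh`, that its model files (`MobilenetV2_batch64.h5` and the depth network behind `depth_test`) are present, and that the external face-touch detector which normally supplies `flag1` is stubbed out with a constant.
```python
import cv2

import kkjh  # hypothetical module name for the file above

cap = cv2.VideoCapture(0)  # default webcam; the index is an assumption
frame_idx = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # flag1 would normally come from the separate face-touch detector ("dont");
    # it is hard-coded here purely for illustration.
    annotated = kkjh.kkjh_smooth(frame, flag1=True, k=frame_idx)
    cv2.imshow('monitor', annotated)
    frame_idx += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```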
{
"source": "JONGSKY/patent_search",
"score": 2
} |
#### File: app/main_page/views.py
```python
from django.shortcuts import render
# Create your views here.
def main_page(request):
return render(request, 'main_page/main_page.html')
```
#### File: app/patent_search/views.py
```python
from django.shortcuts import render
from .documents import PatentDocument
from django.core.paginator import Paginator
from django.contrib import messages
from patent_search.models import Patentsview
from django.db.models import Q
import json
with open('patent_word.txt') as word_file:
word_data = json.load(word_file)
search_auto_complete = word_data['patent_word']
def patent_search(request):
result = {'search_auto_complete' : search_auto_complete}
return render(request, 'patent_search/patent_search.html', result)
def patent_search_result(request):
search_input = request.GET.get('search_name', "")
##patent = list(PatentDocument.search().filter('match', abstract=search_input))
patent = Patentsview.objects.filter(Q(abstract__contains=search_input)).order_by('-date')
if len(patent) != 0:
paginator = Paginator(patent, 8)
page = request.GET.get('page', 1)
patent_list = paginator.page(page)
page_numbers_range = 9
max_index = len(paginator.page_range)
current_page = int(page)
start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
end_index = start_index + page_numbers_range
if end_index >= max_index:
end_index = max_index
paginator_range = paginator.page_range[start_index:end_index]
search_result = {'search_auto_complete' : search_auto_complete,
'search_name' : search_input,
'patent_list' : patent_list,
'paginator_range' : paginator_range,
'page' : page,
'current_page' : current_page,
'start_index' : start_index,
}
return render(request, 'patent_search/patent_search_result.html', search_result)
else:
search_result = {'search_auto_complete' : search_auto_complete,
'search_name' : search_input,
}
return render(request, 'patent_search/patent_search_none_result.html', search_result)
``` |
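The paging window in `patent_search_result` is easiest to see with concrete numbers; the snippet below reproduces only that arithmetic with made-up values and is not part of the Django app.
```python
page_numbers_range = 9   # pages shown per window
max_index = 25           # pretend the paginator has 25 pages
current_page = 12

start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
end_index = min(start_index + page_numbers_range, max_index)

pages = list(range(1, max_index + 1))
print(start_index, end_index)        # 9 18
print(pages[start_index:end_index])  # [10, 11, 12, 13, 14, 15, 16, 17, 18]
```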
{
"source": "jongsunglee/test_iotivity",
"score": 2
} |
#### File: java/iotivity-android/run_android_smoketests.py
```python
import os
import sys
import textwrap
import argparse
import platform
import subprocess
import multiprocessing
import time
import psutil
def find_avalible_console_port():
'''
    Find an open port number that will be used for the avd console port. Start searching
    at port number 5554 and continue incrementing the port number till an open port is found.
Returns port number as a string
'''
# 5554 is the default console port used when starting an avd without specifying a port number
# since this is the default port number by default we start checking to see if that port is
# currently in use. If it is being used increase the port to the next even port number.
# Each instance of the avd uses two ports the console port and the avd port.The avd port is
# always console port +1 so we will check that that port is also open.
ret_port = 5554
nc = psutil.net_connections(kind='inet')
while True:
console_port_already_being_used = False
adb_port_already_being_used = False
for i in nc:
if(i.laddr[1] == ret_port):
console_port_already_being_used = True
if(i.laddr[1] == (ret_port + 1)):
adb_port_already_being_used = True
if((not console_port_already_being_used) and (not adb_port_already_being_used)):
return str(ret_port)
ret_port += 2 #for adv the port must be a multiple of 2
def start_avd(avd_name, console_port):
'''
    Start up the avd specified by the avd_name parameter and specify the console port that the avd will use
with the console_port parameter. The find_avalible_console_port() function should be used to determine an
open console_port that can be passed into this function.
note:
- all data on the avd will be wiped to start with a known starting condition.
- the avd will be started with the no-window option so there is no visual indication
that the avd is launched.
Keyword arguments:
avd_name -- the name of the created android virtual device
console_port -- the port number that will attempt to be used
by the when starting the avd
'''
command = 'emulator -avd ' + avd_name + ' -port ' + console_port + ' -wipe-data -no-boot-anim -no-window'
subprocess.Popen([command], shell=True)
def wait_for_avd_boot(console_port):
'''
After calling start_avd this function is used to wait for the avd to complete booting the console_port
option must match the console_port option used to start the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
#dev_cmd = 'adb -s emulator-' + console_port + ' shell getprop dev.bootcomplete'
#sys_cmd = 'adb -s emulator-' + console_port + ' shell getprop sys.boot_completed'
dev_cmd = ['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete']
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property dev.bootcomplete indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1);
else:
#print('Waiting for emulator to start')
time.sleep(1)
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'sys.boot_completed'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property sys.boot_completed indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1)
else:
#print('Waiting for emulator to start')
time.sleep(1)
def build_smoketests():
'''
Use gradlew to build the android smoke tests
'''
os.environ['ANDROID_NDK_HOME'] = os.path.abspath(os.getcwd() + '/../../extlibs/android/ndk/android-ndk-r10d')
command = './gradlew assembleAndroidTest'
subprocess.Popen([command], shell=True).wait()
def install_smoketests(console_port):
'''
Install the android smoke tests. Must run build_smoketests() before running this function
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' install -r ./build/outputs/apk/iotivity-android-debug-androidTest-unaligned.apk'
subprocess.Popen([command], shell=True).wait()
def run_smoketests(console_port):
'''
run the android smoke test
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' shell am instrument -w org.iotivity.base.test/android.test.InstrumentationTestRunner'
print command
subprocess.Popen([command], shell=True).wait()
def kill_avd(console_port):
'''
shut down the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' emu kill'
subprocess.Popen([command], shell=True).wait()
def create_avd(avd_name, target, abi):
'''
Create a new android virtual device
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
run command $android list targets to get a list of targets and there Tag/ABIs
'''
command = ['android', '-s', 'create', 'avd', '-f', '--name', avd_name, '--target', target, '--abi', abi]
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Just use the default avd no need to specify extra options.
p.communicate('no')
p.wait()
def delete_avd(avd_name):
command = ['android', '-s', 'delete', 'avd', '--name', avd_name]
subprocess.Popen(command).wait();
def start_android_and_run_tests(target, abi):
'''
This function does the following
1. creates a new avd named smoke_test_avd_####
where the #### is the port number that is used to talk with the avd
the port number is assigned automatically.
2. starts the created avd
3. waits for the avd to boot
4. builds android smoke tests
5. installs the smoke tests on the avd
6. runs the smoke tests
7. shuts down the avd
8. deletes the avd
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
    run command $android list targets to get a list of targets and their Tag/ABIs
'''
avalible_port = find_avalible_console_port()
avd_name = 'smoke_test_avd_' + avalible_port
create_avd(avd_name, target, abi)
start_avd(avd_name, avalible_port)
wait_for_avd_boot(avalible_port)
build_smoketests();
install_smoketests(avalible_port)
run_smoketests(avalible_port)
kill_avd(avalible_port)
delete_avd(avd_name)
def main(argv):
target = 'android-21'
abi = 'x86_64'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent('''\
If ABI is not specified it will default to x86_64.
If TARGET is not specified it will default to android-21.'''))
parser.add_argument('-a', '--abi', help='specify the abi of the android avd example "x86_64"')
    parser.add_argument('-t', '--target', help='the android target example "android-21"')
args = parser.parse_args()
if (args.abi != None):
abi = args.abi
if (args.target != None):
target = args.target
print '*****************************************************'
    print 'Running android smoke test with the following options'
print ' The android target is -- ', target
print ' The android abi is -- ', abi
print '*****************************************************'
start_android_and_run_tests(target, abi)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jongtack/tensorflow",
"score": 2
} |
#### File: python/ops/parsing_ops.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
ops.NoGradient("DecodeRaw")
ops.NoGradient("StringToNumber")
# pylint: disable=protected-access
def parse_example(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name="ParseExample"):
"""Parses `Example` protos.
Parses a number of serialized [`Example`]
(https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
protos given in `serialized`.
`names` may contain descriptive names for the corresponding serialized protos.
These may be useful for debugging purposes, but they have no effect on the
output. If not `None`, `names` must be the same length as `serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects respectively, depending on whether the keys appear
in `sparse_keys` or `dense_keys`.
The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and
of shape `(serialized.size(),) + dense_shapes[j]`.
`dense_defaults` provides defaults for values referenced using `dense_keys`.
If a key is not present in this dictionary, the corresponding dense `Feature`
is required in all elements of `serialized`.
`dense_shapes[j]` provides the shape of each `Feature` entry referenced by
`dense_keys[j]`. The number of elements in the `Feature` corresponding to
`dense_key[j]` must always have `np.prod(dense_shapes[j])` entries. The
returned `Tensor` for `dense_key[j]` has shape `[N] + dense_shape[j]`, where
`N` is the number of `Example`s in `serialized`.
The key `sparse_keys[j]` is mapped to a `SparseTensor` of type
`sparse_types[j]`. The `SparseTensor` represents a ragged matrix.
Its indices are `[batch, index]` where `batch` is the batch entry the value
is from, and `index` is the value's index in the list of values associated
with that feature and example.
Examples:
For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
{ feature []},
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
names: ["input0", "input1"],
sparse_keys: ["kw", "dank", "gps"]
sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
names: ["input0", "input1"],
dense_keys: np.array(["age", "gender"]),
dense_types: [tf.int64, tf.string],
dense_defaults: {
"age": -1 # "age" defaults to -1 if missing
# "gender" has no specified default so it's required
}
dense_shapes: [(1,), (1,)], # age, gender, label, weight
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys` whose shapes are
anything other than `[]` or `[1]`.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
with ops.op_scope([serialized, names], name, "parse_example"):
names = [] if names is None else names
dense_defaults = {} if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = (
[[]] * len(dense_keys) if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d"
% (len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d"
% (len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d"
% (len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shapes[i])
dense_defaults_vec.append(default_value)
dense_shapes = [tensor_util.MakeTensorShapeProto(shape)
if isinstance(shape, (list, tuple)) else shape
for shape in dense_shapes]
outputs = gen_parsing_ops._parse_example(
serialized=serialized,
names=names,
dense_defaults=dense_defaults_vec,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name)
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [ops.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(sparse_indices, sparse_values, sparse_shapes)]
return dict(
zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
def parse_single_example(serialized, # pylint: disable=invalid-name
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name="ParseSingleExample"):
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension, the output shape is the
same as the shape given in `dense_shape`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (`batch_size`) entry of the shape vector is removed (it is now a
single element vector).
See also `parse_example`.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `parse_example` documentation for more details.
names: (Optional) A scalar string Tensor, the associated name.
See `parse_example` documentation for more details.
sparse_keys: See `parse_example` documentation for more details.
sparse_types: See `parse_example` documentation for more details.
dense_keys: See `parse_example` documentation for more details.
dense_types: See `parse_example` documentation for more details.
dense_defaults: See `parse_example` documentation for more details.
dense_shapes: See `parse_example` documentation for more details.
name: A name for this operation (optional).
Returns:
A dictionary mapping keys to Tensors and SparseTensors.
Raises:
ValueError: if "scalar" or "names" have known shapes, and are not scalars.
"""
with ops.op_scope([serialized, names], name, "parse_single_example"):
serialized = ops.convert_to_tensor(serialized)
serialized_shape = serialized.get_shape()
if serialized_shape.ndims is not None:
if serialized_shape.ndims != 0:
raise ValueError("Input serialized must be a scalar")
else:
serialized = control_flow_ops.with_dependencies(
[logging_ops.Assert(
math_ops.equal(array_ops.rank(serialized), 0),
["Input serialized must be a scalar"],
name="SerializedIsScalar")],
serialized,
name="SerializedDependencies")
serialized = array_ops.expand_dims(serialized, 0)
if names is not None:
names = ops.convert_to_tensor(names)
names_shape = names.get_shape()
if names_shape.ndims is not None:
if names_shape.ndims != 0:
raise ValueError("Input names must be a scalar")
else:
names = control_flow_ops.with_dependencies(
[logging_ops.Assert(
math_ops.equal(array_ops.rank(names), 0),
["Input names must be a scalar"],
name="NamesIsScalar")],
names,
name="NamesDependencies")
names = array_ops.expand_dims(names, 0)
outputs = parse_example(serialized,
names=names,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_types=dense_types,
dense_defaults=dense_defaults,
dense_shapes=dense_shapes,
name=name)
if dense_keys is not None:
for d in dense_keys:
outputs[d] = array_ops.squeeze(outputs[d], [0], name="Squeeze_%s" % d)
if sparse_keys is not None:
for s in sparse_keys:
outputs[s] = ops.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s),
outputs[s].values,
array_ops.slice(outputs[s].shape,
[1], [-1], name="Squeeze_Shape_%s" % s))
return outputs
@ops.RegisterShape("ParseExample")
def _ParseExampleShape(op):
"""Shape function for the ParseExample op."""
input_shape = op.inputs[0].get_shape().with_rank(1)
op.inputs[1].get_shape().with_rank(1) # names
num_sparse = op.get_attr("Nsparse")
num_dense = op.get_attr("Ndense")
dense_shapes = op.get_attr("dense_shapes")
sparse_index_shapes = [
tensor_shape.matrix(None, 2) for _ in range(num_sparse)]
sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_sparse)]
sparse_shape_shapes = [tensor_shape.vector(2) for _ in range(num_sparse)]
assert num_dense == len(dense_shapes)
dense_shapes = [
input_shape.concatenate(dense_shape)
for dense_shape in dense_shapes]
return (sparse_index_shapes + sparse_value_shapes + sparse_shape_shapes +
dense_shapes)
def parse_single_sequence_example(serialized, # pylint: disable=invalid-name
context_sparse_keys=None,
context_sparse_types=None,
context_dense_keys=None,
context_dense_types=None,
context_dense_defaults=None,
context_dense_shapes=None,
feature_list_dense_keys=None,
feature_list_dense_types=None,
feature_list_dense_shapes=None,
feature_list_dense_defaults=None,
debug_name=None,
name="ParseSingleSequenceExample"):
# pylint: disable=line-too-long
"""Parses a single `SequenceExample` proto.
Parses a single serialized [`SequenceExample`]
(https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
proto given in `serialized`.
  This op parses a serialized sequence example into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_sparse_keys` or `context_dense_keys`, and the second dictionary
contains mappings for keys appearing in `feature_list_dense_keys`.
The `context` keys are associated with a `SequenceExample` as a whole,
independent of time / frame. In contrast, the `feature_list` keys provide
a way to access variable-length data within the `FeatureList` section of the
`SequenceExample` proto. While the shapes of `context` values are fixed
with respect to frame, the frame dimension (the first dimension)
of `feature_list` values may vary from `SequenceExample` to `SequenceExample`
and even between `feature_list` keys within the same `SequenceExample`.
The key `context_dense_keys[j]` is mapped to a `Tensor` of type
`context_dense_types[j]` and of shape `context_dense_shapes[j]`.
`context_dense_defaults` provides defaults for values referenced using
`context_dense_keys`. If a key is not present in this dictionary, the
corresponding context_dense `Feature` is required in `serialized`.
`context_dense_shapes[j]` provides the shape of each context `Feature` entry
referenced by `context_dense_keys[j]`. The number of elements in the
`Feature` corresponding to `context_dense_key[j]` must always have
`np.prod(context_dense_shapes[j])` entries. The returned `Tensor` for
`context_dense_key[j]` has shape `context_dense_shape[j]`.
The key `context_sparse_keys[j]` is mapped to a `SparseTensor` of type
`context_sparse_types[j]`. This `SparseTensor` represents a ragged vector.
Its indices are `[index]`, where `index` is the value's index in the list of
values associated with that feature and example.
The key `feature_list_dense_keys[j]` is mapped to a `Tensor` of type
`feature_list_dense_types[j]` and of shape
`(T,) + feature_list_dense_shapes[j]`, where `T` is the length of the
associated `FeatureList` in the `SequenceExample`.
Note: every key declared in `feature_list_dense_keys` **must** be
provided in the `SequenceExample`'s `FeatureLists`, even if just empty.
Exceptions are allowed by adding the given key to the map
`feature_list_dense_defaults` with value None. Any key with value None
map will be treated as empty (zero length) if not found in the
`FeatureList` map.
`debug_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `debug_name` must be a scalar.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_sparse_keys: A list of string keys in the `SequenceExample`'s
features. The results for these keys will be returned as
`SparseTensor` objects.
context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
`context_dense_keys`. Required for any input tensors identified by
`context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
features_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`),
`tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
feature_list_dense_shapes: A list of tuples, same length as
`feature_list_dense_keys`. The shape of the data for each
`FeatureList` feature referenced by `feature_list_dense_keys`.
feature_list_dense_defaults: A dict mapping key strings to values.
The only currently allowed value is `None`. Any key appearing
in this dict with value `None` is allowed to be missing from the
`SequenceExample`. If missing, the key is treated as zero-length.
debug_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: If context_sparse and context_dense key sets intersect,
if input lengths do not match up, or if a value in
feature_list_dense_defaults is not None.
TypeError: if feature_list_dense_defaults is not either None or a dict.
"""
# pylint: enable=line-too-long
with ops.op_scope(
[serialized, debug_name], name, "parse_single_sequence_example"):
context_dense_defaults = (
{} if context_dense_defaults is None else context_dense_defaults)
context_sparse_keys = (
[] if context_sparse_keys is None else context_sparse_keys)
context_sparse_types = (
[] if context_sparse_types is None else context_sparse_types)
context_dense_keys = (
[] if context_dense_keys is None else context_dense_keys)
context_dense_types = (
[] if context_dense_types is None else context_dense_types)
context_dense_shapes = (
[[]] * len(context_dense_keys)
if context_dense_shapes is None else context_dense_shapes)
feature_list_dense_keys = (
[] if feature_list_dense_keys is None else feature_list_dense_keys)
feature_list_dense_types = (
[] if feature_list_dense_types is None else feature_list_dense_types)
feature_list_dense_shapes = (
[[]] * len(feature_list_dense_keys)
if feature_list_dense_shapes is None else feature_list_dense_shapes)
feature_list_dense_defaults = (
dict() if feature_list_dense_defaults is None
else feature_list_dense_defaults)
# Internal
feature_list_dense_missing_assumed_empty = []
num_context_dense = len(context_dense_keys)
num_feature_list_dense = len(feature_list_dense_keys)
num_context_sparse = len(context_sparse_keys)
if len(context_dense_shapes) != num_context_dense:
raise ValueError(
"len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
% (len(context_dense_shapes), num_context_dense))
if len(context_dense_types) != num_context_dense:
raise ValueError(
"len(context_dense_types) != len(num_context_dense): %d vs. %d"
% (len(context_dense_types), num_context_dense))
if len(feature_list_dense_shapes) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
"%d vs. %d" % (len(feature_list_dense_shapes),
num_feature_list_dense))
if len(feature_list_dense_types) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_types) != len(num_feature_list_dense):"
"%d vs. %d" % (len(feature_list_dense_types), num_feature_list_dense))
if len(context_sparse_types) != num_context_sparse:
raise ValueError(
"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
% (len(context_sparse_types), num_context_sparse))
if num_context_dense + num_context_sparse + num_feature_list_dense == 0:
raise ValueError(
"Must provide at least one context_sparse key, context_dense key, "
"or feature_list_dense key")
if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
raise ValueError(
"Context_Dense and context_sparse keys must not intersect; "
"intersection: %s" %
set(context_dense_keys).intersection(set(context_sparse_keys)))
if not isinstance(feature_list_dense_defaults, dict):
raise TypeError("feature_list_dense_defaults must be a dict")
for k, v in feature_list_dense_defaults.items():
if v is not None:
raise ValueError("Value feature_list_dense_defaults[%s] must be None"
% k)
feature_list_dense_missing_assumed_empty.append(k)
context_dense_defaults_vec = []
for i, key in enumerate(context_dense_keys):
default_value = context_dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=context_dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=context_dense_types[i], name=key_name)
default_value = array_ops.reshape(
default_value, context_dense_shapes[i])
context_dense_defaults_vec.append(default_value)
context_dense_shapes = [tensor_util.MakeTensorShapeProto(shape)
if isinstance(shape, (list, tuple)) else shape
for shape in context_dense_shapes]
feature_list_dense_shapes = [tensor_util.MakeTensorShapeProto(shape)
if isinstance(shape, (list, tuple)) else shape
for shape in feature_list_dense_shapes]
outputs = gen_parsing_ops._parse_single_sequence_example(
serialized=serialized,
debug_name=debug_name,
context_dense_defaults=context_dense_defaults_vec,
context_sparse_keys=context_sparse_keys,
context_sparse_types=context_sparse_types,
context_dense_keys=context_dense_keys,
context_dense_shapes=context_dense_shapes,
feature_list_dense_keys=feature_list_dense_keys,
feature_list_dense_types=feature_list_dense_types,
feature_list_dense_shapes=feature_list_dense_shapes,
feature_list_dense_missing_assumed_empty=(
feature_list_dense_missing_assumed_empty),
name=name)
(context_sparse_indices, context_sparse_values,
context_sparse_shapes, context_dense_values,
feature_list_dense_values) = outputs
context_sparse_tensors = [
ops.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(context_sparse_indices,
context_sparse_values,
context_sparse_shapes)]
context_output = dict(
zip(context_sparse_keys + context_dense_keys,
context_sparse_tensors + context_dense_values))
feature_list_output = dict(
zip(feature_list_dense_keys,
feature_list_dense_values))
return (context_output, feature_list_output)
@ops.RegisterShape("ParseSingleSequenceExample")
def _ParseSingleSequenceExampleShape(op):
"""Shape function for the ParseExample op."""
op.inputs[0].get_shape().with_rank(0) # input
op.inputs[-1].get_shape().with_rank(0) # debug_name
# feature_list_dense_missing_assumed_empty
op.inputs[1].get_shape().with_rank(1)
num_context_sparse = op.get_attr("Ncontext_sparse")
num_context_dense = op.get_attr("Ncontext_dense")
num_feature_list_dense = op.get_attr("Nfeature_list_dense")
context_dense_shapes = op.get_attr("context_dense_shapes")
feature_list_dense_shapes = op.get_attr("feature_list_dense_shapes")
context_sparse_index_shapes = [
tensor_shape.matrix(None, 1) for _ in range(num_context_sparse)]
context_sparse_value_shapes = [
tensor_shape.vector(None) for _ in range(num_context_sparse)]
context_sparse_shape_shapes = [
tensor_shape.vector(1) for _ in range(num_context_sparse)]
context_dense_shapes = [
tensor_shape.TensorShape(dense_shape)
for dense_shape in context_dense_shapes]
feature_list_dense_shapes = [
tensor_shape.vector(None).concatenate(dense_shape)
for dense_shape in feature_list_dense_shapes]
assert num_context_dense == len(context_dense_shapes)
assert num_feature_list_dense == len(feature_list_dense_shapes)
return (context_sparse_index_shapes + context_sparse_value_shapes +
context_sparse_shape_shapes + context_dense_shapes +
feature_list_dense_shapes)
ops.RegisterShape("StringToNumber")(
common_shapes.unchanged_shape)
@ops.RegisterShape("DecodeRaw")
def _DecodeRawShape(op):
"""Shape function for the DecodeRaw op."""
# NOTE(mrry): Last dimension is data-dependent.
return [op.inputs[0].get_shape().concatenate([None])]
@ops.RegisterShape("DecodeCSV")
def _DecodeCSVShape(op):
"""Shape function for the DecodeCSV op."""
input_shape = op.inputs[0].get_shape()
# Optionally check that all of other inputs are scalar or empty.
for default_input in op.inputs[1:]:
default_input_shape = default_input.get_shape().with_rank(1)
if default_input_shape[0] > 1:
raise ValueError(
"Shape of a default must be a length-0 or length-1 vector.")
return [input_shape] * len(op.outputs)
``` |
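A minimal sketch of actually calling the legacy `parse_example` defined above, assuming a TensorFlow build from the same era (the pre-1.0 keyword signature with `sparse_keys`/`dense_keys`); the feature names and defaults are borrowed from the docstring examples.
```python
import tensorflow as tf

serialized = tf.placeholder(tf.string, shape=[None])  # a batch of serialized Example protos

parsed = tf.parse_example(
    serialized,
    sparse_keys=["kw"],
    sparse_types=[tf.string],
    dense_keys=["age"],
    dense_types=[tf.int64],
    dense_defaults={"age": -1},
    dense_shapes=[(1,)])

# parsed["kw"] is a SparseTensor; parsed["age"] is a dense [N, 1] int64 Tensor.
```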
{
"source": "jongtaeklee/hsn_v1",
"score": 2
} |
#### File: hsn_v1/paip/xml2mask.py
```python
import numpy as np
import xml.etree.ElementTree as et
import os, glob, re
from tqdm import tqdm
import tifffile, cv2
import openslide
home_dir = 'D:/Data/Pathology/PAIP 2020/xml2mask_test'
wsi_load_dir = os.path.join(home_dir, 'wsi_folder')
xml_load_dir = os.path.join(home_dir, 'xml_folder')
wsi_fns = sorted(glob.glob(os.path.join(wsi_load_dir, '*.svs')) + glob.glob(os.path.join(wsi_load_dir, '*.SVS')))
xml_fns = sorted(glob.glob(os.path.join(xml_load_dir, '*.xml')) + glob.glob(os.path.join(xml_load_dir, '*.XML')))
level = 2
div = 4**level ## Level0 scale to Level2 scale
assert len(wsi_fns) == len(xml_fns) == 1 ## the number of training_data WSI pool
save_dir = f'./mask_img_l{level}/'
os.makedirs(save_dir, exist_ok=True)
q = re.compile('training_data_[0-9]{2}')
'''
Annotations (root)
> Annotation (get 'Id' -> 1: tumor area)
> Regions
> Region (get 'NegativeROA' -> 0: positive area // 1: inner negative area)
> Vertices
> Vertex (get 'X', 'Y')
'''
def xml2mask(xml_fn, shape):
# print('reconstructing sparse xml to contours of div={}..'.format(div))
ret = dict()
board_pos = None
board_neg = None
# Annotations >>
e = et.parse(xml_fn).getroot()
e = e.findall('Annotation')
assert(len(e) == 1), len(e)
for ann in e:
board_pos = np.zeros(shape[:2], dtype=np.uint8)
board_neg = np.zeros(shape[:2], dtype=np.uint8)
id_num = int(ann.get('Id'))
assert(id_num == 1)# or id_num == 2)
regions = ann.findall('Regions')
assert(len(regions) == 1)
rs = regions[0].findall('Region')
plistlist = list()
nlistlist = list()
#print('rs:', len(rs))
for i, r in enumerate(rs):
ylist = list()
xlist = list()
plist, nlist = list(), list()
negative_flag = int(r.get('NegativeROA'))
assert negative_flag == 0 or negative_flag == 1
negative_flag = bool(negative_flag)
vs = r.findall('Vertices')[0]
vs = vs.findall('Vertex')
vs.append(vs[0]) # last dot should be linked to the first dot
for v in vs:
y, x = int(v.get('Y').split('.')[0]), int(v.get('X').split('.')[0])
if div is not None:
y //= div
x //= div
if y >= shape[0]:
y = shape[0]-1
elif y < 0:
y = 0
if x >= shape[1]:
x = shape[1]-1
elif x < 0:
x = 0
ylist.append(y)
xlist.append(x)
if negative_flag:
nlist.append((x, y))
else:
plist.append((x, y))
if plist:
plistlist.append(plist)
else:
nlistlist.append(nlist)
for plist in plistlist:
board_pos = cv2.drawContours(board_pos, [np.array(plist, dtype=np.int32)], -1, [255, 0, 0], -1)
for nlist in nlistlist:
board_neg = cv2.drawContours(board_neg, [np.array(nlist, dtype=np.int32)], -1, [255, 0, 0], -1)
ret[id_num] = (board_pos>0) * (board_neg==0)
return ret
def save_mask(xml_fn, shape):
wsi_id = q.findall(xml_fn)[0]
save_fn = save_dir + f'{wsi_id}_l{level}_annotation_tumor.tif'
ret = xml2mask(xml_fn, shape)
tifffile.imsave(save_fn, (ret[1]>0).astype(np.uint8)*255, compress=9)
def load_svs_shape(fn, level=2):
imgh = openslide.OpenSlide(fn)
return [imgh.level_dimensions[level][1], imgh.level_dimensions[level][0]]
if __name__ == '__main__':
for wsi_fn, xml_fn in tqdm(zip(wsi_fns, xml_fns), total=len(wsi_fns)):
wsi_id = q.findall(wsi_fn)[0]
xml_id = q.findall(xml_fn)[0]
assert wsi_id == xml_id
shape = load_svs_shape(wsi_fn, level=level)
save_mask(xml_fn, shape)
``` |
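A self-contained illustration of the annotation layout that `xml2mask()` walks (Annotations > Annotation > Regions > Region > Vertices > Vertex); the XML below is made up for demonstration and is not taken from the PAIP data.
```python
import xml.etree.ElementTree as et

sample_xml = """
<Annotations>
  <Annotation Id="1">
    <Regions>
      <Region NegativeROA="0">
        <Vertices>
          <Vertex X="100.0" Y="200.0"/>
          <Vertex X="140.0" Y="200.0"/>
          <Vertex X="120.0" Y="260.0"/>
        </Vertices>
      </Region>
    </Regions>
  </Annotation>
</Annotations>
"""

root = et.fromstring(sample_xml)
for region in root.iter('Region'):
    negative = bool(int(region.get('NegativeROA')))
    points = [(int(float(v.get('X'))), int(float(v.get('Y'))))
              for v in region.iter('Vertex')]
    print(negative, points)  # -> False [(100, 200), (140, 200), (120, 260)]
```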
{
"source": "jongtaeklho/swpp-hw1-jongtaeklho",
"score": 4
} |
#### File: jongtaeklho/swpp-hw1-jongtaeklho/babyname_parser.py
```python
import sys
import re
import os
from functools import wraps
"""Baby Names exercise
Implement the babyname parser class that parses the popular names and their ranks from a html file.
1) At first, you need to implement a decorator that checks whether the html file exists or not.
2) Also, the parser should extract tuples of (rank, male-name, female-name) from the file by using regex.
For writing regex, it's nice to include a copy of the target text for inspiration.
3) Finally, you need to implement `parse` method in `BabynameParser` class that parses the extracted tuples
with the given lambda and return a list of processed results.
"""
class BabynameFileNotFoundException(Exception):
"""
A custom exception for the cases that the babyname file does not exist.
"""
pass
def check_filename_existence(func):
@wraps(func)
def wrapper(*args,**kwargs):
try:
return func(*args,**kwargs)
except FileNotFoundError as pathname :
raise BabynameFileNotFoundException("No such file: {}".format(pathname.filename))
return wrapper
"""
(1 point)
A decorator that catches the non-exiting filename argument and raises a custom `BabynameFileNotFoundException`.
Args:
func: The function to decorate.
Raises:
BabynameFileNotFoundException: if there is no such file while func tries to open a file.
We assume func receives directory path and year to generate a filename to open.
"""
# TODO: Implement this decorator
class BabynameParser:
@check_filename_existence
def __init__(self, dirname, year):
"""
(3 points)
Given directory path and year, extracts the name of a file to open the corresponding file
and a list of the (rank, male-name, female-name) tuples from the file read by using regex.
[('1', 'Michael', 'Jessica'), ('2', 'Christopher', 'Ashley'), ....]
Args:
dirname: The name of the directory where baby name html files are stored
year: The year number. int.
"""
pathname = os.path.join(dirname, "{}.html".format(year))
f=open(pathname,'r')
text=f.read()
self.year=year
regex=re.compile("<td>\w{1,60}</td>")
res=regex.findall(text)
mylist=[(res[0][4:-5],res[1][4:-5],res[2][4:-5])]
i=3
while i <= (len(res)-3):
firs=res[i][4:-5]
secon=res[i+1][4:-5]
thir=res[i+2][4:-5]
mylist.append((firs,secon,thir))
i+=3
self.rank_to_names_tuples = mylist
def parse(self, parsing_lambda):
answer=[]
for i in self.rank_to_names_tuples :
answer.append(parsing_lambda(i))
return answer
"""
(2 points)
Collects a list of babynames parsed from the (rank, male-name, female-name) tuples.
The list must contains all results processed with the given lambda.
Args:
parsing_lambda: The parsing lambda.
It must process an single (string, string, string) tuple and return something.
Returns:
A list of lambda function's output
"""
# TODO: Implement this method
``` |
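A tiny demonstration of the `<td>` extraction used in `BabynameParser.__init__`, run against a made-up HTML fragment rather than a real babynames file.
```python
import re

html = "<tr><td>1</td><td>Michael</td><td>Jessica</td>" \
       "<td>2</td><td>Christopher</td><td>Ashley</td></tr>"

cells = [m[4:-5] for m in re.findall(r"<td>\w{1,60}</td>", html)]
tuples = [tuple(cells[i:i + 3]) for i in range(0, len(cells), 3)]
print(tuples)  # -> [('1', 'Michael', 'Jessica'), ('2', 'Christopher', 'Ashley')]
```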
{
"source": "JONGWE1/BankManagement",
"score": 2
} |
#### File: app/imgpatient/views.py
```python
from flask import render_template, redirect, request, url_for, flash, make_response
from .import imgpatient
from .form import ImgpCheckinForm, ImgpRecipeForm
from ..model import Medicine, Price, UserInfo, ImgDoctorTimetable, ImgpCheckin, ImgpCheckinAfford, ImgpRecipe, ImgpRecipeAfford, ImgpCost
from ..decorator import is_login, isauth
from .. import db
import datetime
@imgpatient.route('/imgpatient/checkin', methods= ['GET', 'POST'])
@is_login
@isauth
def checkin(name, auth):
patientcheckin = ImgpCheckin()
form = ImgpCheckinForm()
price = ImgpCheckinAfford()
if request.method == 'GET':
return render_template('imgpatient/checkin.html', form= form, name= name, auth=auth)
else:
if form.validate_on_submit():
response = make_response(redirect(url_for('imgpatient.imgpindex')))
prepatient = ImgpCheckin.query.order_by(ImgpCheckin.imgpcheckinid.desc()).first()
patientcheckin.imgpcheckinid = prepatient.imgpcheckinid + 1
nowpcheckid = patientcheckin.imgpcheckinid
response.set_cookie('img', str(nowpcheckid))
patientcheckin.patientid = form.patientid.data
patientcheckin.doctorid = form.doctorname.data
docid = UserInfo.query.filter_by(id= form.doctorname.data).first()
patientcheckin.doctortype = docid.rank
db.session.add(patientcheckin)
db.session.commit()
price.imgpcheckinid = patientcheckin.imgpcheckinid
price.imgpid = form.patientid.data
priceinfo = Price.query.filter_by(optionid= docid.rank).first()
price.price = priceinfo.price
db.session.add(price)
db.session.commit()
return response
@imgpatient.route('/imgpatient/imgpindex', methods= ['GET', 'POST'])
@is_login
@isauth
def imgpindex(name, auth):
nowpcheckid = int(request.cookies.get('img'))
if request.method == 'GET':
# nowpcheckid = request.cookies.get('img')
# print('2', type(int(nowpcheckid)), int(nowpcheckid))
return render_template('imgpatient/imgpindex.html', name= name, auth=auth)
@imgpatient.route('/imgpatient/recipe', methods= ['GET', 'POST'])
@is_login
@isauth
def recipe(name, auth):
patientrecipe=ImgpRecipe()
form = ImgpRecipeForm()
nowpcheckid = request.cookies.get('img')
if request.method == 'GET':
return render_template('/imgpatient/medicine.html', form= form, name= name, auth=auth)
else:
if form.validate_on_submit():
patientrecipe.imgpcheckinid = int(nowpcheckid)
patientrecipe.imgpid = form.imgpid.data
patientrecipe.medicinenames = ','.join(form.medicines.data)
db.session.add(patientrecipe)
db.session.commit()
return redirect(url_for('imgpatient.recipenum', name= name))
@imgpatient.route('/imgpatient/recipenum', methods= ['GET', 'POST'])
@is_login
@isauth
def recipenum(name, auth):
nowpcheckid = request.cookies.get('img')
price = ImgpRecipeAfford()
if request.method == 'GET':
patientcheckinid = int(nowpcheckid)
selectedinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
medicinenames = selectedinfo.medicinenames
medslist = medicinenames.split(',')
medsnlist = []
for item in medslist:
med = Medicine.query.filter_by(id= item).first()
medname = med.medicinename
medsnlist.append(medname)
return render_template('imgpatient/recipenum.html', medsnlist= medsnlist, name= name, auth=auth)
else:
patientcheckinid = int(nowpcheckid)
imgprecipe = ImgpRecipe.query.filter(ImgpRecipe.imgpcheckinid == patientcheckinid).first()
mednumbers = []
d = request.values.to_dict()
for number in d.keys():
mednumbers.append(d.get(number))
imgprecipe.medicinenumbers = ','.join(mednumbers)
db.session.commit()
price.imgpcheckinid = patientcheckinid
imgpreinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
price.imgpid = imgpreinfo.imgpid
recipeinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
recipemdname = recipeinfo.medicinenames
recipemdnamel = recipemdname.split(',')
recipenum = recipeinfo.medicinenumbers
recipenuml = recipenum.split(',')
count = 0
zipinfo = zip(recipemdnamel, recipenuml)
for item in zipinfo:
medinfo = Price.query.filter_by(optionid= int(item[0])).first()
count = count + medinfo.price * int(item[1])
# for item in recipemdnamel:
# medinfo = Price.query.filter_by(optionid= int(item)).first()
# count = count + medinfo.price *
price.price = count
db.session.add(price)
db.session.commit()
        flash('The prescription has been uploaded')
return redirect(url_for('imgpatient.imgpindex', name= name))
@imgpatient.route('/imgpatient/cost', methods= ['GET', 'POST'])
@is_login
@isauth
def cost(name, auth):
cost = ImgpCost()
nowpcheckid = int(request.cookies.get('img'))
if request.method == 'GET':
imgpcheckininfo = ImgpCheckinAfford.query.filter_by(imgpcheckinid= nowpcheckid).first()
imgprecipeinfo = ImgpRecipeAfford.query.filter_by(imgpcheckinid= nowpcheckid).first()
ociprice = imgpcheckininfo.price
orprice = imgprecipeinfo.price
cost.imgpcheckinid = nowpcheckid
cost.price = ociprice + orprice
price = cost.price
db.session.add(cost)
db.session.commit()
return render_template('imgpatient/cost.html', price= price, name= name, auth= auth)
```
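The recipe total in `recipenum` is just a sum of unit price times quantity over the prescribed medicines; the stand-alone snippet below reproduces that loop with invented ids, prices and quantities.
```python
medicine_prices = {3: 1200, 7: 450, 12: 80}   # optionid -> unit price (made up)
medicine_ids = ['3', '7', '12']               # ImgpRecipe.medicinenames split on ','
medicine_numbers = ['2', '1', '10']           # ImgpRecipe.medicinenumbers split on ','

count = 0
for med_id, number in zip(medicine_ids, medicine_numbers):
    count += medicine_prices[int(med_id)] * int(number)

print(count)  # -> 2*1200 + 1*450 + 10*80 = 3650
```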
#### File: JONGWE1/BankManagement/manage.py
```python
import os
from app import create_app, db
from app.model import UserGroup, UserInfo, HospitalConstuct, HospitalClass, PatientInfo, DoctorTimetable, \
ImgDoctorTimetable, ExpertsTimetable, Medicine, CheckClass, CheckItem, ExamClass, ExamItem, InhospitalArea, BedInfo, \
Price, FamilyDoctorArea, FamilyDoctor, FamilyDoctorWorkArea, FamilyPatientInfo, FamilyPatientTestResult, \
SpecialConcern, LecturePlace, LectureTime, OpCheckin, OpExam, OpCheck, OpRecipe, OpCheckinAfford, OpExamAfford, \
OpCheckAfford, OpRecipeAfford, OpCost, InPatientDeposit, InPatientTableSet, InPatientTimeAndBed, InPatientCheck, \
InPatientInspect, InPatientPrescript, ImgpCheckin, ImgpRecipe, ImgpCheckinAfford, ImgpRecipeAfford, ImgpCost
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
# def make_shell_context():
manager.add_command('db', MigrateCommand)
# @manager.command
# def test():
# """Run the unit tests"""
# import unittest
# test = unittest.TestLoader().discover('tests')
# unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
    # for the interactive environment
manager.run()
# app.run()
    # for running the program
``` |
{
"source": "jongwon-jay-lee/ko_lm_dataformat",
"score": 2
} |
#### File: ko_lm_dataformat/ko_lm_dataformat/reader.py
```python
import gzip
import io
import logging
import multiprocessing as mp
from zipfile import ZipFile
import jsonlines
import ujson as json
import zstandard
from .utils import handle_jsonl, listdir_or_file, tarfile_reader
logger = logging.getLogger(__name__)
class Reader:
def __init__(self, in_path: str):
"""
Read data which is archive with ko_lm_dataformat
Args:
in_path (str): Input directory path
"""
self.in_path = in_path
def stream_data(self, get_meta=False, autojoin_sentences=False, sent_joiner=" ", threaded=False):
if not threaded:
yield from self._stream_data(
get_meta=get_meta, autojoin_sentences=autojoin_sentences, sent_joiner=sent_joiner
)
return
q = mp.Queue(1000)
p = mp.Process(target=self._stream_data_threaded, args=(q, get_meta, autojoin_sentences, sent_joiner))
p.start()
while p.is_alive():
res = q.get()
if res is None:
break
yield res
def _stream_data_threaded(self, q, get_meta=False, autojoin_sentences=False, sent_joiner=" "):
for data in self._stream_data(get_meta, autojoin_sentences, sent_joiner):
q.put(data)
q.put(None)
def _stream_data(self, get_meta=False, autojoin_sentences=False, sent_joiner=" ", jsonl_key="text"):
"""
        - Supported formats: jsonl.zst, jsonl.zst.tar, json.zst, dat.zst, txt, zip, tar.gz, json.gz, gz
Args:
get_meta (bool, optional): Whether to get meta data. Only jsonl file has metadata. Defaults to False.
jsonl_key (str, optional): Key name for text. Defaults to "text".
Yields:
if get_meta:
text: str
else:
(text: str, meta: dict)
"""
self.f_name = ""
for f in listdir_or_file(self.in_path):
self.f_name = f
if f.endswith(".jsonl.zst"):
yield from self.read_jsonl(
f, get_meta=get_meta, autojoin_sentences=autojoin_sentences, sent_joiner=sent_joiner, key=jsonl_key
)
elif f.endswith(".dat.zst"):
assert not get_meta
yield from self.read_dat(f)
elif f.endswith(".jsonl.zst.tar"):
yield from self.read_jsonl_tar(
f, get_meta=get_meta, autojoin_sentences=autojoin_sentences, sent_joiner=sent_joiner, key=jsonl_key
)
elif f.endswith(".json.zst"):
assert not get_meta
yield from self.read_json(f)
elif f.endswith(".txt"):
assert not get_meta
yield from self.read_txt(f)
elif f.endswith(".zip"):
assert not get_meta
yield from self.read_zip(f)
elif f.endswith(".tar.gz"):
assert not get_meta
yield from self.read_tgz(f)
elif f.endswith(".json.gz"):
assert not get_meta
yield from self.read_jsongz(f)
elif f.endswith(".gz"):
assert not get_meta
yield from self.read_gz(f)
else:
logger.info(f"Skipping {f} as streaming for that filetype is not implemented")
def read_txt(self, file):
with open(file, "r", encoding="utf-8") as fh:
yield fh.read()
def read_zip(self, file):
archive = ZipFile(file, "r")
for f in archive.namelist():
yield archive.read(f).decode("UTF-8")
def read_tgz(self, file):
gz = gzip.open(file)
yield from (x.decode("utf-8") for x in tarfile_reader(gz, streaming=False))
def read_gz(self, file):
with gzip.open(file, "rb") as f:
for line in f:
yield line.decode("utf-8")
def read_jsongz(self, file):
for line in self.read_gz(file):
yield json.loads(line)
def read_json(self, file):
with open(file, "rb") as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
ob = json.load(reader)
yield from ob
def read_dat(self, file):
with open(file, "rb") as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
while True:
ln = reader.read(16).decode("UTF-8")
if not ln:
break
ln = int(ln)
yield reader.read(ln).decode("UTF-8")
def read_jsonl(
self,
file_path: str,
get_meta: bool = False,
autojoin_sentences: bool = True,
sent_joiner: str = " ",
key: str = "text",
):
"""
Read Jsonl data.
Args:
file_path (str): input file path
get_meta (bool, optional): return metadata. Defaults to False.
autojoin_sentences (bool, optional): Join sentences if data consists of multiple texts (=paragraph). Defaults to True.
            sent_joiner (str, optional): Separator for joining multiple sentences. Defaults to " ".
key (str, optional): Json key name for text. Defaults to "text".
"""
with open(file_path, "rb") as fh:
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(fh))
rdr = jsonlines.Reader(reader)
yield from handle_jsonl(rdr, get_meta, autojoin_sentences, sent_joiner, key)
def read_jsonl_tar(
self,
file_path,
get_meta=False,
autojoin_sentences: bool = True,
sent_joiner: str = " ",
key="text",
):
with open(file_path, "rb") as fh:
for f in tarfile_reader(fh, streaming=True):
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(f))
rdr = jsonlines.Reader(reader)
yield from handle_jsonl(rdr, get_meta, autojoin_sentences, sent_joiner, key)
f.close()
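# Usage sketch (added for illustration, not part of the original file). The
# directory name is hypothetical and assumed to contain .jsonl.zst archives;
# stream_data yields (text, meta) tuples only when get_meta=True.
#
#   rdr = Reader("kowiki_kolm_output")
#   for text, meta in rdr.stream_data(get_meta=True, autojoin_sentences=True):
#       print(len(text), meta)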
``` |
{
"source": "jongwony/console-calendar",
"score": 2
} |
#### File: jongwony/console-calendar/googleoauth.py
```python
import re
import os
import pickle
from datetime import datetime
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from .inputconfig import date_triming
from .util import script_path
class GoogleCal:
def __init__(self):
self.creds = None
self.calendar = None
self.scopes = ['https://www.googleapis.com/auth/calendar']
if os.path.exists(script_path('token.pickle')):
with open(script_path('token.pickle'), 'rb') as token:
self.creds = pickle.load(token)
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
self.creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
script_path('credentials.json'), self.scopes)
self.creds = flow.run_local_server()
with open(script_path('token.pickle'), 'wb') as token:
pickle.dump(self.creds, token)
def build_calendar(self):
self.calendar = build('calendar', 'v3', credentials=self.creds)
return self.calendar
def insert_event(self, event):
return self.calendar.events().insert(calendarId='primary',
body=event).execute()
def quick_event(self, query):
return self.calendar.events().quickAdd(calendarId='primary',
text=query).execute()
def calendar_list_all(self):
page_token = None
while True:
events = self.calendar.events().list(calendarId='primary',
pageToken=page_token).execute()
for event in events['items']:
if event['status'] == 'confirmed':
date, = {'dateTime', 'date'}.intersection(
set(event['start']))
date_trim = re.sub(r'(.*)T(\d+):(\d+)(.*)', r'\1 \2:\3',
event['start'][date])
element = '{:<16} {} {}'.format(date_trim, event['summary'],
event.get('reminders'))
print(element)
page_token = events.get('nextPageToken')
if not page_token:
break
def calendar_lists(self):
start = input('From: ')
end = input('To: ')
start_dt = date_triming(start)
end_dt = date_triming(end)
assert start_dt < end_dt, "Keep order!"
items = list()
page_token = None
while True:
events = self.calendar.events().list(calendarId='primary',
pageToken=page_token).execute()
for event in events['items']:
if 'dateTime' in event['start']:
date = datetime.strptime(event['start']['dateTime'],
'%Y-%m-%dT%H:%M:%S+09:00')
if 'date' in event['start']:
date = datetime.strptime(event['start']['date'], '%Y-%m-%d')
items.append((date, EventItems(event)))
page_token = events.get('nextPageToken')
if not page_token:
s = sorted(items, key=lambda t: t[0])
for k, e in s:
if start_dt < k < end_dt:
print('{} {}'.format(
datetime.strftime(k, '%Y-%m-%d %H:%M'), e.summary))
break
class EventItems:
def __init__(self, d):
self.__dict__ = d
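# Usage sketch (added for illustration, not part of the original module).
# Assumes credentials.json sits next to the package so the OAuth flow in
# __init__ can run; the event text is made up.
#
#   cal = GoogleCal()          # loads/refreshes token.pickle or runs the flow
#   cal.build_calendar()
#   cal.quick_event('Lunch with a friend tomorrow 12pm')
#   cal.calendar_list_all()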
``` |
{
"source": "jongwoo6852/models",
"score": 2
} |
#### File: nlp/configs/encoders.py
```python
from typing import Optional
import dataclasses
import gin
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@dataclasses.dataclass
class BertEncoderConfig(hyperparams.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
output_range: Optional[int] = None
return_all_encoder_outputs: bool = False
@dataclasses.dataclass
class MobileBertEncoderConfig(hyperparams.Config):
"""MobileBERT encoder configuration.
Attributes:
word_vocab_size: number of words in the vocabulary.
word_embed_size: word embedding size.
type_vocab_size: number of word types.
max_sequence_length: maximum length of input sequence.
num_blocks: number of transformer block in the encoder model.
hidden_size: the hidden size for the transformer block.
num_attention_heads: number of attention heads in the transformer block.
intermediate_size: the size of the "intermediate" (a.k.a., feed forward)
layer.
hidden_activation: the non-linear activation function to apply to the
output of the intermediate/feed-forward layer.
hidden_dropout_prob: dropout probability for the hidden layers.
attention_probs_dropout_prob: dropout probability of the attention
probabilities.
intra_bottleneck_size: the size of bottleneck.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: whether to share linear transformation for keys
and queries.
num_feedforward_networks: number of stacked feed-forward networks.
normalization_type: the type of normalization_type, only 'no_norm' and
'layer_norm' are supported. 'no_norm' represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. 'layer_norm' is used for the teacher model.
classifier_activation: if using the tanh activation for the final
representation of the [CLS] token in fine-tuning.
"""
word_vocab_size: int = 30522
word_embed_size: int = 128
type_vocab_size: int = 2
max_sequence_length: int = 512
num_blocks: int = 24
hidden_size: int = 512
num_attention_heads: int = 4
intermediate_size: int = 4096
hidden_activation: str = "gelu"
hidden_dropout_prob: float = 0.1
attention_probs_dropout_prob: float = 0.1
intra_bottleneck_size: int = 1024
initializer_range: float = 0.02
use_bottleneck_attention: bool = False
key_query_shared_bottleneck: bool = False
num_feedforward_networks: int = 1
normalization_type: str = "layer_norm"
classifier_activation: bool = True
input_mask_dtype: str = "int32"
@dataclasses.dataclass
class AlbertEncoderConfig(hyperparams.Config):
"""ALBERT encoder configuration."""
vocab_size: int = 30000
embedding_width: int = 128
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.0
attention_dropout_rate: float = 0.0
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
@dataclasses.dataclass
class BigBirdEncoderConfig(hyperparams.Config):
"""BigBird encoder configuration."""
vocab_size: int = 50358
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 4096
num_rand_blocks: int = 3
block_size: int = 64
type_vocab_size: int = 16
initializer_range: float = 0.02
embedding_width: Optional[int] = None
use_gradient_checkpointing: bool = False
@dataclasses.dataclass
class XLNetEncoderConfig(hyperparams.Config):
"""XLNet encoder configuration."""
vocab_size: int = 32000
num_layers: int = 24
hidden_size: int = 1024
num_attention_heads: int = 16
head_size: int = 64
inner_size: int = 4096
inner_activation: str = "gelu"
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
attention_type: str = "bi"
bi_data: bool = False
tie_attention_biases: bool = False
memory_length: int = 0
same_length: bool = False
clamp_length: int = -1
reuse_length: int = 0
use_cls_mask: bool = False
embedding_width: int = 1024
initializer_range: float = 0.02
two_stream: bool = False
@dataclasses.dataclass
class EncoderConfig(hyperparams.OneOfConfig):
"""Encoder configuration."""
type: Optional[str] = "bert"
albert: AlbertEncoderConfig = AlbertEncoderConfig()
bert: BertEncoderConfig = BertEncoderConfig()
bigbird: BigBirdEncoderConfig = BigBirdEncoderConfig()
mobilebert: MobileBertEncoderConfig = MobileBertEncoderConfig()
xlnet: XLNetEncoderConfig = XLNetEncoderConfig()
@gin.configurable
def build_encoder(config: EncoderConfig,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
encoder_cls=None,
bypass_config: bool = False):
"""Instantiate a Transformer encoder network from EncoderConfig.
Args:
config: the one-of encoder config, which provides encoder parameters of a
chosen encoder.
embedding_layer: an external embedding layer passed to the encoder.
encoder_cls: an external encoder cls not included in the supported encoders,
usually used by gin.configurable.
bypass_config: whether to ignore config instance to create the object with
`encoder_cls`.
Returns:
An encoder instance.
"""
if bypass_config:
return encoder_cls()
encoder_type = config.type
encoder_cfg = config.get()
if encoder_cls and encoder_cls.__name__ == "EncoderScaffold":
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=encoder_cfg.return_all_encoder_outputs,
dict_outputs=True)
return encoder_cls(**kwargs)
if encoder_type == "mobilebert":
return networks.MobileBERTEncoder(
word_vocab_size=encoder_cfg.word_vocab_size,
word_embed_size=encoder_cfg.word_embed_size,
type_vocab_size=encoder_cfg.type_vocab_size,
max_sequence_length=encoder_cfg.max_sequence_length,
num_blocks=encoder_cfg.num_blocks,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_act_fn=encoder_cfg.hidden_activation,
hidden_dropout_prob=encoder_cfg.hidden_dropout_prob,
attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob,
intra_bottleneck_size=encoder_cfg.intra_bottleneck_size,
initializer_range=encoder_cfg.initializer_range,
use_bottleneck_attention=encoder_cfg.use_bottleneck_attention,
key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck,
num_feedforward_networks=encoder_cfg.num_feedforward_networks,
normalization_type=encoder_cfg.normalization_type,
classifier_activation=encoder_cfg.classifier_activation,
input_mask_dtype=encoder_cfg.input_mask_dtype)
if encoder_type == "albert":
return networks.AlbertEncoder(
vocab_size=encoder_cfg.vocab_size,
embedding_width=encoder_cfg.embedding_width,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dict_outputs=True)
if encoder_type == "bigbird":
# TODO(frederickliu): Support use_gradient_checkpointing.
if encoder_cfg.use_gradient_checkpointing:
raise ValueError("Gradient checkpointing unsupported at the moment.")
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate)
attention_cfg = dict(
num_heads=encoder_cfg.num_attention_heads,
key_dim=int(encoder_cfg.hidden_size // encoder_cfg.num_attention_heads),
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
max_rand_mask_length=encoder_cfg.max_position_embeddings,
num_rand_blocks=encoder_cfg.num_rand_blocks,
from_block_size=encoder_cfg.block_size,
to_block_size=encoder_cfg.block_size,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
attention_cls=layers.BigBirdAttention,
attention_cfg=attention_cfg)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cls=layers.TransformerScaffold,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
mask_cls=layers.BigBirdMasks,
mask_cfg=dict(block_size=encoder_cfg.block_size),
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=False,
dict_outputs=True,
layer_idx_as_attention_seed=True)
return networks.EncoderScaffold(**kwargs)
if encoder_type == "xlnet":
return networks.XLNetBase(
vocab_size=encoder_cfg.vocab_size,
num_layers=encoder_cfg.num_layers,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
head_size=encoder_cfg.head_size,
inner_size=encoder_cfg.inner_size,
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
attention_type=encoder_cfg.attention_type,
bi_data=encoder_cfg.bi_data,
two_stream=encoder_cfg.two_stream,
tie_attention_biases=encoder_cfg.tie_attention_biases,
memory_length=encoder_cfg.memory_length,
clamp_length=encoder_cfg.clamp_length,
reuse_length=encoder_cfg.reuse_length,
inner_activation=encoder_cfg.inner_activation,
use_cls_mask=encoder_cfg.use_cls_mask,
embedding_width=encoder_cfg.embedding_width,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
# Uses the default BERTEncoder configuration schema to create the encoder.
# If it does not match, please add a switch branch by the encoder type.
return networks.BertEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
embedding_layer=embedding_layer,
return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs,
dict_outputs=True)
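# Usage sketch (added for illustration, not part of the original file): build a
# small BERT encoder from the one-of config. The overridden sizes are arbitrary;
# the field names come from the dataclasses defined above.
#
#   config = EncoderConfig(
#       type="bert",
#       bert=BertEncoderConfig(num_layers=2,
#                              hidden_size=128,
#                              num_attention_heads=2,
#                              intermediate_size=512))
#   encoder = build_encoder(config)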
``` |
{
"source": "jongwook/onsets_and_frames",
"score": 3
} |
#### File: onsets_and_frames/onsets_and_frames/midi.py
```python
import multiprocessing
import sys
import mido
import numpy as np
from joblib import Parallel, delayed
from mido import Message, MidiFile, MidiTrack
from mir_eval.util import hz_to_midi
from tqdm import tqdm
def parse_midi(path):
"""open midi file and return np.array of (onset, offset, note, velocity) rows"""
midi = mido.MidiFile(path)
time = 0
sustain = False
events = []
for message in midi:
time += message.time
if message.type == 'control_change' and message.control == 64 and (message.value >= 64) != sustain:
# sustain pedal state has just changed
sustain = message.value >= 64
event_type = 'sustain_on' if sustain else 'sustain_off'
event = dict(index=len(events), time=time, type=event_type, note=None, velocity=0)
events.append(event)
if 'note' in message.type:
# MIDI offsets can be either 'note_off' events or 'note_on' with zero velocity
velocity = message.velocity if message.type == 'note_on' else 0
event = dict(index=len(events), time=time, type='note', note=message.note, velocity=velocity, sustain=sustain)
events.append(event)
notes = []
for i, onset in enumerate(events):
if onset['velocity'] == 0:
continue
# find the next note_off message
offset = next(n for n in events[i + 1:] if n['note'] == onset['note'] or n is events[-1])
if offset['sustain'] and offset is not events[-1]:
# if the sustain pedal is active at offset, find when the sustain ends
offset = next(n for n in events[offset['index'] + 1:]
if n['type'] == 'sustain_off' or n['note'] == onset['note'] or n is events[-1])
note = (onset['time'], offset['time'], onset['note'], onset['velocity'])
notes.append(note)
return np.array(notes)
def save_midi(path, pitches, intervals, velocities):
"""
Save extracted notes as a MIDI file
Parameters
----------
path: the path to save the MIDI file
pitches: np.ndarray of bin_indices
intervals: list of (onset_index, offset_index)
velocities: list of velocity values
"""
file = MidiFile()
track = MidiTrack()
file.tracks.append(track)
ticks_per_second = file.ticks_per_beat * 2.0
events = []
for i in range(len(pitches)):
events.append(dict(type='on', pitch=pitches[i], time=intervals[i][0], velocity=velocities[i]))
events.append(dict(type='off', pitch=pitches[i], time=intervals[i][1], velocity=velocities[i]))
events.sort(key=lambda row: row['time'])
last_tick = 0
for event in events:
current_tick = int(event['time'] * ticks_per_second)
velocity = int(event['velocity'] * 127)
if velocity > 127:
velocity = 127
pitch = int(round(hz_to_midi(event['pitch'])))
track.append(Message('note_' + event['type'], note=pitch, velocity=velocity, time=current_tick - last_tick))
last_tick = current_tick
file.save(path)
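# Illustrative example (not in the original file): `pitches` are frequencies in
# Hz (converted above via hz_to_midi) and `velocities` are fractions in [0, 1],
# so a one-second A4 note at ~80% velocity could be written with:
#
#   save_midi('example.mid', np.array([440.0]), [(0.0, 1.0)], [0.8])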
if __name__ == '__main__':
def process(input_file, output_file):
midi_data = parse_midi(input_file)
np.savetxt(output_file, midi_data, '%.6f', '\t', header='onset\toffset\tnote\tvelocity')
def files():
for input_file in tqdm(sys.argv[1:]):
if input_file.endswith('.mid'):
output_file = input_file[:-4] + '.tsv'
elif input_file.endswith('.midi'):
output_file = input_file[:-5] + '.tsv'
else:
print('ignoring non-MIDI file %s' % input_file, file=sys.stderr)
continue
yield (input_file, output_file)
Parallel(n_jobs=multiprocessing.cpu_count())(delayed(process)(in_file, out_file) for in_file, out_file in files())
``` |
{
"source": "jongwookyi/AC-SUM-GAN",
"score": 3
} |
#### File: AC-SUM-GAN/data/VSUMMVideo.py
```python
from . import BaseVideo
from pathlib import Path
import os
import numpy as np
# directory structure:
# /VSUMM
# /database
# /UserSummary
# /user1
# ...
# /user5
class VSUMMVideo(BaseVideo):
NUM_USERS = 5
SUMMARY_NAME_PREFIX = "Frame"
def __init__(self, name, dataset_dir="./VSUMM"):
file_name = f"{name}.mpg"
video_dir = Path(dataset_dir, "database")
super(VSUMMVideo, self).__init__(file_name, video_dir)
def ground_truth(self):
data_dir = self.video_dir / f"../UserSummary/{self.name}"
user_score = np.zeros((self.NUM_USERS, self.nframes), dtype=np.uint8)
for user_index in range(0, self.NUM_USERS):
user_dir = data_dir / f"user{user_index + 1}"
for summary in user_dir.iterdir():
summary_name = summary.stem
if not summary_name.startswith(self.SUMMARY_NAME_PREFIX):
print("Not a summary file:", summary)
continue
frame_index = int(summary_name[len(self.SUMMARY_NAME_PREFIX):])
user_score[user_index, frame_index] = 1
return user_score
```
#### File: AC-SUM-GAN/data/vsum_tool.py
```python
from .KTS import cpd_auto
from .knapsack import knapsack
import os
import numpy as np
import math
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
def get_change_points(video_feat, n_frames, fps,
decimation_factor=1, min_cp_interval=5):
kernel = np.matmul(video_feat, video_feat.T)
ncp_1cpps = n_frames / fps # number of cp at a rate of 1 cp per sec
max_num_cp = int(math.floor(ncp_1cpps / min_cp_interval))
change_points, _ = cpd_auto(kernel, max_num_cp, 1)
change_points *= decimation_factor
change_points = np.concatenate(([0], change_points, [n_frames]))
begin_points = change_points[:-1]
end_points = change_points[1:]
change_points = np.vstack((begin_points, end_points - 1)).T
n_frame_per_seg = end_points - begin_points
return change_points, n_frame_per_seg
def generate_summary(importance_scores, change_points, n_frames, picks, proportion=0.15,
save_as_video=False, video_path=None, summary_dir=None,
save_frames=False, save_keyframes=True):
"""
    Generate a keyshot-based video summary, i.e. a binary vector.
Args:
importance_scores: predicted importance scores.
change_points: 2D matrix, each row contains a segment.
n_frames: original number of frames.
picks: positions of subsampled frames in the original video.
proportion: length of video summary (compared to original video length).
"""
assert(importance_scores.shape == picks.shape)
picks = picks.astype(int)
if picks[-1] != n_frames:
picks = np.concatenate([picks, [n_frames]])
# Compute the importance scores for the initial frame sequence (not the subsampled one)
frame_scores = np.zeros(n_frames)
for i in range(len(picks) - 1):
score = importance_scores[i] if i < len(importance_scores) else 0
frame_scores[picks[i]:picks[i + 1]] = score
# Compute shot-level importance scores by taking the average importance scores of all frames in the shot
seg_scores = []
nfps = []
for segment in change_points:
seg_begin, seg_end = segment + np.asarray((0, 1))
seg_score = frame_scores[seg_begin:seg_end].mean()
seg_scores.append(float(seg_score))
nfps.append(int(seg_end - seg_begin))
# Select the best shots
limit = int(math.floor(n_frames * proportion))
keyshots = knapsack(seg_scores, nfps, limit)
# print("keyshots:", keyshots)
# Select all frames from each selected shot (by setting their value in the summary vector to 1)
summary = np.zeros(n_frames, dtype=np.uint8)
for seg_idx in keyshots:
seg_begin, seg_end = change_points[seg_idx] + np.asarray((0, 1))
summary[seg_begin:seg_end] = 1
keyshots = np.asarray(change_points)[keyshots]
keyframes = [begin + np.argmax(frame_scores[begin:end]) for begin, end in keyshots + (0, 1)]
# print("keyframes:", keyframes)
if save_as_video:
summary_duration = _save_summary(
video_path, summary, keyframes, summary_dir, save_frames, save_keyframes)
else:
summary_duration = None
return summary, keyshots, keyframes, summary_duration
def evaluate_summary(machine_summary, user_summary, eval_method="avg"):
"""
Compare machine summary with user summary (Keyshot-based).
Args:
machine_summary: summary by machine
user_summary: summary by user (annotation)
eval_method: {'avg', 'max'}
'avg' : average results of comparing multiple human summaries.
            'max' : takes the maximum (best) out of multiple comparisons.
"""
pred_len = len(machine_summary)
num_users, n_frames = user_summary.shape
assert(pred_len == n_frames)
# if n_frames < pred_len:
# machine_summary = machine_summary[:n_frames]
# elif pred_len < n_frames:
# zero_padding = np.zeros(n_frames - pred_len)
# machine_summary = np.concatenate([machine_summary, zero_padding])
if eval_method not in ["avg", "max"]:
print("Unsupported evaluation method:", eval_method)
eval_method = "avg"
# binarization
machine_summary = (0 < machine_summary).astype(int)
user_summary = (0 < user_summary).astype(int)
epsilon = 1e-8
f_scores = []
precisions = []
recalls = []
for user_idx in range(num_users):
gt_summary = user_summary[user_idx]
overlapped = (machine_summary * gt_summary).sum()
precision = overlapped / (machine_summary.sum() + epsilon)
recall = overlapped / (gt_summary.sum() + epsilon)
if (precision == 0) and (recall == 0):
f_score = 0.
else:
f_score = (2 * precision * recall) / (precision + recall) * 100
f_scores.append(f_score)
precisions.append(precision)
recalls.append(recall)
if eval_method == "avg":
final_f_score = np.mean(f_scores)
final_precision = np.mean(precisions)
final_recall = np.mean(recalls)
elif eval_method == "max":
max_idx = np.argmax(f_scores)
final_f_score = f_scores[max_idx]
final_precision = precisions[max_idx]
final_recall = recalls[max_idx]
return final_f_score, final_precision, final_recall
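# Illustrative check (not in the original file): with a machine summary that
# exactly matches a single user summary, precision and recall are both ~1 and
# the returned F-score is 100 (up to the epsilon used above).
#
#   f, p, r = evaluate_summary(np.array([0, 1, 1, 0]),
#                              np.array([[0, 1, 1, 0]]), eval_method="max")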
def _save_summary(video_path, summary, keyframes, summary_dir=None,
save_frames=False, save_keyframes=True):
if not video_path:
return
video_name = video_path.stem
if not summary_dir:
summary_dir = video_path.parent / f"{video_name}_summary"
frames_dir = summary_dir / "frames"
keyframes_dir = summary_dir / "keyframes"
os.makedirs(summary_dir, exist_ok=True)
os.makedirs(frames_dir, exist_ok=True)
os.makedirs(keyframes_dir, exist_ok=True)
summary_path = summary_dir / "summary.avi"
reader = cv2.VideoCapture(str(video_path))
n_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc(*"DIVX")
fps = reader.get(cv2.CAP_PROP_FPS)
frame_width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter(str(summary_path), fourcc, fps, (frame_width, frame_height))
print("saving summary ...")
n_frames_summary = 0
for frame_idx in tqdm(range(n_frames)):
success, frame = reader.read()
if not success:
break
if save_frames:
decimation_factor = 15
if (frame_idx % decimation_factor) == 0:
frame_path = frames_dir / f"Frame{frame_idx}.jpeg"
cv2.imwrite(str(frame_path), frame)
if not summary[frame_idx]:
continue
# reader.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
# success, frame = reader.read()
# assert(success)
if save_keyframes and (frame_idx in keyframes):
keyframe_path = keyframes_dir / f"Frame{frame_idx}.jpeg"
cv2.imwrite(str(keyframe_path), frame)
writer.write(frame)
n_frames_summary += 1
writer.release()
reader.release()
summary_duration = n_frames_summary / fps
return summary_duration
def plot_result(video_name, pred_summary, ground_truth_summary,
show=True, save_path=None):
assert(len(pred_summary) == len(ground_truth_summary))
frame_indexes = list(range(len(ground_truth_summary)))
sns.set()
plt.title(video_name)
colors = ["lightseagreen" if i == 0 else "orange" for i in pred_summary]
plt.bar(x=frame_indexes, height=ground_truth_summary, color=colors,
edgecolor=None, linewidth=0)
if show:
plt.show()
if save_path:
plt.savefig(save_path)
```
#### File: AC-SUM-GAN/evaluation/choose_best_epoch.py
```python
from pathlib import Path
import csv
import json
import sys
import torch
import numpy as np
"""
Chooses the best F-score (among 100 epochs) based on a criterion (Reward & Actor_loss).
Takes as input the path to a .csv file with all the loss functions and a .txt file with the F-Scores (for each split).
Prints a scalar that represents the average best F-score value.
"""
def use_logs(logs_file, f_scores):
losses = {}
losses_names = []
with open(logs_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
for (i, row) in enumerate(csv_reader):
if i == 0:
for col in range(len(row)):
losses[row[col]] = []
losses_names.append(row[col])
else:
for col in range(len(row)):
losses[losses_names[col]].append(float(row[col]))
# criterion: Reward & Actor_loss
actor = losses["actor_loss_epoch"]
reward = losses["reward_epoch"]
actor_t = torch.tensor(actor)
reward_t = torch.tensor(reward)
# Normalize values
actor_t = abs(actor_t)
actor_t = actor_t / max(actor_t)
reward_t = reward_t / max(reward_t)
product = (1 - actor_t) * reward_t
epoch = torch.argmax(product)
return np.round(f_scores[epoch], 2), epoch
# example usage: python choose_best_epoch.py <exp_dir> TVSum
exp_dir = Path(sys.argv[1])
dataset = sys.argv[2]
NUM_SPLITS = 5
NUM_SIGMAS = 10
# For each "sigma" value, compute the best F-Score of each split based on the criterion
all_fscores = np.zeros((NUM_SPLITS, NUM_SIGMAS, 2), dtype=float) # fscore, epoch
for i in range(NUM_SIGMAS):
sigma = 0.1 * (i + 1)
# change this path if you use different structure for your directories inside the experiment
path = exp_dir / f"{dataset}/sigma{sigma:.1f}"
for split in range(NUM_SPLITS):
split_dir = f"split{split}"
results_file = path / "results" / split_dir / "f_scores.txt"
logs_file = path / "logs" / split_dir / "scalars.csv"
# read F-Scores
with open(results_file) as f:
f_scores = json.loads(f.read()) # list of F-Scores
# best F-Score based on train logs
all_fscores[split, i] = use_logs(logs_file, f_scores)
all_fscore_epochs = all_fscores[:, :, 1].astype(int)
all_fscores = all_fscores[:, :, 0]
print("All F1 Scores:\n", all_fscores)
print("=> epoch:\n", all_fscore_epochs)
best_index_per_split = np.argmax(all_fscores, axis=1)
best_per_split = all_fscores[range(NUM_SPLITS), best_index_per_split]
best_sigma_per_split = (best_index_per_split + 1) * 0.1
best_epoch_per_split = all_fscore_epochs[range(NUM_SPLITS), best_index_per_split]
# best_per_split = np.max(all_fscores, axis=1)
print("Best F1 Score per Split:", best_per_split)
print("=> index:", best_index_per_split)
print("=> sigma:", best_sigma_per_split)
print("=> epoch:", best_epoch_per_split)
best_index = np.argmax(best_per_split)
best_fscore = best_per_split[best_index]
# best_fscore = np.mean(best_per_split)
print("Best F1 Score:", best_fscore)
print("=> sigma, split, epoch:", best_sigma_per_split[best_index], best_index, best_epoch_per_split[best_index])
print("Mean F1 Score:", np.mean(best_per_split))
```
#### File: AC-SUM-GAN/evaluation/compute_fscores.py
```python
import os
import sys
from pathlib import Path
_package_path = Path(__file__).parent.absolute()
_package_search_path = _package_path.parent
sys.path.append(str(_package_search_path))
import json
import numpy as np
import h5py
from data.vsum_tool import generate_summary, evaluate_summary
# example usage: python compute_fscores.py <results_dir> TVSum avg
results_dir = Path(sys.argv[1])
dataset = sys.argv[2]
eval_method = sys.argv[3]
print("results_dir:", results_dir)
print("dataset:", dataset)
print("eval_method:", eval_method)
# dataset prefix: {SumMe | TVSum}_
DS_PREFIX_LEN = 6
def epochFromFileName(fileName):
# file name format: {SumMe | TVSum}_{epoch}.json
try:
return int(fileName[DS_PREFIX_LEN:-5])
except:
return -1
results = os.listdir(results_dir)
results.sort(key=epochFromFileName)
HOME_PATH = _package_path / "../data"
DATASET_PATH = HOME_PATH / dataset / f"eccv16_dataset_{dataset.lower()}_google_pool5.h5"
# for each epoch, read the results' file and compute the f_score
f_score_epochs = []
for epoch in results:
print(epoch)
if epochFromFileName(epoch) < 0:
print(" Invalid epoch!")
continue
all_user_summary, all_summaries = [], []
with open(results_dir / epoch) as f:
epoch_results = json.loads(f.read())
with h5py.File(DATASET_PATH, "r") as hdf:
video_names = list(epoch_results.keys())
for video_name in video_names:
scores = np.asarray(epoch_results[video_name])
data = hdf[video_name]
user_summary = np.array(data["user_summary"])
change_points = np.array(data["change_points"])
n_frames = np.array(data["n_frames"])
picks = np.array(data["picks"])
summary, _, _, _ = generate_summary(scores, change_points, n_frames, picks)
all_user_summary.append(user_summary)
all_summaries.append(summary)
all_f_scores = []
# compare the resulting summary with the ground truth one, for each video
for video_index in range(len(all_summaries)):
summary = all_summaries[video_index]
user_summary = all_user_summary[video_index]
f_score, _, _ = evaluate_summary(summary, user_summary, eval_method)
all_f_scores.append(f_score)
f_score_epochs.append(np.mean(all_f_scores))
print(" f_score: ", np.mean(all_f_scores))
with open(results_dir / "f_scores.txt", "w") as outfile:
json.dump(f_score_epochs, outfile)
```
#### File: model/layers/FeatureExtractor.py
```python
import torch
import torch.nn as nn
import torchvision as tv
class FeatureExtractor(nn.Module):
"""Class is responsible for extracting deep features of a video frame (image)"""
def __init__(self, deep_feature_model="googlenet", use_gpu=True):
super(FeatureExtractor, self).__init__()
self._device = torch.device("cuda" if use_gpu and torch.cuda.is_available() else "cpu")
self.set_model(deep_feature_model)
def set_model(self, model_name):
# alexnet, resnet50, resnet152, googlenet
model_name = model_name.lower()
if not hasattr(tv.models, model_name):
print(f"Unsupported model {model_name}!")
model_name = "googlenet"
print(f"deep feature model: {model_name}")
model = getattr(tv.models, model_name)(pretrained=True)
# print(model)
pool_index = -3 if model_name == "googlenet" else -2
layers = list(model.children())[:pool_index + 1]
# print(layers)
self.model = nn.Sequential(*layers).float().eval().to(self._device)
self.preprocess = tv.transforms.Compose([
tv.transforms.Resize([224, 224]),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def forward(self, frame):
with torch.no_grad():
frame = self.preprocess(frame)
# add a dimension for batch
batch = frame.unsqueeze(0).to(self._device)
features = self.model(batch)
features = features.squeeze()
return features
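# Usage sketch (added for illustration, not part of the original file). The
# image path is hypothetical; with the default googlenet backbone the pooled
# feature vector has 1024 dimensions.
#
#   from PIL import Image
#   extractor = FeatureExtractor("googlenet", use_gpu=False)
#   features = extractor(Image.open("frame.jpg").convert("RGB"))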
``` |
{
"source": "Jongy/micropython-kernel-snippets",
"score": 2
} |
#### File: Jongy/micropython-kernel-snippets/ftrace_conditional_graph.py
```python
from kernel_ffi import KP_ARGS_MODIFY, callback, current, ftrace, kprobe
# create struct casters
tcphdr = partial_struct("tcphdr")
sk_buff = partial_struct("sk_buff")
net_protocol_s = partial_struct("net_protocol")
def swap16(n):
n = n & 0xffff
return ((n & 0xff) << 8) + (n >> 8)
trace_task = None
def my_trace_ignore_this_task(orig, filtered_pids, task):
"""
Returns true if @task should *NOT* be traced.
Returns false if @task should be traced.
"""
return 0 if task == trace_task else 1
# trace_ignore_this_task can't be ftraced itself (probably because it's a core function of
# ftrace?)
# but kprobe works :)
kp = kprobe("trace_ignore_this_task", KP_ARGS_MODIFY, my_trace_ignore_this_task)
def my_pre_tcp_v4_rcv(skb):
global trace_task
trace = False
skb = sk_buff(skb)
th = tcphdr(skb.head + skb.transport_header)
# is it TCP dport 9000?
if swap16(th.dest) == 9000:
trace = True
trace_task = current()
ftrace_filter_pid_sched_switch_probe(global_trace, False, None, current())
__trace_printk(0, "trace of tcp_v4_rcv starts...")
ret = tcp_v4_rcv(int(skb))
if trace:
trace_task = 0
ftrace_filter_pid_sched_switch_probe(global_trace, False, None, current())
__trace_printk(0, "trace of tcp_v4_rcv is done.")
return ret
cb = callback(my_pre_tcp_v4_rcv)
net_protocol_s(tcp_protocol).handler = cb.ptr()
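# and to undo the hook later (presumably not meant to run right after the line
# above), restore the original tcp_v4_rcv handler: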
net_protocol_s(tcp_protocol).handler = int(tcp_v4_rcv)
# to enable:
# mount -t tracefs none /sys/kernel/tracing
# cd /sys/kernel/tracing
# echo tcp_v4_rcv > set_graph_function
# echo function_graph > current_tracer
# echo 2 > set_ftrace_pid
```
#### File: Jongy/micropython-kernel-snippets/nonempty_dev_null.py
```python
file_operations = partial_struct("file_operations")
null_fops = file_operations(null_fops)
from kernel_ffi import callback
def my_read_null(file, buf, count, ppos):
pos = p64(ppos)
b = "who said that /dev/null must be empty?\n"[pos:]
l = min(len(b), count)
memcpy(buf, b, l)
p64(ppos, pos + l)
return l
c = callback(my_read_null)
null_fops.read = c.ptr()
```
#### File: Jongy/micropython-kernel-snippets/proc_pid_status_fs.py
```python
from kernel_ffi import ftrace
task_struct = partial_struct("task_struct")
def my_status(orig, m, ns, pid, task):
ret = orig(m, ns, pid, task)
fs = task_struct(task).thread.fsbase
seq_printf(m, "fs: %px\n", fs)
return ret
f = ftrace("proc_pid_status", my_status)
``` |
{
"source": "jongyoungcha/fuckerSQL",
"score": 2
} |
#### File: jongyoungcha/fuckerSQL/build.py
```python
import os
import sys
import stat
from optparse import OptionParser
import subprocess as sp
def main():
executable_args = ""
print("---- Start to build ----")
parser = OptionParser()
parser.add_option("--bin", action="store", type="string",
dest="bin", help="binary to make")
parser.add_option("--home", action="store", type="string",
dest="home", help="directory of work")
parser.add_option("--debug_mode", action="store_true", default=False,
dest="is_debug", help="The option for building with debug or not.")
parser.add_option("--cmd_export", action="store_true", default=False,
dest="is_cpl_export", help="The export of the compile command.")
parser.add_option("--exec", action="store_true", default=False,
dest="is_exec_bin", help="Executing of the binary file.")
parser.add_option("-a", "--args", action="store", type="string",
dest="args", help="Arguaments of executable")
(options, args) = parser.parse_args()
print("Executable :",options.bin)
print("Home directory : ",options.home)
print("Debug Mode : ",options.is_debug)
print("Cmd Export : ",options.is_cpl_export)
print("Execute : ",options.is_exec_bin)
print("Program Args : ",options.args)
    executable = options.bin
    work_dir = options.home
os.chdir(work_dir)
arguments = ""
if options.is_debug == True:
arguments += "-DCMAKE_BUILD_TYPE=Debug "
if options.is_cpl_export == True:
arguments += "-DCMAKE_EXPORT_COMPILE_COMMANDS=1 "
print("-------- cmake arguments --------")
print(arguments)
print("---------------------------------")
cmake_cmd = " cmake {0} .".format(arguments)
os.system(cmake_cmd)
os.system("make")
print(os.getcwd())
st = os.stat(executable)
os.chmod(executable, st.st_mode | stat.S_IEXEC)
if options.is_exec_bin == True:
if options.args is not None:
executable_args += options.args
# print("executable_args : ", executable_args)
# os.system("./{} {}".format(executable))
# os.system(executable)
proc_cmd = "{0}/{1} {2}".format(work_dir, executable, executable_args)
print("proc_cmd : ", proc_cmd)
os.system("make")
os.system(proc_cmd)
# print("executable_args : ", executable_args)
if __name__ == '__main__':
main()
``` |
{
"source": "Jongy/python_dicts",
"score": 2
} |
#### File: Jongy/python_dicts/dicts.py
```python
import sys
import platform
import textwrap
import ctypes
from ctypes import c_void_p, c_char, c_uint64, POINTER, py_object, Structure
Py_ssize_t = ctypes.c_int64 if ctypes.sizeof(ctypes.c_void_p) == 8 else ctypes.c_int32
Py_hash_t = Py_ssize_t
class PyDictKeyEntry(Structure):
_fields_ = [
("me_hash", Py_hash_t),
("me_key", py_object),
("me_value", py_object),
]
class _dictkeysobject(Structure):
_fields_ = [
("dk_refcnt", Py_ssize_t),
("dk_size", Py_ssize_t),
("dk_lookup", c_void_p),
("dk_usable", Py_ssize_t),
("dk_nentries", Py_ssize_t),
("dk_indices", c_char * 0),
# "PyDictKeyEntry dk_entries[dk_nentries + dk_usable]" follows
]
# py_object is PyObject*, this is the struct itself
class PyObject(Structure):
_fields_ = [
("ob_refcnt", Py_ssize_t),
("ob_type", py_object),
]
class PyDictObject(Structure):
_fields_ = [
("ob_base", PyObject),
("ma_used", Py_ssize_t),
("ma_version_tag", c_uint64),
("ma_keys", POINTER(_dictkeysobject)),
("ma_values", POINTER(py_object)),
# dk_indices follows
# and then dk_entries
]
def DK_SIZE(dk):
assert isinstance(dk, _dictkeysobject)
return dk.dk_size
if ctypes.sizeof(ctypes.c_void_p) > 4:
def DK_IXSIZE(dk):
if DK_SIZE(dk) <= 0xff:
return 1
elif DK_SIZE(dk) <= 0xffff:
return 2
elif DK_SIZE(dk) <= 0xffffffff:
return 4
else:
return 8
else:
def DK_IXSIZE(dk):
if DK_SIZE(dk) <= 0xff:
return 1
elif DK_SIZE(dk) <= 0xffff:
return 2
else:
return 4
def DK_ENTRIES(dk):
    return (PyDictKeyEntry * (dk.dk_nentries + dk.dk_usable)).from_address(
        ctypes.addressof(dk) + _dictkeysobject.dk_indices.offset + DK_SIZE(dk) * DK_IXSIZE(dk)
    )
def find_lookdicts():
# lookdict_split - get the dk_lookup from a dummy instance.
class X: pass
x = X()
lookdict_split = PyDictObject.from_address(id(x.__dict__)).ma_keys.contents.dk_lookup
# lookdict_unicode_nodummy - get the dk_lookup from a dict containing strings and no dummy entries
d = {"a": 1}
lookdict_unicode_nodummy = PyDictObject.from_address(id(d)).ma_keys.contents.dk_lookup
# lookdict_unicode - get the dk_lookup from a dict containing strings and dummy entries (deleted, in this case)
del d["a"]
lookdict_unicode = PyDictObject.from_address(id(d)).ma_keys.contents.dk_lookup
# lookdict - get the dk_lookup from a dict containing non-str keys
d[1] = 1
lookdict = PyDictObject.from_address(id(d)).ma_keys.contents.dk_lookup
# if these are not different, then we didn't manage to trick cpython into selecting
# the different lookdict functions :)
assert lookdict_split != lookdict_unicode_nodummy != lookdict_unicode != lookdict
return {
lookdict_split: "lookdict_split",
lookdict_unicode: "lookdict_unicode",
lookdict_unicode_nodummy: "lookdict_unicode_nodummy",
lookdict: "lookdict",
}
lookdicts = find_lookdicts()
def _py_object_is_null(obj, attr):
# funny way to check if a py_object field is NULL. I couldn't find any other way
# to do it with a quick search, so meh :/
try:
getattr(obj, attr)
return False
except ValueError as e:
if e.args[0] == "PyObject is NULL":
return True
raise
def _is_split(d: PyDictObject):
dk = d.ma_keys.contents
if lookdicts[dk.dk_lookup] == "lookdict_split":
assert bool(d.ma_values), "ma_values is NULL for split!"
return True
else:
assert not bool(d.ma_values), "ma_values is not NULL for non-split!"
return False
def get_dict_obj(x: dict):
assert type(x) is dict
return PyDictObject.from_address(id(x))
# impl of dictiter_iternextitem
def iter_dict(x: dict, indent: int = 0):
d = get_dict_obj(x)
dk = d.ma_keys.contents
for i in range(d.ma_used):
if _is_split(d):
assert i < d.ma_used, f"{i} < {d.ma_used}"
key = DK_ENTRIES(dk)[i].me_key
value = d.ma_values[i]
else:
n = dk.dk_nentries
entries = DK_ENTRIES(dk)
while i < n and _py_object_is_null(entries[i], "me_value"):
print(textwrap.indent(f"{i:4} : unused / dummy", " " * indent))
i += 1
assert i < n, f"{i} < {n}"
key = entries[i].me_key
value = entries[i].me_value
print(textwrap.indent(f"{i:4} : {key!r} : {value!r}", " " * indent))
def print_dict(x: dict):
d = get_dict_obj(x)
dk = d.ma_keys.contents
print("lookdict function:", lookdicts[dk.dk_lookup])
print("dict size (bytes):", sys.getsizeof(x))
print()
print("dict used:", d.ma_used)
print("dict version_tag:", d.ma_version_tag)
print("dict values:", hex(ctypes.cast(d.ma_values, ctypes.c_void_p).value or 0))
print()
print("keys size:", dk.dk_size)
print("keys nentries", dk.dk_nentries)
print("keys usable:", dk.dk_usable)
print("keys refcount (used by this many dicts):", dk.dk_refcnt)
def print_dict_all(x: dict):
print_dict(x)
print()
print("entries:")
iter_dict(x, indent=4)
def dict_version(x: dict):
return get_dict_obj(x).ma_version_tag
# checked on those versions only, others may vary.
assert sys.version_info[:2] in ((3, 8), (3, 9), (3, 10))
# and on Linux
assert sys.platform == "linux"
# x86_64
assert platform.machine() == "x86_64"
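# Demo (added for illustration, not part of the original file): inspect a small
# dict and watch ma_version_tag change after a mutation.
if __name__ == "__main__":
    d = {"a": 1, "b": 2}
    print_dict_all(d)
    before = dict_version(d)
    d["c"] = 3
    print("version tag changed after insert:", before != dict_version(d))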
``` |
{
"source": "jonhadfield/acli",
"score": 2
} |
#### File: acli/commands/ec2.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
from docopt import docopt
from acli.services import (ec2, cloudwatch)
def ec2_command(argv=None, aws_config=None):
ec2_res = docopt(__doc__, argv=argv)
if any((ec2_res.get('ls'), ec2_res.get('list'))):
ec2.ec2_list(aws_config, filter_term=ec2_res.get('--filter'))
elif ec2_res.get('info'):
ec2.ec2_info(aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('stop'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="stop")
elif ec2_res.get('reboot'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="reboot")
elif ec2_res.get('start'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="start")
elif ec2_res.get('terminate'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="terminate")
elif ec2_res.get('cpu'):
cloudwatch.ec2_cpu(aws_config=aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('net'):
cloudwatch.ec2_net(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
intervals=ec2_res.get('intervals')
)
elif ec2_res.get('vols'):
cloudwatch.ec2_vol(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
intervals=ec2_res.get('intervals')
)
elif ec2_res.get('summary'):
ec2.ec2_summary(aws_config=aws_config)
if __name__ == '__main__':
print(docopt(__doc__))
```
#### File: lib/acli/config.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import configparser
except ImportError:
from external.six.moves import configparser
import os
class Config(object):
def __init__(self, cli_args):
self.access_key_id = None
self.secret_access_key = None
self.region = None
self.config_loader(cli_args)
def is_config_loaded(self):
if self.access_key_id and self.secret_access_key and self.region:
return True
def config_loader(self, cli_args):
""" Load configuration from sources in order of precedence:
CLI, ENV, BOTO
"""
self.from_cli(cli_args)
self.load_acli_config()
def from_cli(self, cli_args):
cli_aws_region = cli_args.get('--region')
cli_access_key_id = cli_args.get('--access_key_id')
cli_secret_access_key = cli_args.get('--secret_access_key')
if cli_aws_region:
self.region = cli_aws_region
if cli_access_key_id:
self.access_key_id = cli_access_key_id
if cli_secret_access_key:
self.secret_access_key = cli_secret_access_key
    @staticmethod
    def load_acli_config():
        if os.path.isfile('acli.cfg'):
            parser = configparser.ConfigParser()
            parser.read('acli.cfg')
            return parser
```
#### File: lib/acli/errors.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
from botocore.exceptions import NoCredentialsError, NoRegionError, ClientError
def handle_boto_errors(function):
def handle_errors(*args, **kwargs):
try:
return function(*args, **kwargs)
except NoCredentialsError:
exit('Credentials not found. See here for more information:\n'
'http://boto3.readthedocs.org/en/latest/guide/configuration.html#configuration-files')
except NoRegionError:
exit('Cannot perform this task without specifying an AWS region.\n'
'Please check your boto/aws settings or specify using \'acli --region=<region>\'.')
except ClientError as ce:
response = ce.response
error_args = ce.args
error_message = response['Error']['Message']
error_code = response['Error']['Code']
joined_args = "".join(error_args)
if error_code in ('AccessDenied', 'AccessDeniedException'):
if 'iam:ListUsers' in error_message:
exit('You do not have permission to access IAM user list.\nDetail: {0}'.format(error_message))
elif 'DescribeLaunchConfigurations' in joined_args:
exit('You do not have permission to list Launch Configurations.')
elif 'DescribeAutoScalingGroups' in joined_args:
exit('You do not have permission to list Auto Scaling Groups.')
elif 'DescribeLoadBalancers' in joined_args:
exit('You do not have permission to list Elastic Load Balancers.')
elif 'ListDomainNames' in joined_args:
exit('You do not have permission to list Elastic Search domains.')
elif 'ListHostedZones' in joined_args:
exit('You do not have permission to list Route53 zones.')
else:
raise
elif error_code == 'UnauthorizedOperation':
if 'DescribeImages' in joined_args:
exit('You do not have permission to list AMIs.')
elif 'DescribeInstances' in joined_args:
exit('You do not have permission to list instances.')
elif 'DescribeAddresses' in joined_args:
exit('You do not have permission to list addresses.')
elif 'DescribeVpcs' in joined_args:
exit('You do not have permission to list VPCs.')
elif 'DescribeSecurityGroups' in joined_args:
exit('You do not have permission to list Security Groups.')
else:
raise
elif error_code == "RequestExpired":
exit("Temporary credentials have expired.")
else:
exit(ce)
except Exception:
raise
return handle_errors
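# Usage sketch (added for illustration, not part of the original module): the
# decorator wraps any boto3-backed call so missing credentials or permissions
# exit with a friendly message instead of a traceback. The function below is
# hypothetical.
#
#   @handle_boto_errors
#   def describe_instances(session):
#       return session.client('ec2').describe_instances()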
```
#### File: acli/output/efs.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
import humanize
from colorclass import Color, Windows
from acli.output import (output_ascii_table, output_ascii_table_list, dash_if_none)
from external.six import iteritems
Windows.enable(auto_colors=True, reset_atexit=True)
def get_tag(name=None, tags=None):
if tags:
for tag in tags:
if tag.get('Key') == name:
return tag.get('Value')
def colour_state(state=None):
if not state:
return Color('{autoblack}-{/autoblack}')
elif state == 'available':
return Color('{autogreen}' + state + '{/autogreen}')
elif state in ('deleting', 'deleted'):
return Color('{autored}' + state + '{/autored}')
elif state == 'creating':
return Color('{autoyellow}' + state + '{/autoyellow}')
def output_filesystems(filesystems=None, mount_targets=None):
"""
@type filesystems: dict
@type mount_targets: list
"""
td = list()
table_header = [Color('{autoblue}id{/autoblue}'),
Color('{autoblue}name{/autoblue}'),
Color('{autoblue}state{/autoblue}'),
Color('{autoblue}size / time (UTC){/autoblue}'),
Color('{autoblue}mode{/autoblue}'),
Color('{autoblue}mount targets{/autoblue}')
]
fs_ids = list()
for fs in filesystems:
size_in_bytes = fs.get('SizeInBytes')
size_in_bytes_value = size_in_bytes.get('Value')
if size_in_bytes_value:
size_in_bytes_value = humanize.naturalsize(size_in_bytes_value)
size_in_bytes_timestamp = size_in_bytes.get('Timestamp')
if size_in_bytes_timestamp:
size_in_bytes_timestamp = size_in_bytes_timestamp.replace(tzinfo=None, second=0)
created_time = fs.get('CreationTime')
if created_time:
created_time = created_time.replace(tzinfo=None, second=0)
td.append([fs.get('FileSystemId'),
dash_if_none(fs.get('Name')),
colour_state(fs.get('LifeCycleState')),
'{0} / {1}'.format(size_in_bytes_value,
dash_if_none(size_in_bytes_timestamp)),
fs.get('PerformanceMode'),
dash_if_none(fs.get('NumberOfMountTargets'))
])
fs_ids.append(fs.get('FileSystemId'))
output_ascii_table_list(table_title=Color('{autowhite}EFS Filesystems{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
# Output mount targets
td = list()
table_header = [Color('{autoblue}mount target id{/autoblue}'),
Color('{autoblue}filesystem id{/autoblue}'),
Color('{autoblue}lifecycle state{/autoblue}')
]
for mt in mount_targets:
td.append([mt.get('MountTargetId'),
mt.get('FileSystemId'),
colour_state(mt.get('LifeCycleState'))
])
output_ascii_table_list(table_title=Color('{autowhite}EFS Mount Targets{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
exit(0)
def output_dict(dict_=None):
"""
@type dict_: dict
"""
output = list()
for k, v in iteritems(dict_):
output.append('{0}: {1}\n'.format(k, str(v)))
return ''.join(output).rstrip()
def colour_created(state=None):
if not state:
return Color('{autoyellow}False{/autoyellow}')
else:
return Color('{autogreen}True{/autogreen}')
def colour_deleted(state=None):
if not state:
return Color('{autogreen}False{/autogreen}')
else:
return Color('{autored}True{/autored}')
def colour_processing(state=None):
if not state:
return Color('{autogreen}False{/autogreen}')
else:
return Color('{autoyellow}True{/autoyellow}')
def output_filesystem_info(filesystem=None, mount_targets=None):
"""
@type filesystem: dict
@type mount_targets: list
"""
if filesystem:
filesystem_details = filesystem.get('FileSystems')[0]
td = list()
td.append([Color('{autoblue}filesystem name{/autoblue}'),
dash_if_none(filesystem_details.get('Name'))])
td.append([Color('{autoblue}id{/autoblue}'),
filesystem_details.get('FileSystemId')])
td.append([Color('{autoblue}size{/autoblue}'),
filesystem_details['SizeInBytes']['Value']])
td.append([Color('{autoblue}owner id{/autoblue}'),
dash_if_none(filesystem_details.get('OwnerId'))])
td.append([Color('{autoblue}creation time{/autoblue}'),
dash_if_none(filesystem_details.get('CreationTime'))])
td.append([Color('{autoblue}lifecycle state{/autoblue}'),
dash_if_none(filesystem_details.get('LifeCycleState'))])
td.append([Color('{autoblue}no. mount targets{/autoblue}'),
dash_if_none(filesystem_details.get('NumberOfMountTargets'))])
td.append([Color('{autoblue}performance mode{/autoblue}'),
dash_if_none(filesystem_details.get('PerformanceMode'))])
td.append([Color('{autoblue}creation token{/autoblue}'),
dash_if_none(filesystem_details.get('CreationToken'))])
output_ascii_table(table_title=Color('{autowhite}EFS filesystem info{/autowhite}'),
table_data=td)
else:
exit('filesystem does not exist.')
if mount_targets:
table_header = [Color('{autoblue}mount target id{/autoblue}'),
Color('{autoblue}filesystem id{/autoblue}'),
Color('{autoblue}lifecycle state{/autoblue}'),
Color('{autoblue}ip address{/autoblue}'),
Color('{autoblue}subnet id{/autoblue}'),
Color('{autoblue}interface id{/autoblue}'),
Color('{autoblue}owner id{/autoblue}')
]
td = list()
for mt in mount_targets:
td.append([mt.get('MountTargetId'),
mt.get('FileSystemId'),
colour_state(mt.get('LifeCycleState')),
mt.get('IpAddress'),
mt.get('SubnetId'),
mt.get('NetworkInterfaceId'),
mt.get('OwnerId')
])
output_ascii_table_list(table_title=Color('{autowhite}EFS Mount Targets{/autowhite}'),
table_header=table_header,
table_data=td,
inner_heading_row_border=True)
exit(0)
```
#### File: acli/output/es.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
from colorclass import Color, Windows
from acli.output import (output_ascii_table, output_ascii_table_list)
Windows.enable(auto_colors=True, reset_atexit=True)
from external.six import iteritems
def get_tag(name=None, tags=None):
if tags:
for tag in tags:
if tag.get('Key') == name:
return tag.get('Value')
def output_domain_list(domains=None):
"""
@type domains: dict
"""
td = list()
table_header = [Color('{autoblue}domain name{/autoblue}')]
for domain in domains:
td.append([domain.get('DomainName')])
output_ascii_table_list(table_title=Color('{autowhite}ES domains{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
exit(0)
def output_dict(dict_=None):
"""
@type dict_: dict
"""
output = list()
for k, v in iteritems(dict_):
output.append('{0}: {1}\n'.format(k, str(v)))
return ''.join(output).rstrip()
def colour_created(state=None):
if not state:
return Color('{autoyellow}False{/autoyellow}')
else:
return Color('{autogreen}True{/autogreen}')
def colour_deleted(state=None):
if not state:
return Color('{autogreen}False{/autogreen}')
else:
return Color('{autored}True{/autored}')
def colour_processing(state=None):
if not state:
return Color('{autogreen}False{/autogreen}')
else:
return Color('{autoyellow}True{/autoyellow}')
def output_domain_info(domain=None):
"""
@type domain: dict
"""
if domain:
domain_details = domain.get('DomainStatusList')[0]
cluster_conf = domain_details.get('ElasticsearchClusterConfig')
td = list()
td.append([Color('{autoblue}domain name{/autoblue}'),
domain_details.get('DomainName')])
td.append([Color('{autoblue}endpoint{/autoblue}'),
domain_details.get('Endpoint')])
td.append([Color('{autoblue}created{/autoblue}'),
colour_created(domain_details.get('Created'))])
td.append([Color('{autoblue}deleted{/autoblue}'),
colour_deleted(domain_details.get('Deleted'))])
td.append([Color('{autoblue}processing{/autoblue}'),
colour_processing(domain_details.get('Processing'))])
td.append([Color('{autoblue}cluster config{/autoblue}'),
' '])
td.append([Color('{autoblue} dedicated master enabled{/autoblue}'),
str(cluster_conf.get('DedicatedMasterEnabled'))])
td.append([Color('{autoblue} instance type{/autoblue}'),
str(cluster_conf.get('InstanceType'))])
td.append([Color('{autoblue} instance count{/autoblue}'),
str(cluster_conf.get('InstanceCount'))])
td.append([Color('{autoblue} zone awareness{/autoblue}'),
str(cluster_conf.get('ZoneAwarenessEnabled'))])
td.append([Color('{autoblue}domain id{/autoblue}'),
domain_details.get('DomainId')])
td.append([Color('{autoblue}snapshot options{/autoblue}'),
output_dict(domain_details.get('SnapshotOptions'))])
td.append([Color('{autoblue}advanced options{/autoblue}'),
output_dict(domain_details.get('AdvancedOptions'))])
td.append([Color('{autoblue}ARN{/autoblue}'),
domain_details.get('ARN')])
output_ascii_table(table_title=Color('{autowhite}ES domain info{/autowhite}'),
table_data=td)
else:
exit('Domain does not exist.')
exit(0)
```
#### File: acli/output/__init__.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
import math
import os
from colorclass import Color, Windows
from terminaltables import AsciiTable
from acli.utils import get_console_dimensions
try:
input = raw_input
except NameError:
pass
Windows.enable(auto_colors=True, reset_atexit=True)
def output_ascii_table(table_title=None,
table_data=None,
inner_heading_row_border=False,
inner_footing_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_footing_row_border: bool
@type inner_row_border: bool
"""
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.inner_footing_row_border = inner_footing_row_border
table.title = table_title
print(table.table)
def output_ascii_table_list(table_title=None,
table_data=None,
table_header=None,
inner_heading_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_row_border: bool
@type table_header: list
"""
console_rows, _ = get_console_dimensions()
console_rows = int(console_rows)
full_display_length = len(table_data) + 7
items_per_page = console_rows - 7
num_pages = 0
if full_display_length > console_rows:
try:
num_pages = int(math.ceil(float(len(table_data)) / float(items_per_page)))
except ZeroDivisionError:
exit('Console too small to display.')
if num_pages:
running_count = 0
for page in range(1, num_pages + 1):
page_table_output = list()
page_table_output.insert(0, table_header)
upper = (console_rows + running_count) - 7
if upper > len(table_data):
upper = len(table_data)
for x in range(running_count, upper):
page_table_output.append(table_data[x])
running_count += 1
table = AsciiTable(page_table_output)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
if page != 1:
print('')
print(table.table)
if page < num_pages:
input("Press Enter to continue...")
os.system('clear')
else:
table_data.insert(0, table_header)
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
print(table.table)
def dash_if_none(item=None):
"""
@type item: object
"""
return str(item) if item else Color('{autoblack}-{/autoblack}')
def get_tags(tags, separator=', '):
"""
@type tags: list
@type separator: unicode
"""
tag_list = list()
for tag in tags:
tag_list.append("{0}:{1}".format(tag.get('Key'), tag.get('Value')))
if tag_list:
return separator.join(tag_list)
def get_name_tag(tags):
"""
@type tags: dict
"""
for tag_name, tag_value in tags.iteritems():
if tag_name == 'Name':
return tag_value
return "-"
```
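A minimal usage sketch for the table helpers above (not part of the acli source); it assumes `colorclass` and `terminaltables` are installed and that the code runs in a real terminal, since pagination relies on `get_console_dimensions()`.
```python
# Hypothetical usage of the output helpers defined above.
from colorclass import Color
from acli.output import output_ascii_table, output_ascii_table_list, dash_if_none

# Key/value style table, as used by the *_info commands.
td = [
    [Color('{autoblue}name{/autoblue}'), 'example'],
    [Color('{autoblue}state{/autoblue}'), dash_if_none(None)],  # renders a dimmed dash
]
output_ascii_table(table_title=Color('{autowhite}example info{/autowhite}'), table_data=td)

# Headed list table, as used by the *_list commands; output is paginated
# when the rows do not fit the reported console height.
header = [Color('{autoblue}id{/autoblue}'), Color('{autoblue}value{/autoblue}')]
rows = [[str(i), 'row {0}'.format(i)] for i in range(5)]
output_ascii_table_list(table_title=Color('{autowhite}example list{/autowhite}'),
                        table_header=header,
                        table_data=rows,
                        inner_heading_row_border=True)
```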
#### File: acli/output/snapshots.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
from colorclass import Color, Windows
from acli.output import (output_ascii_table_list, dash_if_none)
Windows.enable(auto_colors=True, reset_atexit=True)
def colour_state(state=None):
if not state:
return Color('{autoblack}-{/autoblack}')
elif state == 'running':
return Color('{autogreen}' + state + '{/autogreen}')
elif state in ('stopped', 'stopping', 'shutting-down', 'terminated'):
return Color('{autored}' + state + '{/autored}')
elif state in ('rebooting', 'pending'):
return Color('{autoyellow}' + state + '{/autoyellow}')
def output_snapshot_list(snapshots=None):
"""
@type snapshots: list
"""
td = list()
table_header = [Color('{autoblue}name{/autoblue}'),
Color('{autoblue}id{/autoblue}'),
Color('{autoblue}size (GiB){/autoblue}'),
Color('{autoblue}description{/autoblue}'),
Color('{autoblue}status{/autoblue}'),
Color('{autoblue}started{/autoblue}'),
Color('{autoblue}progress{/autoblue}'),
Color('{autoblue}encrypted{/autoblue}')]
for snapshot in snapshots:
td.append([dash_if_none(snapshot.get('Tags')),
dash_if_none(snapshot.get('SnapshotId')),
dash_if_none(snapshot.get('VolumeSize')),
dash_if_none(snapshot.get('Description')),
dash_if_none(snapshot.get('State')),
dash_if_none(snapshot.get('StartTime (UTC)')),
dash_if_none(snapshot.get('Progress')),
str(snapshot.get('Encrypted'))])
output_ascii_table_list(table_title=Color('{autowhite}orphaned snapshots{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
exit(0)
```
#### File: acli/services/elb.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
import botocore.exceptions
from acli.connections import get_client
from acli.errors import handle_boto_errors
from acli.output.elb import output_elbs, output_elb_info
@handle_boto_errors
def get_elb_list(aws_config):
"""
@type aws_config: Config
"""
elb_client = get_client(client_type='elb', config=aws_config)
elbs = elb_client.describe_load_balancers().get('LoadBalancerDescriptions')
return elbs
@handle_boto_errors
def get_elb(aws_config, elb_name=None):
"""
@type aws_config: Config
@type elb_name: unicode
"""
if elb_name:
try:
elb_client = get_client(client_type='elb', config=aws_config)
elbs = elb_client.describe_load_balancers(LoadBalancerNames=[elb_name])
if elbs and elbs.get('LoadBalancerDescriptions'):
return elbs.get('LoadBalancerDescriptions')[0]
except botocore.exceptions.ClientError:
exit('ELB: {0} could not be found.'.format(elb_name))
def elb_list(aws_config):
"""
@type aws_config: Config
"""
output_elbs(elbs=get_elb_list(aws_config))
def elb_info(aws_config=None, elb_name=None):
"""
@type aws_config: Config
@type elb_name: unicode
"""
output_elb_info(elb=get_elb(aws_config,
elb_name=elb_name))
```
#### File: acli/services/s3.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
import hashlib
import io
from botocore.exceptions import ClientError
from acli.connections import get_client
from acli.errors import handle_boto_errors
from acli.output.s3 import (output_s3_list, output_s3_info)
@handle_boto_errors
def check_bucket_accessible(s3_client=None, bucket_name=None):
try:
s3_client.head_bucket(Bucket=bucket_name)
return True
except ClientError:
exit("Unable to access bucket.")
except Exception as unhandled:
exit('Unhandled exception: {0}'.format(unhandled))
@handle_boto_errors
def get_s3_file_md5(s3_client=None, bucket_name=None, path=None):
try:
key_detail = s3_client.head_object(Bucket=bucket_name, Key=path)
return key_detail.get('ETag')[1:-1]
except ClientError:
pass
def get_local_file_md5(path=None):
try:
with io.open(path, "rb") as local_file:
return hashlib.md5(local_file.read()).hexdigest()
except IOError:
pass
@handle_boto_errors
def s3_list(aws_config=None, item=None):
"""
@type aws_config: Config
@type item: unicode
"""
s3_client = get_client(client_type='s3', config=aws_config)
buckets = s3_client.list_buckets()
if not item:
if buckets.get('Buckets'):
output_s3_list(buckets=buckets.get('Buckets'))
else:
exit("No buckets found.")
else:
prefix = ''
if item and '/' in item:
path_elements = item.split('/')
bucket_name = path_elements[0]
prefix = "/".join(path_elements[1:])
else:
bucket_name = item
check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
try:
objects = s3_client.list_objects(Bucket=bucket_name, Prefix=prefix, Delimiter='/')
if not any((objects.get('CommonPrefixes'),
(objects.get('Contents')))):
exit('Nothing found in: {0}'.format(item[:-1]))
common_prefixes = objects.get('CommonPrefixes', list())
folders = list()
for first_bit in common_prefixes:
folders.append(first_bit)
output_s3_list(objects=objects, folders=folders, item=item, bucket_name=bucket_name)
except ClientError as ce:
if 'NoSuchBucket' in ce.response['Error']['Code']:
exit('Bucket not found.')
else:
exit('Unhandled error: {0}'.format(ce.response['Error']['Code']))
@handle_boto_errors
def s3_info(aws_config=None, item=None):
"""
@type aws_config: Config
@type item: unicode
"""
s3_client = get_client(client_type='s3', config=aws_config)
prefix = ''
if item and '/' in item:
path_elements = item.split('/')
bucket_name = path_elements[0]
prefix = "/".join(path_elements[1:])
if prefix.endswith('/'):
prefix = prefix[:-1]
else:
bucket_name = item
buckets = s3_client.list_buckets()
owner = buckets.get('Owner')
try:
if bucket_name:
check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
s3_object = s3_client.get_object(Bucket=bucket_name, Key=prefix)
output_s3_info(s3_object=s3_object, key=prefix, bucket=bucket_name)
else:
output_s3_info(owner=owner)
except ClientError as ce:
if 'NoSuchBucket' in ce.response['Error']['Code']:
exit('Bucket not found.')
elif 'NoSuchKey' in ce.response['Error']['Code']:
exit('Key not found.')
else:
exit('Unhandled error: {0}'.format(ce.response['Error']['Code']))
@handle_boto_errors
def s3_cp(aws_config=None, source=None, dest=None):
"""
@type aws_config: Config
@type source: unicode
@type dest: unicode
"""
from acli.utils import (is_readable)
from boto3.s3.transfer import S3Transfer, TransferConfig
import os
config = TransferConfig(
multipart_threshold=200 * 1024 * 1024,
max_concurrency=10,
num_download_attempts=10,
)
s3_prefix = 's3://'
s3_client = get_client(client_type='s3', config=aws_config)
if source.startswith(s3_prefix) and not dest.startswith(s3_prefix):
# COPYING FROM S3 TO LOCAL
s3_location = source[5:].split('/')
bucket_name = s3_location[0]
s3_source = '/'.join(s3_location[1:])
check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
if dest == '/':
dest = '{0}{1}'.format(os.path.abspath(dest), s3_location[-1])
elif dest == '.' or dest.endswith('/'):
dest = '{0}/{1}'.format(os.path.abspath(dest), s3_location[-1])
elif os.path.isdir(os.path.abspath(dest)):
dest = '{0}/{1}'.format(dest, s3_location[-1])
s3_file_md5 = get_s3_file_md5(s3_client=s3_client,
bucket_name=bucket_name,
path=s3_source)
if s3_file_md5:
if s3_file_md5 == get_local_file_md5(dest):
exit('Not transferring as identical file already exists.')
else:
exit('Cannot find: {0}/{1}'.format(bucket_name, s3_source))
transfer = S3Transfer(s3_client, config)
try:
print('Transferring: {0} to: {1}'.format(source, dest))
transfer.download_file(bucket_name, s3_source, dest)
except BaseException as e:
if hasattr(e, 'strerror') and e.strerror == 'Permission denied':
exit('Permission denied.')
else:
print('Unhandled exception: {0}'.format(e))
elif source.startswith(s3_prefix) and dest.startswith(s3_prefix):
# COPYING FROM S3 TO S3
print('Transferring: {0} to: {1}'.format(source, dest))
exit('Not yet implemented.')
elif not source.startswith(s3_prefix) and dest.startswith(s3_prefix):
try:
# COPYING ITEM(S) FROM LOCAL TO S3
if os.path.isdir(source):
exit('File transfers only for now.')
else:
# COPY LOCAL FILE TO S3
if not is_readable(source):
exit('Cannot access: {0}'.format(source))
s3_location = dest[5:].split('/')
bucket_name = s3_location[0]
s3_dest = '/'.join(s3_location[1:])
# COPYING FILE TO A FOLDER
if dest.endswith('/'):
file_name = source.split('/')[-1]
s3_dest += file_name
check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
# CHECK IF FILES ARE IDENTICAL
s3_file_md5 = get_s3_file_md5(s3_client=s3_client,
bucket_name=bucket_name,
path=s3_dest)
                # If it's multipart, then don't bother checking and just transfer
if s3_file_md5 and '-' not in s3_file_md5:
local_file_md5 = get_local_file_md5(path=source)
if local_file_md5 == s3_file_md5:
exit('Not transferring as identical file already exists.')
print('Transferring: {0} to: {1}'.format(source, dest))
transfer = S3Transfer(s3_client, config)
transfer.upload_file(source, bucket_name, s3_dest)
except ClientError as ce:
if 'AccessDenied' in ce.response['Error']['Code']:
exit('Access denied. Please check permissions.')
except Exception as e:
print('Unhandled exception: {0}'.format(e))
else:
exit('Source or dest must be an S3 location defined with s3://.')
exit()
@handle_boto_errors
def s3_rm(aws_config=None, item=None):
"""
@type aws_config: Config
@type item: unicode
"""
s3_client = get_client(client_type='s3', config=aws_config)
prefix = ''
bucket_name = ''
if item and '/' in item:
path_elements = item.split('/')
bucket_name = path_elements[0]
prefix = "/".join(path_elements[1:])
if prefix.endswith('/'):
exit('Only keys can currently be removed.')
check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
else:
exit('Invalid key.')
try:
s3_client.head_object(Bucket=bucket_name, Key=prefix)
except ClientError:
exit('Cannot access \'{0}\'.'.format(item))
try:
s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': prefix}, ]})
exit('\'{0}\' deleted.'.format(item))
except ClientError as error:
if 'NoSuchBucket' in error.response['Error']['Code']:
exit('Bucket not found.')
elif 'NoSuchKey' in error.response['Error']['Code']:
exit('Key not found.')
else:
exit('Unhandled error: {0}'.format(error.response['Error']['Code']))
```
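The `s3_cp` flow above skips a transfer when the local and remote MD5 digests match; a hedged sketch of the local half of that check (assumes the acli package is importable, and the path shown is illustrative).
```python
# Hedged sketch: the local-file side of the identical-file check used by s3_cp above.
from acli.services.s3 import get_local_file_md5

digest = get_local_file_md5(path='/etc/hosts')  # illustrative path
print(digest)  # hex MD5 digest, or None if the file cannot be read
```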
#### File: acli/services/vpc.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
from botocore.exceptions import ClientError
from acli.connections import get_client
from acli.errors import handle_boto_errors
from acli.output.vpc import (output_vpc_list, output_vpc_info)
@handle_boto_errors
def vpc_list(aws_config=None):
"""
@type aws_config: Config
"""
ec2_client = get_client(client_type='ec2', config=aws_config)
vpcs = ec2_client.describe_vpcs()
if vpcs.get('Vpcs'):
output_vpc_list(vpcs=vpcs)
else:
exit("No VPCs found.")
@handle_boto_errors
def vpc_info(aws_config=None, vpc_id=None):
"""
@type aws_config: Config
@type vpc_id: unicode
"""
ec2_client = get_client(client_type='ec2', config=aws_config)
try:
vpcs = ec2_client.describe_vpcs(VpcIds=[vpc_id])
all_subnets = ec2_client.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
output_vpc_info(vpc=vpcs['Vpcs'][0], subnets=all_subnets)
except (ClientError, IndexError):
exit("Cannot find VPC: {0}".format(vpc_id))
```
#### File: lib/acli/utils.py
```python
from __future__ import (absolute_import, print_function, unicode_literals)
import sys
import os
import platform
import base64
BASH_COMPLETION_SCRIPT = "TBC"
BASH_COMPLETION_PATH_OSX = "/usr/local/etc/bash_completion.d"
BASH_COMPLETION_PATH_LINUX = "/etc/bash_completion.d"
def install_completion():
if platform.system() == 'Darwin':
if os.path.exists(BASH_COMPLETION_PATH_OSX):
with open("{0}/acli".format(BASH_COMPLETION_PATH_OSX), 'w') as acli_file:
acli_file.write(base64.b64decode(BASH_COMPLETION_SCRIPT))
sys.exit("bash completion script written to: {0}/acli\nrestart terminal to use.".format(
BASH_COMPLETION_PATH_OSX)
)
else:
sys.exit("bash completion not installed. try 'brew install bash-completion'.")
elif platform.system() == 'Linux':
if os.path.exists(BASH_COMPLETION_PATH_LINUX):
with open("{0}/acli".format(BASH_COMPLETION_PATH_LINUX), 'w') as acli_file:
acli_file.write(base64.b64decode(BASH_COMPLETION_SCRIPT))
sys.exit("bash completion script written to: {0}/acli\nrestart terminal to use.".format(
BASH_COMPLETION_PATH_LINUX)
)
else:
sys.exit("bash completion not installed.")
else:
sys.exit("Shell completion only available on Linux and OS X.")
def get_console_dimensions():
try:
rows, columns = os.popen('stty size', 'r').read().split()
except ValueError:
        rows, columns = 25, 80
return rows, columns
def is_readable(path=None):
"""Test if the supplied filesystem path can be read
:param path: A filesystem path
:return: True if the path is a file that can be read. Otherwise, False.
"""
if os.path.isfile(path) and os.access(path, os.R_OK):
return True
return False
def is_writeable(path=None):
"""Test if the supplied filesystem path can be written to
:param path: A filesystem path
:return: True if the path is a file that can be written. Otherwise, False.
"""
    if os.path.isfile(path) and os.access(path, os.W_OK):
        return True
    return False
def get_tag_value(name=None, tags=None):
if tags:
for tag in tags:
if tag.get('Key') == name:
return tag.get('Value')
``` |
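The tag helpers at the end of the module expect the AWS-style list of `Key`/`Value` dicts; a short illustrative example of the input shape (not part of the repo).
```python
# Illustrative input shape for get_tag_value.
from acli.utils import get_tag_value

tags = [{'Key': 'Name', 'Value': 'web-1'}, {'Key': 'env', 'Value': 'prod'}]
print(get_tag_value(name='Name', tags=tags))     # 'web-1'
print(get_tag_value(name='missing', tags=tags))  # None
```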
{
"source": "jonhadfield/ansible-lookups",
"score": 2
} |
#### File: jonhadfield/ansible-lookups/aws_ec2_allocation_id_from_eip.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("aws_ec2_allocation_id_from_eip lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
region = terms[0][0]
public_ip = terms[0][1]
session=boto3.session.Session(region_name=region)
try:
            ec2_client=session.client('ec2')
        except botocore.exceptions.NoRegionError:
raise AnsibleError("AWS region not specified")
ip_filter=[{'Name': 'public-ip', 'Values': [public_ip]}]
        result=ec2_client.describe_addresses(Filters=ip_filter)
if result and result.get('Addresses'):
return [result.get('Addresses')[0].get('AllocationId').encode('utf-8')]
return None
```
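In a playbook this plugin would normally be invoked as `lookup('aws_ec2_allocation_id_from_eip', [region, eip])`; the sketch below drives it directly from Python for a manual check. It is a hedged example: it assumes boto3 credentials are configured, the Elastic IP exists, and the region and address shown are placeholders.
```python
# Hedged sketch (not part of the repo): exercising the lookup plugin directly.
from aws_ec2_allocation_id_from_eip import LookupModule

lookup = LookupModule()
result = lookup.run([['eu-west-1', '203.0.113.10']])  # placeholder region and EIP
print(result)  # e.g. [b'eipalloc-0123456789abcdef0'], or None if no match
```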
#### File: jonhadfield/ansible-lookups/aws_ec2_instance_status_from_id.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# import os
# import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
# import boto
# import boto.ec2
import boto3
import botocore
except ImportError:
raise AnsibleError("aws_ec2_instance_status_from_name lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
region=terms[0][0]
instance_id=terms[0][1]
session=boto3.session.Session(region_name=region)
try:
ec2_client=session.client('ec2')
except botocore.exceptions.NoRegionError:
raise AnsibleError("AWS region not specified")
instance_filter=[{'Name': 'instance-id', 'Values': [instance_id]}]
result=ec2_client.describe_instance_status(InstanceIds=[instance_id])
if result and result.get('InstanceStatuses'):
return [result.get('InstanceStatuses')[0].get('InstanceState').get('Name').encode('utf-8')]
return None
```
#### File: jonhadfield/ansible-lookups/aws_vpc_id_from_name.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("aws_vpc_id_from_name lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
vpc_ids = []
region = terms[0][0]
vpc_names = terms[0][1]
session = boto3.session.Session(region_name=region)
try:
ec2_client = session.client('ec2')
except botocore.exceptions.NoRegionError:
raise AnsibleError("AWS region not specified.")
vpc_filter = [{'Name': 'tag:Name', 'Values': [vpc_names]}]
result = ec2_client.describe_vpcs(Filters=vpc_filter)
vpcs = result.get('Vpcs')
if vpcs:
vpc_ids.append(vpcs[0].get('VpcId').encode('utf-8'))
return vpc_ids
``` |
{
"source": "jonhadfield/aws_info",
"score": 2
} |
#### File: aws_info/aws_info/eip.py
```python
def get_unassociated_eips(connection):
all_ips = connection.get_all_addresses()
return [x for x in all_ips if not x.instance_id]
```
#### File: aws_info/aws_info/utils.py
```python
import sys
import time
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
``` |
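`spinning_cursor` is an infinite generator that yields one spinner frame per `next()` call; a hedged sketch of driving it from a small loop (the import path assumes the package layout shown above).
```python
# Hypothetical driver for spinning_cursor() (not part of the repo).
import sys
import time

from aws_info.utils import spinning_cursor

spinner = spinning_cursor()
for _ in range(20):
    sys.stdout.write(next(spinner))
    sys.stdout.flush()
    time.sleep(0.1)
    sys.stdout.write('\b')  # erase the frame before drawing the next one
```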
{
"source": "jonhadfield/creds",
"score": 2
} |
#### File: creds/tests/test_ssh.py
```python
from __future__ import (absolute_import, unicode_literals, print_function)
import pytest
from creds.ssh import PublicKey
from .sample_data import PUBLIC_KEYS
def test_invalid_public_key_setup():
with pytest.raises(AttributeError):
assert PublicKey()
def test_create_public_key_from_encoded():
for key in PUBLIC_KEYS:
public_key = PublicKey(
b64encoded=key['encoded'])
assert public_key.raw == key['raw']
assert public_key.b64encoded == key['encoded']
def test_create_public_key_from_raw():
for key in PUBLIC_KEYS:
public_key = PublicKey(raw=key['raw'])
assert public_key.b64encoded == key['encoded']
assert public_key.raw == key['raw']
def test_public_key_repr_and_str():
public_key = PublicKey(raw=PUBLIC_KEYS[0]['raw'])
assert str(public_key) == public_key.__repr__()
``` |
{
"source": "jon-hagg/fs-azureblob",
"score": 2
} |
#### File: fs/azblob/blob_fs.py
```python
import datetime
import io
from azure.storage.blob import ContainerClient
from fs.base import FS
from fs.enums import ResourceType
from fs.errors import PermissionDenied
from fs.info import Info
from fs.path import basename
from fs.subfs import SubFS
from fs.time import datetime_to_epoch
def _convert_to_epoch(props: dict) -> None:
for k, v in props.items():
if isinstance(v, datetime.datetime):
props[k] = datetime_to_epoch(v)
class BlobFS(FS):
def __init__(self, account_name: str, container: str):
super().__init__()
self.client = ContainerClient(
account_url=f"https://{account_name}.blob.core.windows.net",
container_name=container,
)
def getinfo(self, path, namespaces=None) -> Info:
namespaces = namespaces or ()
info = {"basic": {"name": basename(path), "is_dir": False}}
if "details" in namespaces:
blob_client = self.client.get_blob_client(path)
props = blob_client.get_blob_properties()
details = {}
details["accessed"] = props["last_accessed_on"]
details["created"] = props["creation_time"]
details["metadata_changed"] = None
details["modified"] = props["last_modified"]
details["size"] = props["size"]
details["type"] = ResourceType.file
_convert_to_epoch(details)
info["details"] = details
return Info(info)
def listdir(self, path) -> list:
path = self.validatepath(path)
parts = path.split("/")
num_parts = 0 if path == "" else len(parts)
suffix = parts[-1]
all = (b.name.split("/") for b in self.client.list_blobs(path))
return list({p[num_parts] for p in all if suffix in p or suffix == ""})
def openbin(self, path, mode="r", buffering=-1, **options) -> io.IOBase:
path = self.validatepath(path)
blob_client = self.client.get_blob_client(path)
download_stream = blob_client.download_blob()
result = io.BytesIO()
download_stream.readinto(result)
result.seek(0)
return result
def validatepath(self, path: str) -> str:
if path == ".":
path = ""
path = path.strip("/")
return path
def makedir(self, path, permissions=None, recreate=False) -> SubFS:
raise PermissionDenied
def remove(self, path) -> None:
raise PermissionDenied
def removedir(self, path) -> None:
raise PermissionDenied
def setinfo(self, path, info) -> None:
raise PermissionDenied
``` |
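A hedged, read-only usage sketch of `BlobFS`; the account, container, and blob names are placeholders, and anonymous read access to the container is assumed because the constructor passes no credential.
```python
# Hedged usage sketch (not part of the source); names are placeholders.
from fs.azblob.blob_fs import BlobFS

azfs = BlobFS(account_name="myaccount", container="mycontainer")
print(azfs.listdir(""))  # top-level prefixes and blob names

info = azfs.getinfo("some/blob.txt", namespaces=["details"])
print(info.size, info.modified)

with azfs.openbin("some/blob.txt") as fh:
    data = fh.read()
```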
{
"source": "jonhealy1/lambda_serverless",
"score": 2
} |
#### File: lambda_serverless/hello-world-python/handler.py
```python
def hello(event, context):
print("second update!")
return "hello-world"
``` |
{
"source": "jonhealy1/stac-fastapi-nodb",
"score": 2
} |
#### File: stac_fastapi/nodb/transactions.py
```python
import logging
import asyncio
import json
from datetime import datetime
import redis
from redis.commands.json.path import Path
import attr
from stac_pydantic.shared import DATETIME_RFC339
from stac_fastapi.nodb.config import Tile38Settings
from stac_fastapi.nodb.serializers import CollectionSerializer, ItemSerializer
from stac_fastapi.nodb.session import Session
from stac_fastapi.extensions.third_party.bulk_transactions import (
BaseBulkTransactionsClient,
Items,
)
from stac_fastapi.types import stac as stac_types
from stac_fastapi.types.core import BaseTransactionsClient
from stac_fastapi.types.errors import ConflictError, ForeignKeyError, NotFoundError
from stac_fastapi.types.links import CollectionLinks
logger = logging.getLogger(__name__)
COLLECTIONS = []
@attr.s
class TransactionsClient(BaseTransactionsClient):
"""Transactions extension specific CRUD operations."""
session: Session = attr.ib(default=attr.Factory(Session.create_from_env))
settings = Tile38Settings()
tile38_client = settings.create_tile_38_client
redis_client = settings.create_redis_client
def create_item(self, model: stac_types.Item, **kwargs):
"""Create item."""
base_url = str(kwargs["request"].base_url)
##### implement after bulk sync post request
# # If a feature collection is posted
# if model["type"] == "FeatureCollection":
# bulk_client = BulkTransactionsClient()
# processed_items = [
# bulk_client._preprocess_item(item, base_url)
# for item in model["features"]
# ]
# return_msg = f"Successfully added {len(processed_items)} items."
# bulk_client.bulk_sync(processed_items)
# return return_msg
# If a single item is posted
self.check_collection_not_exists(model)
if self.redis_client.json().get(model["id"]):
raise ConflictError(
f"Item {model['id']} in collection {model['collection']} already exists"
)
data = ItemSerializer.stac_to_db(model, base_url)
self.redis_client.json().set(model["id"], Path.rootPath(), data)
### run async code for tile38 client
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
coroutine = self.create_geojson_object(data)
loop.run_until_complete(coroutine)
return ItemSerializer.db_to_stac(data, base_url)
# async example for tile38 client
async def create_geojson_object(self, item: stac_types.Item):
### tile 38 def function
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# coroutine = self.jset_collection(model)
# response = loop.run_until_complete(coroutine)
# return str(response)
await self.tile38_client.set("items", item["id"]).object(item["geometry"]).exec()
# response = await self.tile38_client.get("items", item["id"]).asObject()
# return response.object
def create_collection(self, model: stac_types.Collection, **kwargs):
"""Create collection."""
base_url = str(kwargs["request"].base_url)
collection_links = CollectionLinks(
collection_id=model["id"], base_url=base_url
).create_links()
model["links"] = collection_links
if self.redis_client.json().get(model["id"]):
raise ConflictError(f"Collection {model['id']} already exists")
self.redis_client.json().set(model["id"], Path.rootPath(), model)
self.redis_client.sadd("collections", model["id"])
collection = self.redis_client.json().get(model["id"])
return CollectionSerializer.db_to_stac(collection, base_url)
def check_collection_not_exists(self, model):
if not self.redis_client.json().get(model["collection"]):
raise ForeignKeyError(f"Collection {model['collection']} does not exist")
def check_collection_not_found(self, collection_id):
if not self.redis_client.json().get(collection_id):
raise NotFoundError(f"Collection {collection_id} not found")
def check_item_not_exists(self, item_id, collection_id):
if not self.redis_client.json().get(item_id):
raise NotFoundError(
f"Item {item_id} in collection {collection_id} doesn't exist"
)
def update_item(self, item: stac_types.Item, **kwargs):
"""Update item."""
base_url = str(kwargs["request"].base_url)
now = datetime.utcnow().strftime(DATETIME_RFC339)
item["properties"]["updated"] = str(now)
self.check_collection_not_exists(item)
self.check_item_not_exists(item["id"], item["collection"])
self.delete_item(item["id"], item["collection"])
self.create_item(item, **kwargs)
return ItemSerializer.db_to_stac(item, base_url)
def update_collection(self, model: stac_types.Collection, **kwargs):
"""Update collection."""
base_url = str(kwargs["request"].base_url)
self.check_collection_not_found(model["id"])
self.delete_collection(model["id"])
self.create_collection(model, **kwargs)
return CollectionSerializer.db_to_stac(model, base_url)
def delete_item(self, item_id: str, collection_id: str, **kwargs):
"""Delete item."""
self.check_item_not_exists(item_id, collection_id)
self.redis_client.json().delete(item_id, Path.rootPath())
def delete_collection(self, collection_id: str, **kwargs):
"""Delete collection."""
self.check_collection_not_found(collection_id)
self.redis_client.json().delete(collection_id, Path.rootPath())
self.redis_client.srem("collections", collection_id)
# @attr.s
# class BulkTransactionsClient(BaseBulkTransactionsClient):
# """Postgres bulk transactions."""
# session: Session = attr.ib(default=attr.Factory(Session.create_from_env))
# def __attrs_post_init__(self):
# """Create es engine."""
# settings = ElasticsearchSettings()
# self.client = settings.create_client
# def _preprocess_item(self, model: stac_types.Item, base_url) -> stac_types.Item:
# """Preprocess items to match data model."""
# if not self.client.exists(index="stac_collections", id=model["collection"]):
# raise ForeignKeyError(f"Collection {model['collection']} does not exist")
# if self.client.exists(index="stac_items", id=model["id"]):
# raise ConflictError(
# f"Item {model['id']} in collection {model['collection']} already exists"
# )
# item = ItemSerializer.stac_to_db(model, base_url)
# return item
# def bulk_sync(self, processed_items):
# """Elasticsearch bulk insertion."""
# actions = [
# {"_index": "stac_items", "_source": item} for item in processed_items
# ]
# helpers.bulk(self.client, actions)
# def bulk_item_insert(self, items: Items, **kwargs) -> str:
# """Bulk item insertion using es."""
# transactions_client = TransactionsClient()
# transactions_client._create_item_index()
# try:
# base_url = str(kwargs["request"].base_url)
# except Exception:
# base_url = ""
# processed_items = [
# self._preprocess_item(item, base_url) for item in items.items.values()
# ]
# return_msg = f"Successfully added {len(processed_items)} items."
# self.bulk_sync(processed_items)
# return return_msg
``` |
{
"source": "jonhealy1/stac-mongo-api",
"score": 2
} |
#### File: stac-mongo-api/api/db.py
```python
import motor.motor_asyncio
from dotenv import load_dotenv
import os
load_dotenv()
MONGO_CONN_STRING = os.environ.get("MONGO_CONN_STRING")
client = motor.motor_asyncio.AsyncIOMotorClient(MONGO_CONN_STRING)
database = client.stac
stac_collection = database.get_collection("stac_collection")
stac_item = database.get_collection("stac_item")
async def add_collection(new_collection: dict):
collection = await stac_collection.insert_one(new_collection)
async def add_item(new_item: dict):
item = await stac_item.insert_one(new_item)
async def get_collections():
collections = []
async for collection in stac_collection.find():
if "content" in collection:
collections.append(collection["content"])
return collections
async def get_one_collection(id: str):
if (collection := await stac_collection.find_one({"_id": id})) is not None:
return collection
else:
return {id: "Not found"}
async def get_item_collection(id: str):
items = []
async for item in stac_item.find({"collection": id}):
#if "content" in item:
items.append(item["content"])
return items
```
#### File: api/routers/stac_router.py
```python
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
router = APIRouter()
from api.db import (
add_collection,
add_item,
get_collections,
get_one_collection,
get_item_collection
)
@router.get("/")
async def root_catalog():
return {"root_catalog": "Hello World"}
@router.get("/collections")
async def get_all_collections():
collections = await get_collections()
if collections:
return collections
return {"error": "No collections"}
@router.post("/collections/")
async def post_collection(content: dict):
collection = {"_id": content["id"], "content": content}
collection = jsonable_encoder(collection)
stac_collection = await add_collection(collection)
@router.post("/collections/{collection_id}")
async def post_item(content: dict):
item = {"_id": content["id"], "collection": content["collection"], "content": content}
item = jsonable_encoder(item)
stac_item = await add_item(item)
@router.get("/collections/{collection_id}")
async def get_collection(collection_id: str):
collection_id = jsonable_encoder(collection_id)
collection = await get_one_collection(collection_id)
return collection
@router.get("/collections/{collection_id}/items")
async def get_items(collection_id: str):
items = await get_item_collection(collection_id)
if items:
return items
```
#### File: jonhealy1/stac-mongo-api/conftest.py
```python
import pytest
from starlette.testclient import TestClient
from api.main import app
@pytest.fixture
def app_client():
with TestClient(app) as test_client:
yield test_client
``` |
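With the `app_client` fixture above, the routes defined in `stac_router.py` can be exercised end to end; a hedged sketch, assuming the router is mounted without a prefix and that `MONGO_CONN_STRING` points at a reachable MongoDB.
```python
# Hedged test sketch built on the app_client fixture above (not part of the repo).
def test_root_catalog(app_client):
    response = app_client.get("/")
    assert response.status_code == 200
    assert response.json() == {"root_catalog": "Hello World"}
```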
{
"source": "jonhealy1/stac-validator",
"score": 2
} |
#### File: stac-validator/tests/test_extensions.py
```python
__authors__ = "<NAME>", "<NAME>"
from stac_validator import stac_validator
def test_item_local_v080():
stac_file = "tests/test_data/v080/items/sample-full.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "0.8.0",
"path": "tests/test_data/v080/items/sample-full.json",
"asset_type": "ITEM",
"validation_method": "extensions",
"schema": ["https://cdn.staclint.com/v0.8.0/extension/eo.json"],
"valid_stac": True,
}
]
def test_v090():
stac_file = "tests/test_data/v090/extensions/eo/examples/example-landsat8.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "0.9.0",
"path": "tests/test_data/v090/extensions/eo/examples/example-landsat8.json",
"asset_type": "ITEM",
"validation_method": "extensions",
"schema": [
"https://cdn.staclint.com/v0.9.0/extension/eo.json",
"https://cdn.staclint.com/v0.9.0/extension/view.json",
],
"valid_stac": True,
}
]
def test_v1beta1():
stac_file = "tests/test_data/1beta1/sentinel2.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.1",
"path": "tests/test_data/1beta1/sentinel2.json",
"schema": [
"https://cdn.staclint.com/v1.0.0-beta.1/collection.json",
],
"asset_type": "COLLECTION",
"validation_method": "extensions",
"valid_stac": True,
}
]
def test_no_extensions_v1beta2():
stac_file = "tests/test_data/1beta2/stac_item.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1beta2/stac_item.json",
"asset_type": "ITEM",
"version": "1.0.0-beta.2",
"validation_method": "extensions",
"schema": [],
"valid_stac": True,
}
]
def test_v1beta2():
stac_file = "tests/test_data/1beta2/CBERS_4.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.2",
"path": "tests/test_data/1beta2/CBERS_4.json",
"schema": [
"https://cdn.staclint.com/v1.0.0-beta.1/extension/projection.json",
"https://cdn.staclint.com/v1.0.0-beta.1/extension/view.json",
],
"asset_type": "ITEM",
"validation_method": "extensions",
"valid_stac": True,
}
]
def test_remote_v1rc3():
stac_file = "https://raw.githubusercontent.com/radiantearth/stac-spec/v1.0.0-rc.3/examples/extended-item.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.3",
"path": "https://raw.githubusercontent.com/radiantearth/stac-spec/v1.0.0-rc.3/examples/extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
],
"valid_stac": True,
"asset_type": "ITEM",
"validation_method": "extensions",
}
]
def test_remote_v1rc4():
stac_file = "https://raw.githubusercontent.com/radiantearth/stac-spec/v1.0.0-rc.4/examples/extended-item.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.4",
"path": "https://raw.githubusercontent.com/radiantearth/stac-spec/v1.0.0-rc.4/examples/extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
],
"valid_stac": True,
"asset_type": "ITEM",
"validation_method": "extensions",
}
]
def test_local_v1rc2():
stac_file = (
"tests/test_data/1rc2/extensions-collection/./proj-example/proj-example.json"
)
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/extensions-collection/./proj-example/proj-example.json",
"schema": ["https://stac-extensions.github.io/eo/v1.0.0/schema.json"],
"valid_stac": False,
"error_type": "ValidationError",
"error_message": "'panchromatic' is not one of ['coastal', 'blue', 'green', 'red', 'rededge', 'yellow', 'pan', 'nir', 'nir08', 'nir09', 'cirrus', 'swir16', 'swir22', 'lwir', 'lwir11', 'lwir12']. Error is in assets -> B8 -> eo:bands -> 0 -> common_name",
}
]
def test_catalog_v1rc2():
stac_file = "tests/test_data/1rc2/catalog.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/catalog.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/catalog-spec/json-schema/catalog.json"
],
"asset_type": "CATALOG",
"validation_method": "extensions",
"valid_stac": True,
}
]
def test_item_v100():
stac_file = "tests/test_data/v100/extended-item.json"
stac = stac_validator.StacValidate(stac_file, extensions=True)
stac.run()
assert stac.message == [
{
"version": "1.0.0",
"path": "tests/test_data/v100/extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
],
"valid_stac": True,
"asset_type": "ITEM",
"validation_method": "extensions",
}
]
``` |
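Outside of pytest, the same validator API used throughout these tests can be called directly; a short sketch mirroring the pattern above (the file path is one of the repo's own fixtures).
```python
# Direct use of the validator, mirroring the test pattern above.
from stac_validator import stac_validator

stac = stac_validator.StacValidate(
    "tests/test_data/v100/extended-item.json", extensions=True
)
stac.run()
print(stac.message[0]["valid_stac"], stac.message[0]["schema"])
```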
{
"source": "jonhehir/amundsenfrontendlibrary",
"score": 2
} |
#### File: api/search/v0.py
```python
import logging
import json
from http import HTTPStatus
from typing import Any, Dict # noqa: F401
from flask import Response, jsonify, make_response, request
from flask import current_app as app
from flask.blueprints import Blueprint
from amundsen_application.log.action_log import action_logging
from amundsen_application.api.utils.metadata_utils import marshall_dashboard_partial
from amundsen_application.api.utils.request_utils import get_query_param, request_search
from amundsen_application.api.utils.search_utils import generate_query_json, has_filters, \
map_table_result, transform_filters
from amundsen_application.models.user import load_user, dump_user
LOGGER = logging.getLogger(__name__)
REQUEST_SESSION_TIMEOUT_SEC = 3
search_blueprint = Blueprint('search', __name__, url_prefix='/api/search/v0')
SEARCH_DASHBOARD_ENDPOINT = '/search_dashboard'
SEARCH_TABLE_ENDPOINT = '/search'
SEARCH_TABLE_FILTER_ENDPOINT = '/search_table'
SEARCH_USER_ENDPOINT = '/search_user'
@search_blueprint.route('/table', methods=['POST'])
def search_table() -> Response:
"""
Parse the request arguments and call the helper method to execute a table search
:return: a Response created with the results from the helper method
"""
try:
request_json = request.get_json()
search_term = get_query_param(request_json, 'term', '"term" parameter expected in request data')
page_index = get_query_param(request_json, 'pageIndex', '"pageIndex" parameter expected in request data')
search_type = request_json.get('searchType')
transformed_filters = transform_filters(filters=request_json.get('filters', {}))
results_dict = _search_table(filters=transformed_filters,
search_term=search_term,
page_index=page_index,
search_type=search_type)
return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
except Exception as e:
message = 'Encountered exception: ' + str(e)
logging.exception(message)
        return make_response(jsonify({'msg': message}), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_table(*, search_term: str, page_index: int, filters: Dict, search_type: str) -> Dict[str, Any]:
"""
Call the search service endpoint and return matching results
Search service logic defined here:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/table.py
:return: a json output containing search results array as 'results'
"""
# Default results
tables = {
'page_index': int(page_index),
'results': [],
'total_results': 0,
}
results_dict = {
'search_term': search_term,
'msg': '',
'tables': tables,
}
try:
if has_filters(filters=filters):
query_json = generate_query_json(filters=filters, page_index=page_index, search_term=search_term)
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_FILTER_ENDPOINT
response = request_search(url=url_base,
headers={'Content-Type': 'application/json'},
method='POST',
data=json.dumps(query_json))
else:
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_ENDPOINT
url = f'{url_base}?query_term={search_term}&page_index={page_index}'
response = request_search(url=url)
status_code = response.status_code
if status_code == HTTPStatus.OK:
results_dict['msg'] = 'Success'
results = response.json().get('results')
tables['results'] = [map_table_result(result) for result in results]
tables['total_results'] = response.json().get('total_results')
else:
message = 'Encountered error: Search request failed'
results_dict['msg'] = message
logging.error(message)
results_dict['status_code'] = status_code
return results_dict
except Exception as e:
message = 'Encountered exception: ' + str(e)
results_dict['msg'] = message
logging.exception(message)
return results_dict
@search_blueprint.route('/user', methods=['GET'])
def search_user() -> Response:
"""
Parse the request arguments and call the helper method to execute a user search
:return: a Response created with the results from the helper method
"""
try:
search_term = get_query_param(request.args, 'query', 'Endpoint takes a "query" parameter')
page_index = get_query_param(request.args, 'page_index', 'Endpoint takes a "page_index" parameter')
search_type = request.args.get('search_type')
results_dict = _search_user(search_term=search_term, page_index=int(page_index), search_type=search_type)
return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
except Exception as e:
message = 'Encountered exception: ' + str(e)
logging.exception(message)
        return make_response(jsonify({'msg': message}), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_user(*, search_term: str, page_index: int, search_type: str) -> Dict[str, Any]:
"""
Call the search service endpoint and return matching results
Search service logic defined here:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/user.py
:return: a json output containing search results array as 'results'
"""
def _map_user_result(result: Dict) -> Dict:
user_result = dump_user(load_user(result))
user_result['type'] = 'user'
return user_result
users = {
'page_index': page_index,
'results': [],
'total_results': 0,
}
results_dict = {
'search_term': search_term,
'msg': 'Success',
'status_code': HTTPStatus.OK,
'users': users,
}
try:
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT
url = f'{url_base}?query_term={search_term}&page_index={page_index}'
response = request_search(url=url)
status_code = response.status_code
if status_code == HTTPStatus.OK:
results_dict['msg'] = 'Success'
results = response.json().get('results')
users['results'] = [_map_user_result(result) for result in results]
users['total_results'] = response.json().get('total_results')
else:
message = 'Encountered error: Search request failed'
results_dict['msg'] = message
logging.error(message)
results_dict['status_code'] = status_code
return results_dict
except Exception as e:
message = 'Encountered exception: ' + str(e)
results_dict['msg'] = message
results_dict['status_code'] = HTTPStatus.INTERNAL_SERVER_ERROR
logging.exception(message)
return results_dict
@search_blueprint.route('/dashboard', methods=['GET'])
def search_dashboard() -> Response:
"""
Parse the request arguments and call the helper method to execute a dashboard search
:return: a Response created with the results from the helper method
"""
try:
search_term = get_query_param(request.args, 'query', 'Endpoint takes a "query" parameter')
page_index = get_query_param(request.args, 'page_index', 'Endpoint takes a "page_index" parameter')
search_type = request.args.get('search_type')
results_dict = _search_dashboard(search_term=search_term, page_index=int(page_index), search_type=search_type)
return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
except Exception as e:
message = 'Encountered exception: ' + str(e)
logging.exception(message)
        return make_response(jsonify({'msg': message}), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_dashboard(*, search_term: str, page_index: int, search_type: str) -> Dict[str, Any]:
"""
Call the search service endpoint and return matching results
Search service logic defined here:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/dashboard.py
:return: a json output containing search results array as 'results'
"""
# Default results
dashboards = {
'page_index': page_index,
'results': [],
'total_results': 0,
}
results_dict = {
'search_term': search_term,
'msg': '',
'dashboards': dashboards,
}
try:
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_DASHBOARD_ENDPOINT
url = f'{url_base}?query_term={search_term}&page_index={page_index}'
response = request_search(url=url)
status_code = response.status_code
if status_code == HTTPStatus.OK:
results_dict['msg'] = 'Success'
results = response.json().get('results')
dashboards['results'] = [marshall_dashboard_partial(result) for result in results]
dashboards['total_results'] = response.json().get('total_results')
else:
message = 'Encountered error: Search request failed'
results_dict['msg'] = message
logging.error(message)
results_dict['status_code'] = status_code
return results_dict
except Exception as e:
message = 'Encountered exception: ' + str(e)
results_dict['msg'] = message
logging.exception(message)
return results_dict
```
#### File: api/utils/request_utils.py
```python
from typing import Dict
import requests
from flask import current_app as app
def get_query_param(args: Dict, param: str, error_msg: str = None) -> str:
value = args.get(param)
if value is None:
        msg = error_msg if error_msg is not None else 'A {0} parameter must be provided'.format(param)
raise Exception(msg)
return value
def request_metadata(*, # type: ignore
url: str,
method: str = 'GET',
headers=None,
timeout_sec: int = 0,
data=None):
"""
Helper function to make a request to metadata service.
Sets the client and header information based on the configuration
:param headers: Optional headers for the request, e.g. specifying Content-Type
:param method: DELETE | GET | POST | PUT
:param url: The request URL
:param timeout_sec: Number of seconds before timeout is triggered.
:param data: Optional request payload
:return:
"""
if headers is None:
headers = {}
if app.config['REQUEST_HEADERS_METHOD']:
headers.update(app.config['REQUEST_HEADERS_METHOD'](app))
elif app.config['METADATASERVICE_REQUEST_HEADERS']:
headers.update(app.config['METADATASERVICE_REQUEST_HEADERS'])
return request_wrapper(method=method,
url=url,
client=app.config['METADATASERVICE_REQUEST_CLIENT'],
headers=headers,
timeout_sec=timeout_sec,
data=data)
def request_search(*, # type: ignore
url: str,
method: str = 'GET',
headers=None,
timeout_sec: int = 0,
data=None):
"""
Helper function to make a request to search service.
Sets the client and header information based on the configuration
:param headers: Optional headers for the request, e.g. specifying Content-Type
:param method: DELETE | GET | POST | PUT
:param url: The request URL
:param timeout_sec: Number of seconds before timeout is triggered.
:param data: Optional request payload
:return:
"""
if headers is None:
headers = {}
if app.config['REQUEST_HEADERS_METHOD']:
headers.update(app.config['REQUEST_HEADERS_METHOD'](app))
elif app.config['SEARCHSERVICE_REQUEST_HEADERS']:
headers.update(app.config['SEARCHSERVICE_REQUEST_HEADERS'])
return request_wrapper(method=method,
url=url,
client=app.config['SEARCHSERVICE_REQUEST_CLIENT'],
headers=headers,
timeout_sec=timeout_sec,
data=data)
# TODO: Define an interface for envoy_client
def request_wrapper(method: str, url: str, client, headers, timeout_sec: int, data=None): # type: ignore
"""
Wraps a request to use Envoy client and headers, if available
:param method: DELETE | GET | POST | PUT
:param url: The request URL
:param client: Optional Envoy client
:param headers: Optional Envoy request headers
:param timeout_sec: Number of seconds before timeout is triggered. Not used with Envoy
:param data: Optional request payload
:return:
"""
# If no timeout specified, use the one from the configurations.
timeout_sec = timeout_sec or app.config['REQUEST_SESSION_TIMEOUT_SEC']
if client is not None:
if method == 'DELETE':
return client.delete(url, headers=headers, raw_response=True)
elif method == 'GET':
return client.get(url, headers=headers, raw_response=True)
elif method == 'POST':
return client.post(url, headers=headers, raw_response=True, raw_request=True, data=data)
elif method == 'PUT':
return client.put(url, headers=headers, raw_response=True, raw_request=True, data=data)
else:
raise Exception('Method not allowed: {}'.format(method))
else:
with requests.Session() as s:
if method == 'DELETE':
return s.delete(url, headers=headers, timeout=timeout_sec)
elif method == 'GET':
return s.get(url, headers=headers, timeout=timeout_sec)
elif method == 'POST':
return s.post(url, headers=headers, timeout=timeout_sec, data=data)
elif method == 'PUT':
return s.put(url, headers=headers, timeout=timeout_sec, data=data)
else:
raise Exception('Method not allowed: {}'.format(method))
```
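`request_wrapper` only falls back to Flask configuration when no timeout is supplied, so it can be driven directly through a plain `requests` session; a hedged sketch with a placeholder URL.
```python
# Hedged sketch (not part of the repo): calling request_wrapper without an Envoy client.
# A non-zero timeout_sec avoids the Flask-config fallback, so no app context is needed.
from amundsen_application.api.utils.request_utils import request_wrapper

resp = request_wrapper(method='GET',
                       url='http://localhost:5001/healthcheck',  # placeholder URL
                       client=None,
                       headers={},
                       timeout_sec=3)
print(resp.status_code)
```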
#### File: unit/utils/test_metadata_utils.py
```python
import unittest
from unittest.mock import patch, Mock
from amundsen_application.api.utils.metadata_utils import _update_prog_descriptions, _sort_prog_descriptions
from amundsen_application import create_app
local_app = create_app('amundsen_application.config.TestConfig', 'tests/templates')
class ProgrammaticDescriptionsTest(unittest.TestCase):
def setUp(self) -> None:
pass
@patch('amundsen_application.api.utils.metadata_utils._sort_prog_descriptions')
def test_update_prog_descriptions(self, sort_mock) -> None:
with local_app.app_context():
test_desc = [
{'source': 'c_1', 'text': 'description c'},
{'source': 'a_1', 'text': 'description a'},
{'source': 'b_1', 'text': 'description b'}
]
# Pretend config exists
local_app.config['PROGRAMMATIC_DISPLAY'] = Mock()
# Mock the effects of the sort method
sort_mock.side_effect = [1, 0, 1]
# Expected order based on mocked side effect
expected_programmatic_desc = [
{'source': 'a_1', 'text': 'description a'},
{'source': 'c_1', 'text': 'description c'},
{'source': 'b_1', 'text': 'description b'}
]
_update_prog_descriptions(test_desc)
self.assertEqual(test_desc, expected_programmatic_desc)
def test_sort_prog_descriptions_returns_value_from_config(self) -> None:
"""
        Verify the method will return the display order from the programmatic description
configuration if it exists for the given source
:return:
"""
with local_app.app_context():
mock_order = 1
mock_config = {
"c_1": {
"display_order": mock_order
}
}
in_config_value = {'source': 'c_1', 'text': 'I am a test'}
self.assertEqual(_sort_prog_descriptions(mock_config, in_config_value), mock_order)
def test_sort_prog_descriptions_returns_default_value(self) -> None:
"""
        Verify the method will return the expected default value if the programmatic description
        source is not included in the configuration
:return:
"""
with local_app.app_context():
mock_config = {
"c_1": {
"display_order": 0
}
}
not_in_config_value = {'source': 'test', 'text': 'I am a test'}
self.assertEqual(_sort_prog_descriptions(mock_config, not_in_config_value), len(mock_config))
``` |
{
"source": "jonheng/sgnlp",
"score": 3
} |
#### File: models/nea/tokenization.py
```python
import collections
import logging
import nltk
import operator
import os
import pathlib
import re
from typing import List, Optional, Tuple
from transformers import PreTrainedTokenizer
logging.basicConfig(level=logging.DEBUG)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
class NEA_NLTK_Tokenizer(object):
"""Tokenizer for NEA.
Performs word level tokenization via NLTK package followed by combining entity placeholders.
"""
def __init__(self, do_lower_case: bool = True):
self.do_lower_case = do_lower_case
def tokenize(self, text: str) -> List[str]:
"""Main tokenize method
Args:
text (str): text string to tokenize
Returns:
List[str]: return a list of tokenized string
"""
if self.do_lower_case:
text = text.lower()
tokens = nltk.word_tokenize(text)
tokens = self._merge_tokens(tokens)
return tokens
def _merge_tokens(self, tokens: List[str]) -> List[str]:
for index, token in enumerate(tokens):
if token == '@' and (index + 1) < len(tokens):
tokens[index + 1] = '@' + re.sub('[0-9]+.*', '', tokens[index + 1])
tokens.pop(index)
return tokens
class NEATokenizer(PreTrainedTokenizer):
"""
The NEA Tokenizer class used WordLevel tokenization to generate tokens.
Args:
text (:obj:`str`):
input text string to tokenize
Example::
# 1. From local vocab file
vocab_file = 'vocab.txt'
tokenizer = NEATokenizer(vocab_file=vocab_file)
tokens = tokenizer("Hello world!")
tokens["input_ids"]
# 2. Train vocab from dataset file
train_file = 'dataset.tsv'
tokenizer = NEATokenizer(train_file=train_file, train_vocab=True)
tokens = tokenizer("Hello world!")
tokens["input_ids"]
# 3. Download pretrained from Azure storage
import sgnlp.models.nea import NEAArguments
import sgnlp.models.nea.utils import download_tokenizer_files_from_azure
cfg = NEAArguments()
download_tokenizer_files_from_azure(cfg)
tokenizer = NEATokenizer.from_pretrained(cfg.tokenizer_args["save_folder"])
tokens = tokenizer("Hello world!")
tokens["input_ids"]
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file: str = None,
train_file: str = None,
train_vocab: bool = False,
prompt_id: int = 1,
maxlen: int = 0,
vocab_size: int = 4000,
do_lower_case: bool = True,
unk_token: str = "<unk>",
pad_token: str = "<pad>",
num_token: str = "<num>",
**kwargs):
super().__init__(
prompt_id=prompt_id,
do_lower_case=do_lower_case,
unk_token=unk_token,
pad_token=pad_token,
num_token=num_token,
**kwargs)
self.nea_tokenizer = NEA_NLTK_Tokenizer(do_lower_case)
if train_vocab:
self.vocab = self.create_vocab(train_file, prompt_id, maxlen, vocab_size)
else:
self.vocab = NEATokenizer.load_vocabulary(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
@property
def do_lower_case(self):
return self.nea_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab)
def _tokenize(self, text: str) -> List[str]:
return self.nea_tokenizer.tokenize(text)
def _convert_token_to_id(self, token: str):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index: int):
        return self.ids_to_tokens.get(index, self.unk_token)
def create_vocab(self, file_path: str, prompt_id: int, maxlen: int, vocab_size: int):
total_words, unique_words = 0, 0
word_freqs = {}
with open(file_path, 'r', encoding='utf-8') as input_file:
next(input_file)
for line in input_file:
tokens = line.strip().split('\t')
essay_set = int(tokens[1])
content = tokens[2].strip()
if essay_set == prompt_id or prompt_id <= 0:
if self.do_lower_case:
content = content.lower()
content = self.tokenize(content)
if maxlen > 0 and len(content) > maxlen:
continue
for word in content:
try:
word_freqs[word] += 1
except KeyError:
unique_words += 1
word_freqs[word] = 1
total_words += 1
sorted_word_freqs = sorted(word_freqs.items(), key=operator.itemgetter(1), reverse=True)
if vocab_size <= 0:
vocab_size = 0
for word, freq in sorted_word_freqs:
if freq > 1:
vocab_size += 1
vocab = collections.OrderedDict()
vocab['<pad>'] = 0
vocab['<unk>'] = 1
vocab['<num>'] = 2
vocab_len = len(vocab.keys())
offset = vocab_len
for word, _ in sorted_word_freqs[:vocab_size - vocab_len]:
vocab[word] = offset
offset += 1
return vocab
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
index = 0
os.makedirs(save_directory, exist_ok=True)
vocab_file = pathlib.Path(save_directory).joinpath(VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logging.warning(
f'Saving vocabulary to {vocab_file}: Vocabulary indices are not consecutive. '
'Please check vocabulary is not corrupted!')
writer.write(token + '\n')
index += 1
return (str(vocab_file),)
@staticmethod
def load_vocabulary(vocab_file: str):
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
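# Note: the vocab file is plain text with one token per line; a token's id is its
# 0-based line number, mirroring save_vocabulary above.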
```
#### File: tests/nea/test_nea.py
```python
import unittest
import pathlib
import shutil
import pytest
import torch
from transformers import PretrainedConfig
from sgnlp.models.nea import (
NEAConfig,
NEARegPoolingModel,
NEARegModel,
NEABiRegModel,
NEABiRegPoolingModel,
NEATokenizer,
)
PARENT_DIR = pathlib.Path(__file__).parent
class NEATest(unittest.TestCase):
def setUp(self):
self.config = NEAConfig
self.reg_model = NEARegModel
self.reg_pooling_model = NEARegPoolingModel
self.bi_reg_model = NEABiRegModel
self.bi_reg_pooling_model = NEABiRegPoolingModel
self.model_input = torch.ones((2, 20)).int()
self.model_input_with_label = {
"input_ids": self.model_input,
"labels": torch.tensor([1, 1]),
}
def test_config_can_be_init(self):
config = self.config()
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
self.assertEqual(config.vocab_size, 4000)
self.assertEqual(config.embedding_dim, 50)
self.assertEqual(config.dropout, 0.5)
self.assertEqual(config.cnn_input_dim, 0)
self.assertEqual(config.cnn_output_dim, 0)
self.assertEqual(config.cnn_kernel_size, 0)
self.assertEqual(config.cnn_padding, 0)
self.assertEqual(config.rec_layer_type, "lstm")
self.assertEqual(config.rec_input_dim, 50)
self.assertEqual(config.rec_output_dim, 300)
self.assertEqual(config.aggregation, "mot")
self.assertEqual(config.linear_input_dim, 300)
self.assertEqual(config.linear_output_dim, 1)
self.assertEqual(config.skip_init_bias, False)
self.assertEqual(config.loss_function, "mse")
def test_reg_model_can_be_init(self):
config = self.config()
model = self.reg_model(config=config)
self.assertIsNotNone(model)
def test_reg_pooling_model_can_be_init(self):
config = self.config()
model = self.reg_pooling_model(config=config)
self.assertIsNotNone(model)
def test_bi_reg_model_can_be_init(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
self.assertIsNotNone(model)
def test_bi_reg_pooling_model_can_be_init(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
self.assertIsNotNone(model)
def test_reg_model_forward_pass(self):
config = self.config()
model = self.reg_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_reg_pooling_model_forward_pass(self):
config = self.config()
model = self.reg_pooling_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_model_forward_pass(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_pooling_model_forward_pass(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
@pytest.mark.slow
def test_from_pretrained(self):
config = self.config.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/nea/config.json"
)
model = self.reg_pooling_model.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/nea/pytorch_model.bin",
config=config,
)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
class NEAIntegrationTest(unittest.TestCase):
def setUp(self):
self.config = NEAConfig
self.tokenizer = NEATokenizer
self.vocab_path = PARENT_DIR / "test_data/vocab"
self.reg_model = NEARegModel
self.reg_pooling_model = NEARegPoolingModel
self.bi_reg_model = NEABiRegModel
self.bi_reg_pooling_model = NEABiRegPoolingModel
# for initialising linear bias
self.y_train = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5])
# for loading embedding
self.emb_matrix = torch.ones((4000, 50))
# train tokenizer to get the vocab artifacts
train_path = str(PARENT_DIR / "test_data/train.tsv")
vocab_dir = str(self.vocab_path)
nea_tokenizer = NEATokenizer(train_file=train_path, train_vocab=True)
nea_tokenizer.save_pretrained(vocab_dir)
def test_reg_model_integration(self):
config = self.config()
model = self.reg_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_reg_pooling_model_integration(self):
config = self.config()
model = self.reg_pooling_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_model_integration(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_pooling_model_integration(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def tearDown(self) -> None:
shutil.rmtree(self.vocab_path)
``` |
{
"source": "jonherke/pygraphistry",
"score": 2
} |
#### File: pygraphistry/graphistry/bolt_util.py
```python
from .pygraphistry import util
node_id_key = u'_bolt_node_id_key'
node_type_key = u'type'
node_label_prefix_key = u'_lbl_'
start_node_id_key = u'_bolt_start_node_id_key'
end_node_id_key = u'_bolt_end_node_id_key'
relationship_id_key = u'_bolt_relationship_id'
relationship_type_key = u'type'
def is_neotime(v):
try:
return v.__module__ == 'neotime'
except:
return False
def stringify_neotimes(df):
#Otherwise currently encountering a toString error
import neotime
df2 = df.copy()
for c in df.columns:
df2[c] = df[c].apply(lambda v: str(v) if is_neotime(v) else v)
return df2
def to_bolt_driver(driver=None):
if driver is None:
return None
try:
from neo4j import GraphDatabase, Driver
if isinstance(driver, Driver):
return driver
return GraphDatabase.driver(**driver)
except ImportError:
raise BoltSupportModuleNotFound()
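# Illustrative usage (connection values are placeholders; requires the neo4j package):
#   driver = to_bolt_driver({'uri': 'bolt://localhost:7687', 'auth': ('neo4j', 'password')})
#   driver2 = to_bolt_driver(existing_driver)  # an existing Driver is passed through unchanged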
def bolt_graph_to_edges_dataframe(graph):
import pandas as pd
df = pd.DataFrame([
util.merge_two_dicts(
{ key: value for (key, value) in relationship.items() },
{
relationship_id_key: relationship.id,
relationship_type_key: relationship.type,
start_node_id_key: relationship.start_node.id,
end_node_id_key: relationship.end_node.id
}
)
for relationship in graph.relationships
])
return stringify_neotimes(df)
def bolt_graph_to_nodes_dataframe(graph):
import pandas as pd
df = pd.DataFrame([
util.merge_two_dicts(
{ key: value for (key, value) in node.items() },
util.merge_two_dicts(
{
node_id_key: node.id,
node_type_key: ",".join(sorted([str(label) for label in node.labels]))
},
{ node_label_prefix_key + str(label): True for label in node.labels }))
for node in graph.nodes
])
return stringify_neotimes(df)
class BoltSupportModuleNotFound(Exception):
def __init__(self):
super(BoltSupportModuleNotFound, self).__init__(
"The neo4j module was not found but is required for pygraphistry bolt support. Try running `!pip install pygraphistry[bolt]`."
)
```
#### File: pygraphistry/graphistry/hyper.py
```python
import pandas as pd
import sys
### COMMON TO HYPERGRAPH AND SIMPLE GRAPH
def makeDefs(DEFS, opts={}):
defs = {key: opts[key] if key in opts else DEFS[key] for key in DEFS}
base_skip = opts['SKIP'] if 'SKIP' in opts else defs['SKIP']
skip = [x for x in base_skip] #copy
defs['SKIP'] = skip
for key in DEFS:
if not defs[key] in skip:
skip.append(defs[key])
return defs
def screen_entities(events, entity_types, defs):
base = entity_types if not entity_types == None else events.columns
return [x for x in base if not x in defs['SKIP']]
def col2cat(cat_lookup, col):
return cat_lookup[col] if col in cat_lookup else col
def make_reverse_lookup(categories):
lookup = {}
for category in categories:
for col in categories[category]:
lookup[col] = category
return lookup
def valToSafeStr (v):
if sys.version_info < (3,0):
t = type(v)
if t is unicode: # noqa: F821
return v
elif t is str:
return v
else:
return repr(v)
else:
t = type(v)
if t is str:
return v
else:
return repr(v)
#ex output: pd.DataFrame([{'val::state': 'CA', 'nodeType': 'state', 'nodeID': 'state::CA'}])
def format_entities(events, entity_types, defs, drop_na):
cat_lookup = make_reverse_lookup(defs['CATEGORIES'])
lst = sum([[{
col: v,
defs['TITLE']: v,
defs['NODETYPE']: col,
defs['NODEID']: col2cat(cat_lookup, col) + defs['DELIM'] + valToSafeStr(v)
}
for v in events[col].unique() if not drop_na or valToSafeStr(v) != 'nan'] for col in entity_types], [])
df = pd.DataFrame(lst).drop_duplicates([defs['NODEID']])
df[defs['CATEGORY']] = df[defs['NODETYPE']].apply(lambda col: col2cat(cat_lookup, col))
return df
DEFS_HYPER = {
'TITLE': 'nodeTitle',
'DELIM': '::',
'NODEID': 'nodeID',
'ATTRIBID': 'attribID',
'EVENTID': 'EventID',
'SOURCE': 'src',
'DESTINATION': 'dst',
'CATEGORY': 'category',
'NODETYPE': 'type',
'EDGETYPE': 'edgeType',
'SKIP': [],
'CATEGORIES': {} # { 'categoryName': ['colName', ...], ... }
}
#ex output: pd.DataFrame([{'edgeType': 'state', 'attribID': 'state::CA', 'eventID': 'eventID::0'}])
def format_hyperedges(events, entity_types, defs, drop_na, drop_edge_attrs):
is_using_categories = len(defs['CATEGORIES'].keys()) > 0
cat_lookup = make_reverse_lookup(defs['CATEGORIES'])
subframes = []
for col in sorted(entity_types):
fields = list(set([defs['EVENTID']] + ([x for x in events.columns] if not drop_edge_attrs else [col])))
raw = events[ fields ]
if drop_na:
raw = raw.dropna(axis=0, subset=[col])
raw = raw.copy()
if len(raw):
if is_using_categories:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col2cat(cat_lookup, col), axis=1)
raw[defs['CATEGORY']] = raw.apply(lambda r: col, axis=1)
else:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col, axis=1)
raw[defs['ATTRIBID']] = raw.apply(lambda r: col2cat(cat_lookup, col) + defs['DELIM'] + valToSafeStr(r[col]), axis=1)
subframes.append(raw)
if len(subframes):
result_cols = list(set(
([x for x in events.columns.tolist() if not x == defs['NODETYPE']]
if not drop_edge_attrs
else [])
+ [defs['EDGETYPE'], defs['ATTRIBID'], defs['EVENTID']]
+ ([defs['CATEGORY']] if is_using_categories else []) ))
out = pd.concat(subframes, ignore_index=True, sort=False).reset_index(drop=True)[ result_cols ]
return out
else:
return pd.DataFrame([])
# [ str ] * {?'EDGES' : ?{str: [ str ] }} -> {str: [ str ]}
def direct_edgelist_shape(entity_types, defs):
if 'EDGES' in defs and not defs['EDGES'] is None:
return defs['EDGES']
else:
out = {}
for entity_i in range(len(entity_types)):
out[ entity_types[entity_i] ] = entity_types[(entity_i + 1):]
return out
#ex output: pd.DataFrame([{'edgeType': 'state', 'attribID': 'state::CA', 'eventID': 'eventID::0'}])
def format_direct_edges(events, entity_types, defs, edge_shape, drop_na, drop_edge_attrs):
is_using_categories = len(defs['CATEGORIES'].keys()) > 0
cat_lookup = make_reverse_lookup(defs['CATEGORIES'])
subframes = []
for col1 in sorted(edge_shape.keys()):
for col2 in edge_shape[col1]:
fields = list(set([defs['EVENTID']] + ([x for x in events.columns] if not drop_edge_attrs else [col1, col2])))
raw = events[ fields ]
if drop_na:
raw = raw.dropna(axis=0, subset=[col1, col2])
raw = raw.copy()
if len(raw):
if not drop_edge_attrs:
if is_using_categories:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col2cat(cat_lookup, col1) + defs['DELIM'] + col2cat(cat_lookup, col2), axis=1)
raw[defs['CATEGORY']] = raw.apply(lambda r: col1 + defs['DELIM'] + col2, axis=1)
else:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col1 + defs['DELIM'] + col2, axis=1)
raw[defs['SOURCE']] = raw.apply(lambda r: col2cat(cat_lookup, col1) + defs['DELIM'] + valToSafeStr(r[col1]), axis=1)
raw[defs['DESTINATION']] = raw.apply(lambda r: col2cat(cat_lookup, col2) + defs['DELIM'] + valToSafeStr(r[col2]), axis=1)
subframes.append(raw)
if len(subframes):
result_cols = list(set(
([x for x in events.columns.tolist() if not x == defs['NODETYPE']]
if not drop_edge_attrs
else [])
+ [defs['EDGETYPE'], defs['SOURCE'], defs['DESTINATION'], defs['EVENTID']]
+ ([defs['CATEGORY']] if is_using_categories else []) ))
out = pd.concat(subframes, ignore_index=True).reset_index(drop=True)[ result_cols ]
return out
else:
return pd.DataFrame([])
def format_hypernodes(events, defs, drop_na):
event_nodes = events.copy()
event_nodes[defs['NODETYPE']] = defs['EVENTID']
event_nodes[defs['CATEGORY']] = 'event'
event_nodes[defs['NODEID']] = event_nodes[defs['EVENTID']]
event_nodes[defs['TITLE']] = event_nodes[defs['EVENTID']]
return event_nodes
def hyperbinding(g, defs, entities, event_entities, edges, source, destination):
nodes = pd.concat([entities, event_entities], ignore_index=True, sort=False).reset_index(drop=True)
return {
'entities': entities,
'events': event_entities,
'edges': edges,
'nodes': nodes,
'graph': g\
.bind(source=source, destination=destination).edges(edges)\
.bind(node=defs['NODEID'], point_title=defs['TITLE']).nodes(nodes)
}
#turn lists etc to strs, and preserve nulls
def flatten_objs_inplace(df, cols):
for c in cols:
name = df[c].dtype.name
if name == 'category':
#Avoid warning
df[c] = df[c].astype(str).where(~df[c].isnull(), df[c])
elif name == 'object':
df[c] = df[c].where(df[c].isnull(), df[c].astype(str))
###########
class Hypergraph(object):
@staticmethod
def hypergraph(g, raw_events, entity_types=None, opts={}, drop_na=True, drop_edge_attrs=False, verbose=True, direct=False):
defs = makeDefs(DEFS_HYPER, opts)
entity_types = screen_entities(raw_events, entity_types, defs)
events = raw_events.copy().reset_index(drop=True)
flatten_objs_inplace(events, entity_types)
if defs['EVENTID'] in events.columns:
events[defs['EVENTID']] = events.apply(
lambda r: defs['EVENTID'] + defs['DELIM'] + valToSafeStr(r[defs['EVENTID']]),
axis=1)
else:
events[defs['EVENTID']] = events.reset_index().apply(
lambda r: defs['EVENTID'] + defs['DELIM'] + valToSafeStr(r['index']),
axis=1)
events[defs['NODETYPE']] = 'event'
entities = format_entities(events, entity_types, defs, drop_na)
event_entities = None
edges = None
if direct:
edge_shape = direct_edgelist_shape(entity_types, opts)
event_entities = pd.DataFrame()
edges = format_direct_edges(events, entity_types, defs, edge_shape, drop_na, drop_edge_attrs)
else:
event_entities = format_hypernodes(events, defs, drop_na)
edges = format_hyperedges(events, entity_types, defs, drop_na, drop_edge_attrs)
if verbose:
print('# links', len(edges))
print('# events', len(events))
print('# attrib entities', len(entities))
return hyperbinding(
g, defs, entities, event_entities, edges,
defs['SOURCE'] if direct else defs['ATTRIBID'],
defs['DESTINATION'] if direct else defs['EVENTID'])
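# Illustrative usage sketch (column names and the plotter are placeholders; `df` is assumed
# to be a pandas DataFrame of events and graphistry to be registered):
#   import graphistry
#   hyper = Hypergraph.hypergraph(graphistry.bind(), df, entity_types=['src_ip', 'dst_ip', 'user'])
#   hyper['graph'].plot()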
``` |
{
"source": "jonhermansen/gbi",
"score": 2
} |
#### File: gbi/src/create_cfg.py
```python
import os
import pickle
from subprocess import Popen, PIPE
# Directory use from the installer.
tmp = "/tmp/.gbi/"
installer = "/usr/local/lib/gbi/"
# Installer data file.
disk = '%sdisk' % tmp
layout = '%slayout' % tmp
model = '%smodel' % tmp
pcinstallcfg = '%spcinstall.cfg' % tmp
user_passwd = <PASSWORD>' % tmp
language = '%slanguage' % tmp
dslice = '%sslice' % tmp
left = '%sleft' % tmp
partlabel = '%spartlabel' % tmp
timezone = '%stimezone' % tmp
KBFile = '%skeyboard' % tmp
boot_file = '%sboot' % tmp
disk_schem = '%sscheme' % tmp
zfs_config = '%szfs_config' % tmp
ufs_config = tmp + 'ufs_config'
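# Builds the pcinstall.cfg installer configuration from the temporary answer files
# that the GUI steps wrote to /tmp/.gbi/.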
class gbsd_cfg():
def __init__(self):
f = open('%spcinstall.cfg' % tmp, 'w')
# Installation Mode
f.writelines('# Installation Mode\n')
f.writelines('installMode=fresh\n')
f.writelines('installInteractive=no\n')
f.writelines('installType=GhostBSD\n')
f.writelines('installMedium=livecd\n')
f.writelines('packageType=livecd\n')
# System Language
langfile = open(language, 'r')
lang = langfile.readlines()[0].rstrip()
f.writelines('\n# System Language\n\n')
f.writelines('localizeLang=%s\n' % lang)
os.remove(language)
# Keyboard Setting
if os.path.exists(model):
f.writelines('\n# Keyboard Setting\n')
os.remove(model)
if os.path.exists(KBFile):
rkb = open(KBFile, 'r')
kb = rkb.readlines()
kbl = kb[0].rstrip()
f.writelines('localizeKeyLayout=%s\n' % kbl)
kbv = kb[1].rstrip()
if kbv != 'None':
f.writelines('localizeKeyVariant=%s\n' % kbv)
kbm = kb[2].rstrip()
if kbm != 'None':
f.writelines('localizeKeyModel=%s\n' % kbm)
# Timezone
if os.path.exists(timezone):
time = open(timezone, 'r')
t_output = time.readlines()[0].strip()
f.writelines('\n# Timezone\n')
f.writelines('timeZone=%s\n' % t_output)
f.writelines('enableNTP=yes\n')
os.remove(timezone)
if os.path.exists(zfs_config):
# Disk Setup
r = open(zfs_config, 'r')
zfsconf = r.readlines()
for line in zfsconf:
if 'partscheme' in line:
f.writelines(line)
read = open(boot_file, 'r')
boot = read.readlines()[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
os.remove(boot_file)
else:
f.writelines(line)
# os.remove(zfs_config)
elif os.path.exists(ufs_config):
# Disk Setup
r = open(ufs_config, 'r')
ufsconf = r.readlines()
for line in ufsconf:
if 'partscheme' in line:
f.writelines(line)
read = open(boot_file, 'r')
boot = read.readlines()[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
os.remove(boot_file)
else:
f.writelines(line)
else:
# Disk Setup
r = open(disk, 'r')
drive = r.readlines()
d_output = drive[0].strip()
f.writelines('\n# Disk Setup\n')
f.writelines('disk0=%s\n' % d_output)
os.remove(disk)
# Partition Slice.
p = open(dslice, 'r')
line = p.readlines()
part = line[0].rstrip()
f.writelines('partition=%s\n' % part)
os.remove(dslice)
# Boot Menu
read = open(boot_file, 'r')
line = read.readlines()
boot = line[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
# os.remove(boot_file)
# Sheme sheme
read = open(disk_schem, 'r')
shem = read.readlines()[0]
f.writelines(shem + '\n')
f.writelines('commitDiskPart\n')
# os.remove(disk_schem)
# Partition Setup
f.writelines('\n# Partition Setup\n')
part = open(partlabel, 'r')
# If the slice and auto files exist, add the first partition line.
# But swap needs to be 0 so it will take the rest of the free space.
for line in part:
if 'BOOT' in line or 'BIOS' in line or 'UEFI' in line:
pass
else:
f.writelines('disk0-part=%s\n' % line.strip())
f.writelines('commitDiskLabel\n')
os.remove(partlabel)
# Network Configuration
f.writelines('\n# Network Configuration\n')
readu = open(user_passwd, 'rb')
uf = pickle.load(readu)
net = uf[5]
f.writelines('hostname=%s\n' % net)
# Set the root pass
f.writelines('\n# Network Configuration\n')
readr = open('%sroot' % tmp, 'rb')
rf = pickle.load(readr)
root = rf[0]
f.writelines('\n# Set the root pass\n')
f.writelines('rootPass=%s\n' % root)
# Setup our users
user = uf[0]
f.writelines('\n# Setup user\n')
f.writelines('userName=%s\n' % user)
name = uf[1]
f.writelines('userComment=%s\n' % name)
passwd = uf[2]
f.writelines('userPass=%<PASSWORD>' % passwd.rstrip())
shell = uf[3]
f.writelines('userShell=%s\n' % shell)
upath = uf[4]
f.writelines('userHome=%s\n' % upath.rstrip())
f.writelines('defaultGroup=wheel\n')
f.writelines('userGroups=operator\n')
f.writelines('commitUser\n')
ifvbox = open('/tmp/.ifvbox', 'w')
vbguest = Popen('pciconf -lv | grep "VirtualBox Graphics"', shell=True,
stdout=PIPE, close_fds=True, universal_newlines=True)
if "VirtualBox Graphics" in vbguest.stdout.read():
ifvbox.writelines('True\n')
else:
ifvbox.writelines('False\n')
ifvbox.close()
f.writelines('runExtCommand=cat /etc/rc.conf | grep kld_list >> $FSMNT/etc/rc.conf\n')
if os.path.exists("/etc/X11/xorg.conf"):
f.writelines('runExtCommand=cp /etc/X11/xorg.conf $FSMNT/etc/X11/xorg.conf\n')
f.writelines('runScript=/root/iso_to_hd.sh\n')
f.writelines('runCommand=rm -f /root/iso_to_hd.sh\n')
if os.path.exists(zfs_config):
zfsark = """echo 'vfs.zfs.arc_max="512M"' >> /boot/loader.conf"""
f.writelines('runCommand=%s\n' % zfsark)
# adding setting for keyboard in slim
keyboard_conf = '/usr/local/etc/X11/xorg.conf.d/keyboard.conf'
k_conf_list = [
'Section "InputClass"',
' Identifier "Keyboard0"',
' Driver "kbd"',
' Option "XkbLayout" "%s"' % kbl
]
if kbv != 'None':
k_conf_list.append(' Option "XkbVariant" "%s"' % kbv)
if kbm != 'None':
k_conf_list.append(' Option "XkbModel" "%s"' % kbm)
k_conf_list.append('EndSection')
for conf_line in k_conf_list:
if 'Section "InputClass"' == conf_line:
cmd = """echo '%s' > %s""" % (conf_line, keyboard_conf)
else:
cmd = """echo '%s' >> %s""" % (conf_line, keyboard_conf)
f.writelines('runCommand=%s\n' % cmd)
f.close()
os.remove(user_passwd)
``` |
{
"source": "jonhermsen-UNO/home-kneads",
"score": 2
} |
#### File: home-kneads/app/views.py
```python
from django.shortcuts import render
from django.views import generic
from .models import Animal, Species
from . import forms
def index(request):
context = {
'page_title': 'Welcome to Home Kneads!',
'species_list': Species.objects.all().order_by('name'),
}
return render(request, 'app/index.html', context)
class AdoptView(generic.ListView):
template_name = 'app/adopt.html'
context_object_name = 'adoption_list'
def get_queryset(self):
queryset = None
if 'species' in self.kwargs:
queryset = Animal.objects.filter(species__name__iexact = self.kwargs['species'])
else:
queryset = Animal.objects.all()
return queryset.order_by('species__name', 'birth_date', 'name')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = 'Adoptable Pets'
return context
class AdoptCreate(generic.CreateView):
template_name = 'app/adopt-edit.html'
form_class = forms.AdoptForm
success_url = '/adopt/'
model = Animal
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = 'Create Adoption Record'
return context
class AdoptUpdate(generic.UpdateView):
template_name = 'app/adopt-edit.html'
form_class = forms.AdoptForm
success_url = '/adopt/'
model = Animal
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = 'Update Adoption Record'
return context
class AdoptDelete(generic.DeleteView):
template_name = 'app/adopt-delete.html'
success_url = '/adopt/'
model = Animal
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = 'Remove Adoption Record'
return context
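# Illustrative URL wiring for the views above (a sketch; the project's actual urls.py,
# paths and route names may differ):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('adopt/', views.AdoptView.as_view(), name='adopt'),
#       path('adopt/<str:species>/', views.AdoptView.as_view(), name='adopt-species'),
#       path('adopt/new/', views.AdoptCreate.as_view(), name='adopt-create'),
#       path('adopt/<int:pk>/edit/', views.AdoptUpdate.as_view(), name='adopt-update'),
#       path('adopt/<int:pk>/delete/', views.AdoptDelete.as_view(), name='adopt-delete'),
#   ]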
``` |
{
"source": "JonHGee/iPill",
"score": 3
} |
#### File: iPill/python/gui2.py
```python
import tkinter as tk
import tkinter.font as tkFont
import time
import iPill
# Declare global variables
dfont = None
dtime = None
font_size = 24
fullscreen = False
######################################################
class SampleApp(tk.Tk):
def __init__(self):
global time_dfont
global button_dfont
global label_dfont
global dtime
global pill
global dispensed
root = tk.Tk.__init__(self)
time_dfont = tkFont.Font(family='Courier New', size=font_size, weight=tkFont.BOLD)
button_dfont = tkFont.Font(size=font_size)
label_dfont = tkFont.Font(size=font_size)
dtime = tk.StringVar()
pill = iPill.pillInfo()
dispensed = False
self._frame = None
self.switch_frame(StartPage)
self.bind('<F11>', self.toggle_fullscreen)
self.bind('<Escape>', self.end_fullscreen)
self.bind('<Configure>', self.resize)
self.after(20,self.update)
def toggle_fullscreen(self, event=None):
global root
global fullscreen
# Toggle between fullscreen and windowed modes
fullscreen = not fullscreen
self.attributes('-fullscreen', fullscreen)
self.resize()
# Return to windowed mode
def end_fullscreen(self,event=None):
global root
global fullscreen
# Turn off fullscreen mode
fullscreen = False
self.attributes('-fullscreen', False)
self.resize()
# Automatically resize font size based on window size
def resize(self,event=None):
global time_dfont
global button_dfont
global label_dfont
new_size = -max(12, int((self._frame.winfo_height() / 6)))
time_dfont.configure(size=new_size)
new_size = -max(12, int((self._frame.winfo_height() / 12)))
label_dfont.configure(size=new_size)
new_size = -max(12, int((self._frame.winfo_height() / 8)))
button_dfont.configure(size=new_size)
# Read values from the sensors at regular intervals
def update(self):
global dtime
global dispensed
# Get local time
local_time = time.localtime()
# Convert time to 12 hour clock
hours = local_time.tm_hour
if hours > 12:
hours -= 12
# Add leading 0s
shours = str(hours)
smin = str(local_time.tm_min)
ssec = str(local_time.tm_sec)
if hours < 10:
shours = '0' + shours
if local_time.tm_min < 10:
smin = '0' + smin
if local_time.tm_sec < 10:
ssec = '0' + ssec
# Construct string out of time
if dispensed == False:
dtime.set(shours + ':' + smin + ':' + ssec)
self.after(700, self.update)
def switch_frame(self, frame_class):
"""Destroys current frame and replaces it with a new one."""
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack(fill=tk.BOTH, expand=1)
class StartPage(tk.Frame):
def __init__(self, master):
global time_dfont
global button_dfont
sp = tk.Frame.__init__(self, master)
tk.Label(self, text="Choose User Mode",font=time_dfont,highlightthickness=0).grid(row=0, column=0, padx=0, pady=0)
tk.Button(self, text="Pharmacist", font=button_dfont,highlightthickness=0,
command=lambda: master.switch_frame(pharmframe)).grid(row=1, column=0, padx=0, pady=0)
tk.Button(self, text="User", font=button_dfont,highlightthickness=0,
command=lambda: master.switch_frame(userframe)).grid(row=2, column=0, padx=0, pady=0)
tk.Button(self, text="Quit", font=button_dfont,highlightthickness=0,
command=lambda: master.destroy()).grid(row=3, column=0, padx=0, pady=0)
class pharmframe(tk.Frame):
global time_dfont
global button_dfont
global label_dfont
global pill
def __init__(self, master):
pharm = tk.Frame.__init__(self, master)
tk.Label(self, font=time_dfont,text="Pharmacist Mode",highlightthickness=0).grid(row=0, columnspan = 3,sticky=tk.E+tk.W)
self.pname = tk.Entry(self, font =label_dfont, text="<NAME>")
self.pname.grid(row=1, column = 1, columnspan = 2, pady=15,sticky=tk.W+tk.E)
tk.Label(self, text="Patient Name", font =label_dfont).grid(row=1, column = 0,sticky=tk.W+tk.E)
self.pinfo = tk.Entry(self, font =label_dfont, text="Ibuprofen")
self.pinfo.grid(row=2, column = 1, columnspan = 2,pady=15, sticky=tk.W+tk.E)
tk.Label(self, text="Prescription Info", font =label_dfont).grid(row=2, column = 0, sticky=tk.W+tk.E)
self.dinfo = tk.Entry(self, font =label_dfont, text="Two tablets three times a day")
self.dinfo.grid(row=3, column = 1, columnspan = 2,pady=15, sticky=tk.W+tk.E)
tk.Label(self, text="Dosage Info", font =label_dfont).grid(row=3, column = 0, sticky=tk.W+tk.E)
self.numpills = tk.Spinbox(self, from_=1, to = 99, width=5,font = label_dfont)
self.numpills.grid(row=4, column = 1, pady=15, sticky=tk.W)
tk.Label(self, text="Number of Pills", font =label_dfont).grid(row=4, column = 0, sticky=tk.W+tk.E)
tk.Button(self, text="Encode Info", font=button_dfont, command=lambda:self.setinfo()).grid(row=4,column=2)
tk.Button(self, text="Return to start page", font=button_dfont,highlightthickness=0,
command=lambda: master.switch_frame(StartPage)).grid(row=5, columnspan = 3, sticky=tk.W+tk.E)
self.pinfo.delete(0, tk.END)
self.pname.delete(0, tk.END)
self.dinfo.delete(0, tk.END)
self.pname.insert(0,"<NAME>")
self.pinfo.insert(0,"Ibuprofen 400mg")
self.dinfo.insert(0,"1 pill taken 3x per day")
self.columnconfigure(1, weight=0)
self.columnconfigure(0, weight=10)
self.columnconfigure(2, weight=10)
def setinfo(self):
pill.setinfo(self.pname.get(),self.pinfo.get(),self.dinfo.get(),self.numpills.get())
class userframe(tk.Frame):
def __init__(self, master):
global time_dfont
global button_dfont
global dtime
global pill
homeuser = tk.Frame.__init__(self, master)
dtime = tk.StringVar()
tk.Label(self, text="Home User Mode", font=time_dfont,highlightthickness=0).grid(row=0, columnspan=2,column=0, padx=0, pady=0)
tk.Label(self, text = "Patient Name: "+pill.pname, font=label_dfont).grid(row=2,column=0)
tk.Label(self, text = "Pill info: "+pill.pinfo, font=label_dfont).grid(row=3,column=0,columnspan=2)
tk.Label(self, text = "Dosage: "+pill.dinfo, font=label_dfont).grid(row=4,column=0,columnspan=2)
self.pill_label = tk.Label(self, text = "Pills left: "+pill.getpills(), font=label_dfont)
self.pill_label.grid(row=2,column=1)
button_quit = tk.Button(self,
text="Quit",
font=button_dfont,
command=lambda: master.switch_frame(StartPage),
borderwidth=0,
highlightthickness=0,
fg='gray10',
bg='white')
button_dispense = tk.Button(self,
text="Dispense Pill",
font=button_dfont,
command=self.dispense,
borderwidth=0,
highlightthickness=0,
fg='gray10',
bg='white')
self.label_time = tk.Label(self,
textvariable=dtime,
font=time_dfont,
highlightthickness=0,
fg='black')
self.label_time.grid(row=1, column=0,columnspan=2, padx=0, pady=0)
button_quit.grid(row=5, column=1, padx=5, pady=5, sticky=tk.W+tk.E)
button_dispense.grid(row=5, column=0, padx=5, pady=5, sticky=tk.W+tk.E)
self.rowconfigure(0, weight=10)
self.rowconfigure(1, weight=10)
self.columnconfigure(1, weight=10)
self.columnconfigure(0, weight=10)
def dispense(self):
global dispensed
global dtime
dispensed = True
dtime.set("Dispensing Pill")
pill.dispense()
iPill.dispensePill()
self.pill_label.config(text="Pills left: "+pill.getpills())
dispensed = False
if __name__ == "__main__":
app = SampleApp()
app.toggle_fullscreen()
app.mainloop()
``` |
{
"source": "jonhilgart22/data-science-is-software",
"score": 3
} |
#### File: src/features/preprocess_solution.py
```python
from engarde.decorators import none_missing
@none_missing()
def clean_raw_data(df):
""" Takes a dataframe and performs four steps:
- Selects columns for modeling
- For numeric variables, replaces 0 values with mean for that region
- Fills invalid construction_year values with the mean construction_year
- Converts strings to categorical variables
:param df: A raw dataframe that has been read into pandas
:returns: A dataframe with the preprocessing performed.
"""
useful_columns = ['amount_tsh',
'gps_height',
'longitude',
'latitude',
'region',
'population',
'construction_year',
'extraction_type_class',
'management_group',
'quality_group',
'source_type',
'waterpoint_type',
'status_group']
zero_is_bad_value = ['longitude', 'population']
other_bad_value = ['latitude']
# subset to columns we care about
df = df[useful_columns]
for column, column_type in df.dtypes.iteritems():
# special case construction year
if column == 'construction_year':
invalid_rows = df.construction_year < 1000
valid_mean = int(df.construction_year[~invalid_rows].mean())
df.loc[invalid_rows, column] = valid_mean
# replace 0 values where they are not right
elif column in zero_is_bad_value:
df = replace_value_with_grouped_mean(df, 0, column, 'region')
elif column in other_bad_value:
df = replace_value_with_grouped_mean(df, -2e-8, column, 'region')
# strings to categoricals
elif column_type == "object":
df.loc[:, column] = df[column].astype('category')
return df
def replace_value_with_grouped_mean(df, value, column, to_groupby):
""" For a given numeric value (e.g., 0) in a particular column, take the
mean of column (excluding value) grouped by to_groupby and return that
column with the value replaced by that mean.
:param df: The dataframe to operate on.
:param value: The value in column that should be replaced.
:param column: The column in which replacements need to be made.
:param to_groupby: Groupby this variable and take the mean of column.
Replace value with the group's mean.
:returns: The data frame with the invalid values replaced
"""
invalid_mask = (df[column] == value)
# get the mean without the invalid value
means_by_group = (df[~invalid_mask]
.groupby(to_groupby)[column]
.mean())
# get an array of the means for all of the data
means_array = means_by_group[df[to_groupby].values].values
# assign the invalid values to the group means
df.loc[invalid_mask, column] = means_array[invalid_mask]
return df
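# Illustrative example with made-up values: the 0 population in region 'A' is replaced by
# the mean of region 'A' non-zero populations (10), while the other rows are untouched:
#   df = pd.DataFrame({'region': ['A', 'A', 'B'], 'population': [0, 10, 7]})
#   replace_value_with_grouped_mean(df, 0, 'population', 'region')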
```
#### File: src/tests/test_example.py
```python
import pytest
import numpy as np
import pandas as pd
from .example import replace_all_nulls_with_value
@pytest.fixture
def df_none_missing():
""" return a 3x3 dataframe with no missing values """
cols = ['a', 'b', 'c']
data = [[0, 1, 0], [0, 0, 1], [1, 1, 1]]
return pd.DataFrame(data, columns=cols)
@pytest.fixture
def df_missing(df_none_missing):
""" return a 3x3 dataframe with a couple of NaNs """
df = df_none_missing.copy()
df.ix[0, 2] = np.nan
df.ix[2, 1] = np.nan
return df
def test_replace_all_nulls_does_nothing_if_no_nulls(df_none_missing):
new_df = replace_all_nulls_with_value(df_none_missing, -1)
assert (df_none_missing.values == new_df.values).all()
assert pd.notnull(new_df.values).all()
def test_replace_all_nulls(df_missing):
n_null_before = pd.isnull(df_missing.values).sum()
assert n_null_before == 2
new_df = replace_all_nulls_with_value(df_missing, -1)
n_null_after = pd.isnull(new_df.values).sum()
assert n_null_after == 0
assert pd.notnull(new_df.values).all()
def test_engarde_rejects_replacing_nulls_with_nulls(df_missing):
with pytest.raises(AssertionError):
replace_all_nulls_with_value(df_missing, np.nan)
``` |
{
"source": "jonhilgart22/glavanize-projects",
"score": 3
} |
#### File: advanced_stats_bayesian_optimization/python_scripts/bayesian_optimization.py
```python
import numpy as np
import seaborn as sns
import scipy as sp
import functools
import numpy as np
from scipy.stats import multivariate_normal
import scipy.stats as stats
import time
import scipy as scipy
import sys
import pandas as pd
from scipy.stats import norm
from numpy import linalg as la
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
import itertools
__author__ = '<NAME>'
class IBO(object):
"""
IBO: Intelligent Bayesian Optimization
A class to perform Bayesian Optimization on a 1D or 2D domain.
Can maximize either an objective function or a true underlying function."""
def __init__(self, kernel = 'squared_kernel'):
"""Define the parameters for the bayesian optimization.
The train points should be x,y coordinate that you already know about your
function"""
if kernel == 'squared_kernel':
self.kernel = self.__squared_kernel__
elif kernel == 'matern':
self.kernel = self.__matern_kernel__
def fit(self, train_points_x, train_points_y,
test_domain, train_y_func, y_func_type = 'real',
samples = 10 , test_points_x = None, test_points_y = None,
model_train_points_x = None, model_train_points_y = None,
covariance_noise = 5e-5, n_posteriors = 30, kernel_params = None,
model_obj = GradientBoostingRegressor,
verbose = True):
"""Define the parameters for the GP.
PARAMS:
train_points_x, - x coordinates to train on
train_points_y, - resulting output from the function, either objective or
true function
test_domain - the domain to test
test_points_y - If using an objective function, this is from the
train test split data
test_points_x = if using an objective function, this is from the
train test split
model - the model to fit for use with the objective function. Currently
works with Gradient Boosting
y_func_type - either the real function or the objective function.
The objective function is implemented as negative RMSE (since BO is
a maximization procedure)
verbose = Whether to print out the points Bayesian Optimization is
picking
train_y_func - This can either be an objective function or a true function
kernel_params: dictionary of {'length':value} for squaredkernel
model_train_points: the training points for the objective function
"""
try:
type(train_points_x).__module__ == np.__name__
type(train_points_y).__module__ == np.__name__
except Exception as e:
print(e)
return ' You need to input numpy types'
# Store the training points
self.train_points_x = train_points_x
self.train_points_y = train_points_y
self.test_domain = test_domain
# setup the kernel parameters
if kernel_params != None:
self.squared_length = kernel_params['rbf_length']
else:
self.squared_length = None
# Y func can either be an objective function, or the true underlying func.
if y_func_type == 'real':
self.train_y_func = train_y_func
elif y_func_type == 'objective':
if model_obj == None:
return ' you need to pass in a model (GradientBoostingRegressor)'
# Only if using an objective function, from the 'test' split
self.test_points_x = test_points_x
self.test_points_y = test_points_y
self.model_train_points_x = model_train_points_x
self.model_train_points_y = model_train_points_y
# model to train and fit
self.model = model_obj
self.train_y_func = self.hyperparam_choice_function
# store the testing parameters
self.covariance_noise = covariance_noise
self.n_posteriors = n_posteriors
self.samples = samples
self.verbose = verbose
if self.train_points_x.shape[1] ==1: # one dimension
self.dimensions ='one'
elif self.train_points_x.shape[1] ==2:
self.dimensions = 'two'
else:
print('Either you entered more than two dimensions, \
or not a numpy array.')
print(type(self.train_points_x))
# create the generator
self.bo_gen = self.__sample_from_function__(verbose=self.verbose)
def predict(self):
"""returns x_sampled_points, y_sampled_points, best_x, best_y"""
x_sampled_points, y_sampled_points, sampled_var, \
best_x, best_y, improvements, domain, mus = next(self.bo_gen)
return x_sampled_points, y_sampled_points, best_x, best_y
def maximize(self, n_steps=10, verbose = None):
"""For the n_steps defined, find the best x and y coordinate
and return them.
Verbose controls whether to print out the points being sampled"""
verbose_ = self.verbose
self.samples = n_steps
bo_gen = self.__sample_from_function__(verbose = verbose_)
for _ in range(self.samples):
x_sampled_points, y_sampled_points, sampled_var, \
best_x, best_y, improvements, domain, mus = next(self.bo_gen)
self.best_x = best_x
self.best_y = best_y
# return the best PARAMS
return best_x, best_y
def __test_gaussian_process__(self, return_cov = False,
return_sample = False):
"""Test one new point in the Gaussian process or an array of points
Returns the mu, variance, as well as the posterior vector.
Improvements is the expected improvement for each potential test point.
Domain is the domain over which you are searching.
Return cov = True will return the full covariance matrix.
If return_sample= True
returns samples ( a vector) from the
informed posterior and the uninformed prior distribution
Covariance diagonal noise is used to help enforce positive definite matrices
"""
# Update the covaraince matrices
self.covariance_train_train = self.kernel(self.train_points_x,
self.train_points_x, train=True)
self.covariance_test_train = self.kernel(self.test_domain,
self.train_points_x)
self.covariance_test_test = self.kernel(self.test_domain,
self.test_domain)
# Use cholskey decomposition to increase speed for calculating mean
try :# First try,
L_test_test = np.linalg.cholesky(self.covariance_test_test + \
self.covariance_noise * np.eye(len(self.covariance_test_test)))
L_train_train = np.linalg.cholesky(self.covariance_train_train + \
self.covariance_noise * np.eye(len(self.covariance_train_train)))
Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)
mus = np.dot(Lk.T, np.linalg.solve(L_train_train,
self.train_points_y)).reshape(
(len(self.test_domain),))
# Compute the standard deviation so we can plot it
s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)
stdv = np.sqrt(abs(s2))
except Exception as e:
print(e)#LinAlgError: # In case the covariance matrix is not positive definite
# Find the near positive definite matrix to decompose
decompose_train_train = self.nearestPD(
self.covariance_train_train + self.covariance_noise * np.eye(
len(self.train_points_x)))
decompose_test_test = self.nearestPD(
self.covariance_test_test + self.covariance_noise * np.eye(
len(self.test_domain)))
# cholskey decomposition on the nearest PD matrix
L_train_train = np.linalg.cholesky(decompose_train_train)
L_test_test = np.linalg.cholesky(decompose_test_test)
Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)
mus = np.dot(Lk.T, np.linalg.solve(L_train_train,
self.train_points_y)).reshape((len(self.test_domain)),)
# Compute the standard deviation so we can plot it
s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)
stdv = np.sqrt(abs(s2))
# ##### FULL INVERSION ####
# mus = covariance_test_train @ np.linalg.pinv(covariance_train_train) @ train_y_numbers
# s2 = covariance_test_test - covariance_test_train @ np.linalg.pinv(covariance_train_train ) \
# @ covariance_test_train.T
def sample_from_posterior(n_priors=3):
"""Draw samples from the prior distribution of the GP.
len(test_x) is the number of samples to draw.
Resource: http://katbailey.github.io/post/gaussian-processes-for-dummies/.
N-Posteriors / N-Priors tells the number of functions to sample from the distribution"""
try: # try inside sample from posterior function
L = np.linalg.cholesky(self.covariance_test_test +
self.covariance_noise * np.eye(
len(self.test_domain))- np.dot(Lk.T, Lk))
except Exception as e:
print(e)
# Find the neareset Positive Definite Matrix
near_decompose = self.nearestPD(self.covariance_test_test +
self.covariance_noise * np.eye(
len(self.test_domain)) - np.dot(Lk.T, Lk))
L = np.linalg.cholesky(near_decompose.astype(float) )
# within posterior
# sample from the posterior
f_post = mus.reshape(-1,1) + np.dot(L, np.random.normal(
size=(len(self.test_domain), self.n_posteriors)))
# Sample X sets of standard normals for our test points,
# multiply them by the square root of the covariance matrix
f_prior_uninformed = np.dot(L_test_test,
np.random.normal(size=(len(self.test_domain), n_priors)))
# For the posterior, the columns are the vector for that function
return (f_prior_uninformed, f_post)
if return_cov == True:
return y_pred_mean.ravel(), var_y_pred_diag.ravel(), var_y_pred
if return_sample == True:
f_prior, f_post = sample_from_posterior()
return mus.ravel(), s2.ravel(), f_prior, f_post
else:
return mus.ravel(), s2.ravel()
def __sample_from_function__(self, verbose=None):
"""Sample N times from the unknown function and for each time find the
point that will have the highest expected improvement (find the maxima of the function).
Verbose signifies if the function should print out the points where it is sampling
Returns a generator of x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
for improvements. Mus and Vars are the mean and var for each sampled point
in the gaussian process.
Starts off the search for expected improvement with a coarse search and then hones in on
the domain the the highest expected improvement.
Note - the y-function can EITHER by the actual y-function (for evaluation
purposes, or an objective function
(i.e. - RMSE))"""
verbose = self.verbose
# for plotting the points sampled
x_sampled_points = []
y_sampled_points = []
best_x = self.train_points_x[np.argmax(self.train_points_y ),:]
best_y =self.train_points_y [np.argmax(self.train_points_y ),:]
for i in range(self.samples):
if i == 0:
if self.train_points_x .shape[1]==1: ## one dimensional case
testing_domain = np.array([self.test_domain]).reshape(-1,1)
else:
testing_domain = self.test_domain
# find the next x-point to sample
mus, vars_, prior, post = self.__test_gaussian_process__(
return_sample = True)
sigmas_post = np.var(post,axis=1)
mus_post = np.mean(post,axis=1)
# get the expected values from the posterior distribution
list_of_expected_improvements = self.expected_improvement(
mus_post, sigmas_post ,best_y)
max_improv_x_idx = np.argmax(np.array(
list_of_expected_improvements))
#print(max_improv_x_idx,'max_improv_x_idx')
max_improv_x = testing_domain[max_improv_x_idx]
# don't resample the same point
c = 1
while max_improv_x in x_sampled_points:
if c == 1:
if self.train_points_x .shape[1]==1:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)))
else:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)),axis=0)
c+=1
max_improv_x_idx = int(sorted_points_idx[c])
max_improv_x = testing_domain[max_improv_x_idx]
# only wait until we've gone through half of the list
if c > round(len(list_of_expected_improvements)/2):
max_improv_x_idx = int(
np.argmax(list_of_expected_improvements))
max_improv_x = testing_domain[max_improv_x_idx]
break
if self.train_points_x.shape[1]==1:
max_improv_y = self.train_y_func(max_improv_x)
else: # Two D
try: # see if we are passing in the actual function
max_improv_y = self.train_y_func(
max_improv_x[0], max_improv_x[1])
except: # we are passing the objective function in
max_improv_y = self.train_y_func(
max_improv_x[0], dimensions = 'two',
hyperparameter_value_two = max_improv_x[1])
if max_improv_y > best_y: ## use to find out where to search next
best_y = max_improv_y
best_x = max_improv_x
if verbose:
print(f"Bayesian Optimization just sampled point = {best_x}")
print(f"Best x (Bayesian Optimization) = {best_x},\
Best y = {best_y}")
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
# append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x,
max_improv_x))
self.train_points_y = np.vstack((self.train_points_y,
max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
# append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
if self.train_points_x.shape[1]==1:
testing_domain = np.array([testing_domain]).reshape(-1,1)
else:
testing_domain = self.test_domain
mus, vars_, prior, post = self.__test_gaussian_process__(
return_sample = True)
sigmas_post = np.var(post,axis=1)
mus_post = np.mean(post,axis=1)
# get the expected values from the posterior distribution
list_of_expected_improvements = self.expected_improvement(
mus_post, sigmas_post ,best_y)
max_improv_x_idx = np.argmax(list_of_expected_improvements)
max_improv_x = testing_domain[max_improv_x_idx]
# don't resample the same point
c = 1
while max_improv_x in x_sampled_points:
if c == 1:
if self.train_points_x .shape[1]==1:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)))
else:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)),axis=0)
c+=1
max_improv_x_idx = int(sorted_points_idx[c])
max_improv_x = testing_domain[max_improv_x_idx]
# only wait until we've gone through half of the list
if c > round(len(list_of_expected_improvements)/2):
max_improv_x_idx = int(
np.argmax(list_of_expected_improvements))
max_improv_x = testing_domain[max_improv_x_idx]
break
if self.train_points_x .shape[1]==1:
max_improv_y = self.train_y_func(max_improv_x)
else: # Two D
try: # see if we are passing in the actual function
max_improv_y = self.train_y_func(
max_improv_x[0], max_improv_x[1])
except: # we are passing the objective function in
max_improv_y = self.train_y_func(
max_improv_x[0], dimensions = 'two',
hyperparameter_value_two = max_improv_x[1])
if max_improv_y > best_y: ## use to find out where to search next
best_y = max_improv_y
best_x = max_improv_x
if verbose:
print(f"Bayesian Optimization just sampled point = {max_improv_x}")
print(f"Best x (Bayesian Optimization) = {best_x}, Best y = {best_y}")
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
# append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
# append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
def hyperparam_choice_function(self, hyperparameter_value,
dimensions = 'one', hyperparameter_value_two = None):
"""Returns the negative MSE of the input hyperparameter for the given
hyperparameter.
Used with GradientBoostingRegressor estimator currently
If dimensions = one, then search n_estimators. if dimension equal
two then search over n_estimators and max_depth"""
# define the model
model = self.model
# define the training points
train_points_x = self.model_train_points_x
train_points_y = self.model_train_points_y
if self.dimensions == 'one':
try:
m = model(n_estimators= int(hyperparameter_value))
except:
m = model(n_estimators= hyperparameter_value)
m.fit(train_points_x, train_points_y)
pred = m.predict(self.test_points_x )
n_mse = self.root_mean_squared_error(self.test_points_y , pred)
return n_mse
elif self.dimensions =='two':
try:
m = model(n_estimators = int(hyperparameter_value),
max_depth = int(hyperparameter_value_two))
except:
m = model(n_estimators = hyperparameter_value,
max_depth = hyperparameter_value_two)
m.fit(train_points_x, train_points_y)
pred = m.predict(self.test_points_x)
n_mse = self.root_mean_squared_error(self.test_points_y , pred)
return n_mse
else:
return ' We do not support this number of dimensions yet'
def root_mean_squared_error(self, actual, predicted, negative = True):
"""MSE of actual and predicted value.
Negative turn the MSE negative to allow for
maximization instead of minimization"""
if negative == True:
return - np.sqrt(sum((actual.reshape(-1,1) - predicted.reshape(-1,1))**2)
/len(actual))
else:
return np.sqrt(sum((actual.reshape(-1,1) - predicted.reshape(-1,1))**2)
/len(actual))
def expected_improvement(self, mean_x, sigma_squared_x,
y_val_for_best_hyperparameters, normal_dist=None,
point_est = False):
"""Finds the expected improvement of a point give the current best point.
If point_est = False, then computes the expected value on a vector
from the posterior distribution.
"""
with np.errstate(divide='ignore'): # in case sigma equals zero
# Expected val for one point
if point_est ==True:
sigma_x = np.sqrt(sigma_squared_x) # get the standard deviation from the variance
Z = (mean_x - y_val_for_best_hyperparameters) / sigma_x
if round(sigma_x,8) == 0:
return 0
else:
return (mean_x -
y_val_for_best_hyperparameters)*normal_dist.cdf(Z)+\
sigma_x*normal_dist.pdf(Z)
else:
# Sample from the posterior functions
for _ in range(len(mean_x)):
list_of_improvements = []
m_s = []
for m, z, s in zip(mean_x, ((mean_x -y_val_for_best_hyperparameters)\
/ np.std(sigma_squared_x)),np.sqrt(sigma_squared_x) ):
list_of_improvements.append(((m-y_val_for_best_hyperparameters)*\
norm().cdf(z)\
+s * norm().pdf(z)))
m_s.append(m)
return list_of_improvements
def nearestPD(self, A):
"""
#https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite/43244194#43244194
Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = la.cholesky(B)
return True
except la.LinAlgError:
return False
B = (A + A.T) / 2
_, s, V = la.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(la.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(la.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
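# Illustrative (hedged) usage sketch: when np.linalg.cholesky raises a
# LinAlgError for a covariance matrix built from the kernels below, the
# matrix can be repaired first, e.g. cov = self.nearestPD(cov).
# The variable name `cov` is hypothetical.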
def __squared_kernel__(self, a, b, param=2.0, train=False,
train_noise = 5e-3, vertical_scale=1.5):
"""Calculated the squared exponential kernel.
Adds a noise term for the covariance of the training data
Adjusting the param changes the difference where points will have a positive covariance
Returns a covaraince Matrix.
Vertical scale controls the vertical scale of the function"""
if self.squared_length != None:
vertical_scale = self.squared_length
if train == False:
# ensure a and b are numpy arrays
a = np.array(a)
b = np.array(b)
sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)
return vertical_scale*np.exp(-.5 * (1/param) * sqdist)
else:
# ensure a and b are numpy arrays
a = np.array(a)
b = np.array(b)
noisy_observations = train_noise*np.eye(len(a))
sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)
return vertical_scale*np.exp(-.5 * (1/param) * sqdist) + noisy_observations
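# For reference, this implements the squared exponential (RBF) kernel
#   k(a, b) = s^2 * exp(-||a - b||^2 / (2 * param))
# where s^2 is the vertical scale and param acts as the squared length scale;
# a small noise term is added to the diagonal for training points.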
def __matern_kernel__(self, a,b,C_smoothness=3/2,train=False, train_noise = 5e-2):
"""The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
c_smoother = inf = RBF
The train keyword is used to add noisy observations to the matrix"""
if C_smoothness not in [1/2,3/2]:
return "You choose an incorrect hyparameter, please choose either 1/2 or 3/2"
matrix_norm = np.array([np.linalg.norm(a[i] - b,axis=(1)) for i in range(len(a))])
if C_smoothness == 1/2:
if train == True:
return max(np.var(a),np.var(b)) * np.exp(-matrix_norm) + np.eye(len(matrix_norm))*train_noise
else:
return max(np.var(a),np.var(b)) * np.exp(-matrix_norm)
elif C_smoothness == 3/2:
if train == True:
return max(np.var(a),np.var(b))* (1
+ np.sqrt(3)*matrix_norm)*np.exp(-np.sqrt(3)*matrix_norm) \
+ np.eye(len(matrix_norm))*train_noise
else:
return max(np.var(a),np.var(b))* (1 +np.sqrt(3) *
matrix_norm) * np.exp(-np.sqrt(3)*matrix_norm)
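# For reference, the Matern kernels implemented above (with unit length scale):
#   nu = 1/2: k(r) = s^2 * exp(-r)
#   nu = 3/2: k(r) = s^2 * (1 + sqrt(3) * r) * exp(-sqrt(3) * r)
# where r = ||a - b|| and s^2 = max(var(a), var(b)); training adds diagonal noise.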
```
#### File: glavanize-projects/capstone_work/ESP_Markov_Model_Client_Lifetime.py
```python
from pgmpy.models import MarkovModel, BayesianModel
from pgmpy.factors import Factor
from pgmpy.inference import BeliefPropagation
import time
import numpy as np
from scipy import stats
def ESP_Joint_Product_Probabilities(week_n,increase_mmb=0,increase_cmma=0,increase_cm=0,increase_fx=0,
increase_loc=0, increase_es = 0,increase_checking=0):
"""Returns the conditions probabilities of the following ESP products.
'Money Market Bonus',
'Collateral MMA',
'Cash Management',
'FX Products',
'Letters of Credit',
'Enterprise Sweep',
'Checking USD'
Joint probabilities are from 2013-2016 GP with L10 desc data.
Returns a dictionary of each probability distribution given the time.
Takes input in weeks and converts to days internally in the function.
The parameters increase_mmb through increase_checking correspond to increasing
the probabilities of having these products by a certain percent."""
days = week_n*7
# find the probabilities given a month number for money market bonus
mmb1_cmma1 = np.poly1d([ -3.97387788e-08 , 8.39060495e-05 , -1.41648742e-03])
mmb1_cmma0 = np.poly1d([ 6.53083270e-09 ,-1.06768753e-05 , 8.97296652e-03] )
mmb0_cmma1 = np.poly1d([ -5.75924616e-09 , 2.91090945e-06 , 5.97039453e-03] )
mmb0_cmma0 = np.poly1d([ -9.17148387e-06 , 1.28446720e-02] )
mmb1_cm1 = np.poly1d([ -3.99173667e-08 , 8.52748866e-05 , -1.26911672e-03])
mmb1_cm0 = np.poly1d([ -1.42073046e-09 , -3.01074706e-06 , 7.24356190e-03])
mmb0_cm1 = np.poly1d([ -4.32310836e-06 , 6.61057651e-03] )
mmb0_cm0 = np.poly1d([ -1.04364552e-05 , 1.13630152e-02] )
mmb1_fx1 = np.poly1d([ 3.77558215e-08 , -1.70896360e-05 , 1.41902186e-02] )
mmb1_fx0 = np.poly1d([ -3.39320861e-09 , 1.00679851e-07, 7.34716596e-03] )
mmb0_fx1 = np.poly1d([ -7.11607895e-09 , 8.69248176e-06 , 1.55942016e-03])
mmb0_fx0 = np.poly1d([ 3.56992186e-09 , -1.07772712e-05 , 1.36477158e-02])
mmb1_loc1 = np.poly1d([ 5.25454187e-08 , 9.81576217e-04] )
mmb1_loc0 = np.poly1d([ -1.52993041e-07 , 9.99214116e-04] )
mmb0_loc1 = np.poly1d([ -3.56373660e-07 , 4.02453535e-04] )
mmb0_loc0 = np.poly1d([ 2.78458433e-09 , -5.55324556e-06 , 2.52137996e-03])
mmb1_es1 = np.poly1d([ -8.11515816e-09 , 1.30677967e-05 , -1.66164976e-03])
mmb1_es0 = np.poly1d([ -2.83726125e-09 , 3.02318628e-06 , 7.70547714e-04])
mmb0_es1 = np.poly1d([ -1.03463875e-07 , 2.17269614e-04])
mmb0_es0 = np.poly1d([ -1.72630448e-06 , 1.91353792e-03] )
mmb1_checking1 = np.poly1d([ 9.90340592e-11 ,-2.20755206e-07 , 2.08171476e-04 , 2.25396450e-02] )
mmb1_checking0 = np.poly1d([ -6.22848774e-08 , 6.20852344e-05])
mmb0_checking1 = np.poly1d([ 1.61567597e-08 , -5.48140827e-05 , 5.02368463e-02] )
mmb0_checking0 = np.poly1d([ -2.10425978e-06 , 2.14375451e-03] )
## Collateral MMA
cmma1_cm1 = np.poly1d([ -1.07147840e-07 , 2.62003505e-04 , 7.77949524e-02] ) # done
cmma1_cm0 = np.poly1d([ 3.94757263e-08 , -8.44541127e-05 , 4.60047128e-02] )# done
cmma0_cm1 = np.poly1d([ -9.19873088e-10 , -1.38993772e-06 , 3.55769565e-03] )
cmma0_cm0 = np.poly1d([ 7.66885633e-08 , -1.83903621e-04 , 1.18526969e-01] ) # done
cmma1_fx1 = np.poly1d([ -4.11203208e-08 , 1.25165510e-04 , 5.92837749e-03] )
cmma1_fx0 = np.poly1d([ 3.49627401e-09 , -2.55933675e-05, 3.06700660e-02])
cmma0_fx1 = np.poly1d([ 1.35117893e-08 , -1.18747924e-05 , 1.29550469e-02])
cmma0_fx0 = np.poly1d([ 2.88145904e-08 , -6.70744145e-05 , 4.35294657e-02])
cmma1_loc1 = np.poly1d([ -2.11396231e-09 , 1.63332685e-05 , 4.88690981e-03] )
cmma1_loc0 = np.poly1d([ 1.03395083e-09 , -3.02603195e-06 , 2.09169313e-03] )
cmma0_loc1 = np.poly1d([ 0.00010498 ,-0.02384952])
cmma0_loc0 = np.poly1d([ 3.89447845e-10 ,-9.42534361e-06 , 1.17229878e-02] )
cmma1_es1 = np.poly1d([ -1.45986565e-09 , 2.12493933e-06 , 3.73789940e-03] )
cmma1_es0 = np.poly1d([ 3.92800083e-12 , -1.04503251e-08 , 5.45319813e-06 , 1.89477511e-03] )
cmma0_es1 = np.poly1d([ -1.56676750e-09 , 2.07520362e-06, 1.30382436e-04] )
cmma0_es0 = np.poly1d([ -1.03287399e-12 , 3.69559395e-09 ,-6.11002712e-06 , 3.96829922e-03] )
cmma1_checking1 = np.poly1d([ 7.93112441e-05 , 1.61708520e-01] )
cmma1_checking0 = np.poly1d([ 2.53481141e-05 , 1.44230769e-02] )
cmma0_checking1 = np.poly1d([ 8.71213861e-08 , -1.96494017e-04 , 1.33087417e-01] )
cmma0_checking0 = np.poly1d([ 8.58582251e-09 , -2.12376410e-05, 1.44889333e-02] )
# Cash Management
cm1_fx1 = np.poly1d([ 6.33125977e-05 , 1.90599649e-02] )
cm1_fx0 = np.poly1d([ 9.11177591e-11 , -1.48383331e-05 , 2.08985055e-02] )
cm0_fx1 = np.poly1d([ 7.24260624e-10, -4.41520195e-06 , 1.34512441e-02])
cm0_fx0 = np.poly1d([ 3.34690552e-08 , -8.19709941e-05 , 5.16518003e-02] )
cm1_loc1 = np.poly1d([ 1.19793814e-08 ,-4.28289261e-06 , 2.90739113e-03])
cm1_loc0 = np.poly1d([ 4.46840142e-10 , -1.47337813e-06 , 1.10497669e-03])
cm0_loc1 = np.poly1d([ 3.74222984e-10 , -2.14616795e-06 , 2.07542983e-03])
cm0_loc0 = np.poly1d([ 5.01831593e-09 , -1.05949007e-05 , 5.24536410e-03])
cm1_es1 = np.poly1d([ -9.87965875e-10 , 1.00430187e-06 , 3.88336150e-03] )
cm1_es0 = np.poly1d([ -2.32181212e-09 , 1.44931612e-06 , 2.01929468e-03])
cm0_es1 = np.poly1d([ 1.10258527e-09 , -2.63413534e-06 , 1.51801238e-03] )
cm0_es0 = np.poly1d([ -2.42557725e-06 , 2.55554739e-03] )
cm1_checking1 = np.poly1d([ 1.16641954e-04 , 1.35553265e-01] )
cm1_checking0 = np.poly1d([ -2.83461971e-08 , 2.88136671e-05] )
cm0_checking1 = np.poly1d([ -9.72041225e-05 , 1.21239440e-01])
cm0_checking0 = np.poly1d([ -9.07981889e-06 , 1.22044805e-02] )
# FX Product
fx1_loc1 = np.poly1d([ 4.03018760e-08 , -3.23774136e-05 , 6.69409722e-03] )
fx1_loc0 = np.poly1d([ -8.32916056e-10 , -4.01476298e-07 , 1.80753249e-03] )
fx0_loc1 = np.poly1d( [ -8.79676701e-09 , 1.49704286e-05 ,-2.35403981e-04])
fx0_loc0 = np.poly1d([ 4.20273828e-09 , -1.17805576e-05 , 8.16185994e-03])
fx1_es1 = np.poly1d([ -8.79344719e-07 , 3.11640690e-03] )
fx1_es0 = np.poly1d([ 6.70680662e-06 , -2.38916674e-03] )
fx0_es1 = np.poly1d([ -1.39399064e-06 , 2.63688800e-03] )
fx0_es0 = np.poly1d([ 1.65322255e-07 , 2.67717965e-03])
fx1_checking1 = np.poly1d([ 0.00015544 , 0.11177389] )
fx1_checking0 = np.poly1d([ -5.76078153e-08 , 5.73748319e-05])
fx0_checking1 = np.poly1d([ 8.65723071e-08 ,-2.47578484e-04 , 1.92836896e-01] )
fx0_checking0 = np.poly1d([ -1.12875457e-05 , 1.35901392e-02] )
# Letters of Credit
loc1_es1 = np.poly1d([ 5.30097525e-07 , -7.69620529e-05] )
loc1_es0 = np.poly1d([ 1.08483248e-05 , -4.31603149e-03] )
loc0_es1 = np.poly1d([ 2.77403931e-07 , 8.97384536e-05] )
loc0_es0 = np.poly1d( [ -1.86682330e-06 , 2.59526233e-03])
loc1_checking1 = np.poly1d([ 1.98720295e-08 ,-2.25224995e-06 , 8.08277786e-03] )
loc1_checking0 = np.poly1d([ 1.19975953e-08 , -4.36318499e-06 , 8.83611847e-03] )
loc0_checking1 = np.poly1d([ 8.23942003e-10 , -1.31357980e-05 , 1.55262399e-02] )
loc0_checking0 = np.poly1d([ 1.73617194e-09 , -3.13832001e-06 , 1.19825383e-03] )
# Enterprise sweep
es1_checking1 = np.poly1d([ -1.95193364e-06 , 1.19513294e-02])
es1_checking0 = np.poly1d([ -5.76078153e-08 , 5.73748319e-05])
es0_checking1 = np.poly1d([ 2.35648445e-08 , -3.48007869e-05 , 1.76964238e-02] )
es0_checking0 = np.poly1d([ 1.14997040e-09 , -2.08301674e-06 , 7.98522218e-04])
# return the probabilities in the form of a dictionary
# ensure that nothing has a 0% probability (a zero would break the Markov model)
money_market_joint_probabilities = {}
#print(mmb1_cmma1 , 'mmb1_cmma1')
#print(mmb1_cmma1(days),'mmb1_cmma1(days)')
money_market_joint_probabilities['mmb1_cmma1'] = mmb1_cmma1(days)
money_market_joint_probabilities['mmb1_cmma0'] = mmb1_cmma0(days) + increase_mmb
money_market_joint_probabilities['mmb0_cmma1'] = mmb0_cmma1(days) + increase_cmma
money_market_joint_probabilities['mmb0_cmma0'] = mmb0_cmma0(days)
money_market_joint_probabilities['mmb1_checking1'] = mmb1_checking1(days)
money_market_joint_probabilities['mmb1_checking0'] = mmb1_checking0(days) + increase_mmb
money_market_joint_probabilities['mmb0_checking1'] = mmb0_checking1(days) + increase_checking
money_market_joint_probabilities['mmb0_checking0'] = mmb0_checking0(days)
money_market_joint_probabilities['mmb1_cm1'] = mmb1_cm1(days)
money_market_joint_probabilities['mmb1_cm0'] = mmb1_cm0(days) + increase_mmb
money_market_joint_probabilities['mmb0_cm1'] =mmb0_cm1(days) + increase_cm
money_market_joint_probabilities['mmb0_cm0'] = mmb0_cm0(days)
money_market_joint_probabilities['mmb1_fx1'] =mmb1_fx1(days)
money_market_joint_probabilities['mmb1_fx0'] = mmb1_fx0(days) + increase_mmb
money_market_joint_probabilities['mmb0_fx0'] = mmb0_fx0(days)
money_market_joint_probabilities['mmb0_fx1'] = mmb0_fx1(days) + increase_fx
money_market_joint_probabilities['mmb1_loc1'] = mmb1_loc1(days)
money_market_joint_probabilities['mmb1_loc0'] = mmb1_loc0(days) + increase_mmb
money_market_joint_probabilities['mmb0_loc1'] = mmb0_loc1(days) + increase_loc
money_market_joint_probabilities['mmb0_loc0'] = mmb0_loc0(days)
money_market_joint_probabilities['mmb1_es1'] = mmb1_es1(days)
money_market_joint_probabilities['mmb1_es0'] =mmb1_es0(days) + increase_mmb
money_market_joint_probabilities['mmb0_es1'] = mmb0_es1(days) + increase_es
money_market_joint_probabilities['mmb0_es0'] =mmb0_es0(days)
money_market_joint_probabilities['mmb1_checking1'] = mmb1_checking1(days)
money_market_joint_probabilities['mmb1_checking0'] = mmb1_checking0(days) + increase_mmb
money_market_joint_probabilities['mmb0_checking1'] = mmb0_checking1(days) + increase_checking
money_market_joint_probabilities['mmb0_checking0'] = mmb0_checking0(days)
money_market_joint_probabilities['cmma1_cm1'] = cmma1_cm1(days)
money_market_joint_probabilities['cmma1_cm0'] =cmma1_cm0(days) + increase_cmma
money_market_joint_probabilities['cmma0_cm1'] = cmma0_cm1(days) + increase_cm
money_market_joint_probabilities['cmma0_cm0'] = cmma0_cm0(days)
money_market_joint_probabilities['cmma1_fx1'] = cmma1_fx1(days)
money_market_joint_probabilities['cmma1_fx0'] = cmma1_fx0(days) + increase_cmma
money_market_joint_probabilities['cmma0_fx1'] =cmma0_fx1(days) + increase_fx
money_market_joint_probabilities['cmma0_fx0'] = cmma0_fx0(days)
money_market_joint_probabilities['cmma1_loc1'] = cmma1_loc1(days)
money_market_joint_probabilities['cmma1_loc0'] =cmma1_loc0(days) + increase_cmma
money_market_joint_probabilities['cmma0_loc1'] = cmma0_loc1(days) + increase_loc
money_market_joint_probabilities['cmma0_loc0'] = cmma0_loc0(days)
money_market_joint_probabilities['cmma1_es1'] = cmma1_es1(days)
money_market_joint_probabilities['cmma1_es0'] = cmma1_es0(days) + increase_cmma
money_market_joint_probabilities['cmma0_es1'] = cmma0_es1(days) + increase_es
money_market_joint_probabilities['cmma0_es0'] = cmma0_es0(days)
money_market_joint_probabilities['cmma1_checking1'] = cmma1_checking1(days)
money_market_joint_probabilities['cmma1_checking0'] =cmma1_checking0(days) + increase_cmma
money_market_joint_probabilities['cmma0_checking1'] = cmma0_checking1(days) + increase_checking
money_market_joint_probabilities['cmma0_checking0'] = cmma0_checking0(days)
money_market_joint_probabilities['cm1_fx1'] = cm1_fx1(days)
money_market_joint_probabilities['cm1_fx0'] = cm1_fx0(days) + increase_cm
# if round( cm0_fx1(days),3)== 0:
money_market_joint_probabilities['cm0_fx1'] = cm0_fx1(days) + increase_fx
money_market_joint_probabilities['cm0_fx0'] = cm0_fx0(days)
money_market_joint_probabilities['cm1_loc1'] = cm1_loc1(days)
money_market_joint_probabilities['cm1_loc0'] = cm1_loc0(days) + increase_cm
money_market_joint_probabilities['cm0_loc1'] =cm0_loc1(days) + increase_loc
money_market_joint_probabilities['cm0_loc0'] =cm0_loc0(days)
money_market_joint_probabilities['cm1_es1'] =cm1_es1(days)
money_market_joint_probabilities['cm1_es0'] = cm1_es0(days) + increase_cm
money_market_joint_probabilities['cm0_es1'] = cm0_es1(days) + increase_es
money_market_joint_probabilities['cm0_es0'] = cm0_es0(days)
money_market_joint_probabilities['cm1_checking1'] = cm1_checking1(days)
money_market_joint_probabilities['cm1_checking0'] = cm1_checking0(days)+ increase_cm
money_market_joint_probabilities['cm0_checking1'] = cm0_checking1(days) + increase_checking
money_market_joint_probabilities['cm0_checking0'] =cm0_checking0(days)
money_market_joint_probabilities['fx1_loc1'] =fx1_loc1(days)
money_market_joint_probabilities['fx1_loc0'] = fx1_loc0(days) + increase_fx
money_market_joint_probabilities['fx0_loc1'] = fx0_loc1(days) + increase_loc
money_market_joint_probabilities['fx0_loc0'] = fx0_loc0(days)
money_market_joint_probabilities['fx1_es1'] = fx1_es1(days)
money_market_joint_probabilities['fx1_es0'] = fx1_es0(days)+ increase_fx
money_market_joint_probabilities['fx0_es1'] = fx0_es1(days) + increase_es
money_market_joint_probabilities['fx0_es0'] = fx0_es0(days)
money_market_joint_probabilities['fx1_checking1'] = fx1_checking1(days)
money_market_joint_probabilities['fx1_checking0'] = fx1_checking0(days)+ increase_fx
money_market_joint_probabilities['fx0_checking1'] = fx0_checking1(days) + increase_checking
money_market_joint_probabilities['fx0_checking0'] = fx0_checking0(days)
money_market_joint_probabilities['loc1_es1'] =loc1_es1(days)
money_market_joint_probabilities['loc1_es0'] = loc1_es0(days) + increase_loc
money_market_joint_probabilities['loc0_es1'] = loc0_es1(days) + increase_es
money_market_joint_probabilities['loc0_es0'] = loc0_es0(days)
money_market_joint_probabilities['loc1_checking1'] = loc1_checking1(days)
money_market_joint_probabilities['loc1_checking0'] = loc1_checking0(days) + increase_loc
money_market_joint_probabilities['loc0_checking1'] = loc0_checking1(days)
money_market_joint_probabilities['loc0_checking0'] = loc0_checking0(days)
money_market_joint_probabilities['es1_checking1'] = es1_checking1(days)
money_market_joint_probabilities['es1_checking0'] = es1_checking0(days) + increase_es
money_market_joint_probabilities['es0_checking1'] = es0_checking1(days) + increase_checking
money_market_joint_probabilities['es0_checking0'] = es0_checking0(days)
return money_market_joint_probabilities
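# Illustrative (hedged) sketch: inspect the implied joint probabilities for week 10,
# e.g. probs = ESP_Joint_Product_Probabilities(10); probs['mmb1_checking1'] is the
# value the fitted polynomials imply for P(money market bonus = 1, checking = 1).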
def ESP_Markov_Model_Joint_Prob(esp_money_market_jointprob_probabilities,week_n_one_time= None,
product_name = None,range_of_weeks=24,evidence_=None,single=False):
"""Returns the probability of having a given ESP product during a certain month.
Parameters:
esp_money_market_jointprob_probabilities: the dictionary of joint probabilities between each of the products
week_n_one_time
single , goes along with week_n_one_time . These parameters are used for calculating the probabilities of a given
product ONCE. If you want to infer for multiple weeks, leave these parameters along and you can change the
range_of_weeks parameters.
The range_of_weeks parameter will run a loop for that number of weeks to perform probability inference over.
This model works using BeliefPropogation to infer the probability of each product, given evidence from other
products.
Returns the probabilities associated with each product.
"""
#Record the probabilities of difference products
prob_checking_original = []
start_time = time.time()
prob_mmb = []
prob_cmma = []
prob_cm = []
prob_fx = []
prob_loc = []
prob_es = []
prob_checking = []
prob_given_month_no_priors_having_product = {}
products =['money_market_bonus','collateral_mma','cash_management',
'fx_products','letters_of_credit','enterprise_sweep','checking_usd']
# Define the factors (joint probabilities) for our markov model
model = MarkovModel([('money_market_bonus', 'collateral_mma'), ('money_market_bonus', 'checking_usd'),
('money_market_bonus', 'cash_management'), ('money_market_bonus', 'fx_products'),
('money_market_bonus', 'letters_of_credit'), ('money_market_bonus', 'enterprise_sweep'),
('collateral_mma','cash_management'),('collateral_mma', 'fx_products'),('collateral_mma', 'letters_of_credit'),
('collateral_mma', 'enterprise_sweep'),('collateral_mma', 'checking_usd'),('cash_management', 'fx_products'),
('cash_management', 'fx_products'),('cash_management', 'letters_of_credit'),('cash_management', 'enterprise_sweep'),
('cash_management', 'checking_usd'),('fx_products', 'letters_of_credit'),('fx_products', 'enterprise_sweep'),
('fx_products', 'checking_usd'),('letters_of_credit', 'enterprise_sweep'),('letters_of_credit', 'checking_usd'),
('enterprise_sweep', 'checking_usd')])
def markov_inference(dict_of_esp_jointprob):
"""Calculate the markov model """
factor_mmb_cmma = Factor(variables=['money_market_bonus', 'collateral_mma'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_cmma0'], dict_of_esp_jointprob['mmb0_cmma1'],
dict_of_esp_jointprob['mmb1_cmma0'], dict_of_esp_jointprob['mmb1_cmma1']])
factor_mmb_cm = Factor(variables=['money_market_bonus', 'cash_management'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_cm0'], dict_of_esp_jointprob['mmb0_cm1'],
dict_of_esp_jointprob['mmb1_cm0'], dict_of_esp_jointprob['mmb1_cm1']])
factor_mmb_fx = Factor(variables=['money_market_bonus', 'fx_products'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_fx0'], dict_of_esp_jointprob['mmb0_fx1'],
dict_of_esp_jointprob['mmb1_fx0'], dict_of_esp_jointprob['mmb1_fx1']])
factor_mmb_loc = Factor(variables=['money_market_bonus', 'letters_of_credit'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_loc0'], dict_of_esp_jointprob['mmb0_loc1'],
dict_of_esp_jointprob['mmb1_loc0'], dict_of_esp_jointprob['mmb1_loc1']])
factor_mmb_es = Factor(variables=['money_market_bonus', 'enterprise_sweep'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_es0'], dict_of_esp_jointprob['mmb0_es1'],
dict_of_esp_jointprob['mmb1_es0'], dict_of_esp_jointprob['mmb1_es1']])
factor_mmb_checking = Factor(variables=['money_market_bonus', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['mmb0_checking0'], dict_of_esp_jointprob['mmb0_checking1'],
dict_of_esp_jointprob['mmb1_checking0'], dict_of_esp_jointprob['mmb1_checking1']])
# collateral mma
factor_cmma_cm = Factor(variables=['collateral_mma','cash_management'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cmma0_cm0'], dict_of_esp_jointprob['cmma0_cm1'],
dict_of_esp_jointprob['cmma1_cm0'], dict_of_esp_jointprob['cmma1_cm1']])
factor_cmma_fx = Factor(variables=['collateral_mma', 'fx_products'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cmma0_fx0'], dict_of_esp_jointprob['cmma0_fx1'],
dict_of_esp_jointprob['cmma1_fx0'], dict_of_esp_jointprob['cmma1_fx1']])
factor_cmma_loc = Factor(variables=['collateral_mma', 'letters_of_credit'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cmma0_loc0'], dict_of_esp_jointprob['cmma0_loc1'],
dict_of_esp_jointprob['cmma1_loc0'], dict_of_esp_jointprob['cmma1_loc1']])
factor_cmma_es= Factor(variables=['collateral_mma', 'enterprise_sweep'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cmma0_es0'], dict_of_esp_jointprob['cmma0_es1'],
dict_of_esp_jointprob['cmma1_es0'], dict_of_esp_jointprob['cmma1_es1']])
factor_cmma_checking = Factor(variables=['collateral_mma', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cmma0_checking0'], dict_of_esp_jointprob['cmma0_checking1'],
dict_of_esp_jointprob['cmma1_checking0'],dict_of_esp_jointprob['cmma1_checking1']])
# cash management
factor_cm_fx = Factor(variables=['cash_management', 'fx_products'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cm0_fx0'], dict_of_esp_jointprob['cm0_fx1'],
dict_of_esp_jointprob['cm1_fx0'], dict_of_esp_jointprob['cm1_fx1']])
factor_cm_loc = Factor(variables=['cash_management', 'letters_of_credit'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cm0_loc0'], dict_of_esp_jointprob['cm0_loc1'],
dict_of_esp_jointprob['cm1_loc0'], dict_of_esp_jointprob['cm1_loc1']])
factor_cm_es= Factor(variables=['cash_management', 'enterprise_sweep'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cm0_es0'], dict_of_esp_jointprob['cm0_es1'],
dict_of_esp_jointprob['cm1_es0'], dict_of_esp_jointprob['cm1_es1']])
factor_cm_checking = Factor(variables=['cash_management', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['cm0_checking0'], dict_of_esp_jointprob['cm0_checking1'],
dict_of_esp_jointprob['cm1_checking0'], dict_of_esp_jointprob['cm1_checking1']])
# FX products
factor_fx_loc = Factor(variables=['fx_products', 'letters_of_credit'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['fx0_loc0'], dict_of_esp_jointprob['fx0_loc1'],
dict_of_esp_jointprob['fx1_loc0'], dict_of_esp_jointprob['fx1_loc1']])
factor_fx_es= Factor(variables=['fx_products', 'enterprise_sweep'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['fx0_es0'], dict_of_esp_jointprob['fx0_es1'],
dict_of_esp_jointprob['fx1_es0'], dict_of_esp_jointprob['fx1_es1']])
factor_fx_checking = Factor(variables=['fx_products', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['fx0_checking0'], dict_of_esp_jointprob['fx0_checking1'],
dict_of_esp_jointprob['fx1_checking0'], dict_of_esp_jointprob['fx1_checking1']])
# letters of credit
factor_loc_es= Factor(variables=['letters_of_credit', 'enterprise_sweep'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['loc0_es0'], dict_of_esp_jointprob['loc0_es1'],
dict_of_esp_jointprob['loc1_es0'], dict_of_esp_jointprob['loc1_es1']])
factor_loc_checking = Factor(variables=['letters_of_credit', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['loc0_checking0'], dict_of_esp_jointprob['loc0_checking1'],
dict_of_esp_jointprob['loc1_checking0'], dict_of_esp_jointprob['loc1_checking1']])
#enterprise sweep
factor_es_checking = Factor(variables=['enterprise_sweep', 'checking_usd'],
cardinality=[2, 2],
values=[dict_of_esp_jointprob['es0_checking0'], dict_of_esp_jointprob['es0_checking1'],
dict_of_esp_jointprob['es1_checking0'], dict_of_esp_jointprob['es1_checking1']])
# built the markov model
model.add_factors(factor_mmb_cmma , factor_mmb_cm, factor_mmb_fx, factor_mmb_loc,factor_mmb_es, factor_mmb_checking,
factor_cmma_cm , factor_cmma_fx, factor_cmma_loc, factor_cmma_es,factor_cmma_checking,
factor_cm_fx, factor_cm_loc, factor_cm_es, factor_cm_checking , factor_fx_loc,
factor_fx_es , factor_fx_checking, factor_loc_es, factor_loc_checking , factor_es_checking )
belief_propagation = BeliefPropagation(model)
all_products = ['money_market_bonus','collateral_mma', 'cash_management','enterprise_sweep',
'fx_products','letters_of_credit','checking_usd']
# perform inference for each product, excluding that product from the evidence
for prod in all_products:
if evidence_==None:
new_evidence=evidence_
else:
new_evidence = {key: value for key, value in evidence_.items()
if key != prod}
# perform belief inference on only one product at a time
belief_inference_products = str(prod)
# belief propogation on one product at a time given evidence from all other products
belief = belief_propagation.query(variables=[belief_inference_products], evidence=new_evidence)
try:
#mmb = belief_mmb['money_market_bonus'].values[1]
mmb = belief['money_market_bonus'].values[1]
if mmb <0 :
mmb = .0000001
elif mmb >1:
mmb =1
prob_mmb.append(mmb)# one is having the product
except: # can't perform inference on this product
pass
try:
cmma = belief['collateral_mma'].values[1]
if cmma <0:
cmma = .0000001
elif cmma >1:
cmma =1
prob_cmma.append(cmma)
except:## don't have this product
pass
try:
cm = belief['cash_management'].values[1]
if cm <0:
cm = .0000001
elif cm >1:
cm =1
prob_cm.append(cm)
except:
pass
try:
checking = belief['checking_usd'].values[1]
if checking <0:
checking = .0000001
elif checking >1:
checking =1
prob_checking.append(checking)
except:
pass
try:
fx = belief['fx_products'].values[1]
if fx <0:
fx = .0000001
elif fx >1:
fx =1
prob_fx.append(fx)
except:
pass
try:
loc = belief['letters_of_credit'].values[1]
if loc <0:
loc = .0000001
elif loc > 1:
loc = 1
prob_loc.append(loc)
except:
pass
try:
es = belief['enterprise_sweep'].values[1]
if es<0:
es = .0000001
elif es >1:
es = 1
prob_es.append(es)
except:
pass
if single==False:
for week_n_loop in range(range_of_weeks):
dict_of_esp_jointprob = esp_money_market_jointprob_probabilities(week_n_loop)
markov_inference(dict_of_esp_jointprob)
else:
dict_of_esp_jointprob = esp_money_market_jointprob_probabilities(week_n_one_time)
markov_inference(dict_of_esp_jointprob)
# the order of the factor model is a0_b0, a0_b1, ,a1_b0, a1_b1
#http://conference.scipy.org/proceedings/scipy2015/pdfs/ankur_ankan.pdf
end_time = time.time()
print('{} weeks took {} seconds'.format(range_of_weeks, end_time - start_time))
return prob_checking[0], prob_cmma[0], prob_mmb[0], prob_cm[0], prob_fx[0], prob_loc[0], prob_es[0]
if __name__ == '__main__':
week_n = 1
# can test our different starting evidence at different week numbers
checking_prob, cmma_prob, mmb_prob, cm_prob, fx_prob ,loc_prob, es_prob = \
ESP_Markov_Model_Joint_Prob(ESP_Joint_Product_Probabilities,single=True,
week_n_one_time=week_n,
evidence_ = {'checking_usd':1,'money_market_bonus':1})
# evidence_={'money_market_bonus':0,'collateral_mma':0,
# 'cash_management':0,'enterprise_sweep':0,
# 'fx_products':0,'letters_of_credit':0,'checking_usd':1})
print(checking_prob, ' checking prob')
print(cmma_prob,'cmma prob')
print(mmb_prob,'mmb prob')
print(cm_prob,' cm prob')
print(fx_prob, ' fx prob')
print(loc_prob,'loc prob')
print(es_prob,'es prob')
print(week_n , ' week number for inference')
```
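A minimal usage sketch, mirroring the `__main__` block above, that sweeps the belief-propagation inference over several weeks to see how the checking probability evolves given starting evidence; the loop bounds and list name are illustrative only.
```python
# Hedged sketch: sweep the Markov-model inference over the first 12 weeks.
from ESP_Markov_Model_Client_Lifetime import ESP_Joint_Product_Probabilities, \
    ESP_Markov_Model_Joint_Prob

checking_over_time = []
for week in range(1, 13):
    probs = ESP_Markov_Model_Joint_Prob(ESP_Joint_Product_Probabilities,
                                        single=True, week_n_one_time=week,
                                        evidence_={'checking_usd': 1})
    checking_over_time.append(probs[0])  # checking probability is returned first
print(checking_over_time)
```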
#### File: capstone_work/scipts_to_run_parallel_simulations/esp_simulation_SVB_checking_mmb.py
```python
import simpy
import scipy.stats as stats
import pandas as pd
import numpy as np
import time
import pickle
import sys
from esp_product_revenue import ESP_revenue_predictions
from ESP_Markov_Model_Client_Lifetime import ESP_Joint_Product_Probabilities, \
ESP_Markov_Model_Joint_Prob
__author__ = '<NAME>'
class Client(object):
"""This is the base class to represent client that enter the bank.
The class will contain attributes such as lifetime, when their bank account
was opened, simpy requests for different resources .
A general class to represent all of the atributes of a client.
Used in the Markov MOdel to make product inferences. """
def __init__(self, client_id):
"""Initialize with the client ID and lifetime. As the time progresses,
keep track of different events that occur."""
self.client_id = client_id
self.client_lifetime = None
self.time_bank_closed = None
self.time_credit_card_opened = None
self.time_bank_was_opened = None
# Store the resource requests for SImpy
self.esp_open_money_market_bonus_request = None
self.esp_open_collateral_mma_request = None
self.esp_open_cash_management_request = None
self.esp_open_fx_request = None
self.esp_open_letters_of_credit_request = None
self.esp_open_enterprise_sweep_request = None
self.esp_open_checking_request = None
self.esp_client_alive_resource_request = None
self.have_mmb = 0
self.have_cmma = 0
self.have_cm = 0
self.have_fx = 0
self.have_loc = 0
self.have_es = 0
self.have_checking = 0
self.client_age = None
self.close_account_process = None
class ESP_flow(object):
"""Model cclients in ESP opening up produts over time.
Client lifetime drawn from distribution of client lifetimes from 2013-2016.
The probability of ech product is inferred from a Markov Model, where the
factors between the product nodes represent the joint probabilities. These
join probabilities are updated are every week number to performance inference.
The revenue per product is drawn from 2016 historical data (per month)
The number of clients per week is drawn from 2016 data.
Note, all time units are in terms of one week. One day would correspond
to 1/7 of a week or .143."""
def __init__(self, env, number_of_weeks_to_run,
yearly_interest_rate,
increase_esp_clients_percent_per_week =0,
esp_client_alive_resource = 10000,
esp_open_money_market_bonus_capacity=5000,
esp_open_collateral_mma_capacity =5000,
esp_open_cash_management_capacity = 5000, esp_fx_capacity = 5000,
esp_open_letters_of_credit_capacity = 5000,
esp_open_enterprise_sweep_capacity = 5000, esp_open_checking_capacity = 5000,
cc_capacity=200, esp_capacity = 5000,
stripe_capacity=3000,
evidence_ = None):
self.env = env
self.list_of_all_clients = []
self.weekly_interest_rate = self.yearly_to_weekly_interest_rate_conversion(
yearly_interest_rate)
self.number_of_weeks_to_run = number_of_weeks_to_run
self.esp_money_market_bonus_resource = simpy.Resource(env, capacity=esp_open_money_market_bonus_capacity)
self.esp_collateral_mma_resource = simpy.Resource(env, capacity=esp_open_collateral_mma_capacity)
self.esp_cash_management_resource= simpy.Resource(env, capacity=esp_open_cash_management_capacity)
self.esp_fx_resource = simpy.Resource(env, capacity=esp_fx_capacity)
self.esp_letters_of_credit_resource = simpy.Resource(env, capacity=esp_open_letters_of_credit_capacity )
self.esp_enterprise_sweep_resource = simpy.Resource(env, capacity=esp_open_enterprise_sweep_capacity )
self.esp_checking_resource = simpy.Resource(env, capacity= esp_open_checking_capacity )
self.esp_client_alive_resource = simpy.Resource(env, capacity= esp_client_alive_resource )
self.time_series_total_clients = []
self.time_series_cumulative_clients = []
self.time_series_esp_money_market_bonus = []
self.time_series_esp_collateral_mma = []
self.time_series_esp_cash_management = []
self.time_series_esp_fx = []
self.time_series_esp_letters_of_credit = []
self.time_series_esp_enterprise_sweep = []
self.time_series_esp_checking= []
self.time_series_esp_money_market_bonus_total_weekly_rev= []
self.time_series_esp_collateral_mma_total_weekly_rev = []
self.time_series_esp_cash_management_total_weekly_rev= []
self.time_series_esp_fx_total_weekly_rev= []
self.time_series_esp_letters_of_credit_total_weekly_rev = []
self.time_series_esp_enterprise_sweep_total_weekly_rev= []
self.time_series_esp_checking_total_weekly_rev = []
self.time_series_esp_money_market_bonus_rev_per_customer = []
self.time_series_esp_collateral_mma_rev_per_customer = []
self.time_series_esp_cash_management_rev_per_customer = []
self.time_series_esp_fx_rev_per_customer = []
self.time_series_esp_letters_of_credit_rev_per_customer = []
self.time_series_esp_enterprise_sweep_rev_per_customer = []
self.time_series_esp_checking_rev_per_customer = []
# If we wanted to simulate increasing the number of new ESP customers per week
self.increase_esp_clients_percent_per_week = \
increase_esp_clients_percent_per_week
# Store any initial evidence we have for products
self.evidence = evidence_
def yearly_to_weekly_interest_rate_conversion(self,yearly_interest_rate):
"""Convert from a yearly rate to a weekly rate using the following
equation.
Effective rate for period = (1 + annual rate)**(1 / # of periods) - 1
"""
weekly_interest_rate = ((1 + yearly_interest_rate)**(1/52))-1
return weekly_interest_rate
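# Worked example: a 5% annual rate converts to
# (1 + 0.05)**(1/52) - 1, which is roughly 0.00094, i.e. about 0.094% per week.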
def esp_clients_per_week(self,mean=20.433962264150942, std=3.5432472792051746):
"""This generates the number of new clients in ESP for a given week.
The default parameters are taken from the years 2013-2016."""
self.oneweek_esp_clients = round(stats.norm.rvs(mean,std))
if self.oneweek_esp_clients <0:
self.oneweek_esp_clients = 0
self.oneweek_esp_clients = self.oneweek_esp_clients * \
self.increase_esp_clients_percent_per_week + self.oneweek_esp_clients
def accelerator_clients_per_week(self,mean=4.1792452830188678,
std=0.92716914151900442):
"""This generates the number of new clients in accelerator for a given week.
The default parameters are taken from the years 2013-2016"""
self.oneweek_accelerator_clients = round(stats.norm.rvs(mean,std))
if self.oneweek_accelerator_clients < 0:
self.oneweek_accelerator_clients =0
def stripe_clients_per_week(self,mean =23.209302325581394,
std =12.505920717868896):
""""This generates the number of new Stripe customers from the given week.
The default parameters from from 2016"""
self.oneweek_stripe_clients = round(stats.norm.rvs(mean, std))
if self.oneweek_stripe_clients < 0:
self.oneweek_stripe_clients = 0
def time_between_esb_accelerator(self,shape = 1.3513865965152867,
location = -0.85750795314579964, scale = 57.412494398862549):
"""This is an exponential distribution of the average time between
a client being in the esp team and being moved to the acceleartor team.
Default parameters are from 2000-2016"""
self.time_between_esb_accelerator = stats.gamma.rvs(shape, location, scale)
if self.time_between_esb_accelerator <0:
self.time_between_esb_accelerator = 1
# at least one week before transferring to accelerator
def esp_client_lifetime(self):
"""Draws from a distribution of client lifetimes (in months) from 2013-2016.
Return the number of weeks that a client will be alive.
A client needs to be generating revenue for at least three months, and not
have generated revenue for three months to be considered a
'client lifetime'. It is possible for a single client to have Multiple
'client lifetimes' that feed into the parameters for the Exponential
distribution.
Multiply the result by 4 to turn months into weeks"""
exponential_lifetime_parameters = (2.9999999999982676, 11.500665661185888)
return round(stats.expon(*exponential_lifetime_parameters ).rvs())*4
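# Note: with loc of roughly 3 and scale of roughly 11.5 (months), the mean of this
# exponential is about 14.5 months, i.e. roughly 58 weeks after the *4 conversion.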
def initiate_week_client_run(self, esp_mean=20.433962264150942,
esp_std=3.5432472792051746, accel_mean = 4.1792452830188678,
accel_std = 0.92716914151900442, stripe_mean = 23.209302325581394,
stripe_std = 12.505920717868896):
"""This is the main function for initiating clients throughout the time
at the bank. The number of customers is an input which is given when you
instantiate this class.
The esp, accelerator and stripe mean come from 2000-2016
This function steps through the simulated time one week at a time
and keeps track of the number of clients at each node during this simulation.
This function looks at the probabilities associated with each product
via a dynamic Markov Model (where the edges represent joint probabilities
between products that are updated every week). These probabilities are used
to infer the probability of a client having each product given evidence
about the other products that client has.
This function keeps track of the total number of clients over time,
the total number of clients for the seven ESP products, the total
GP per week per product, and the GP per client per product (adjusted
to be in NPV).
In addition, each client lifetime, which is drawn from an exponential
distribution, is represented by a simpy process. Once a client churns,
they are no longer counted in each of the products that used to hold.
"""
for week_n in range(self.number_of_weeks_to_run):
print("Starting WEEK NUMBER {}".format(week_n))
# generate new clients for each channel
self.esp_clients_per_week(esp_mean, esp_std)
self.accelerator_clients_per_week(accel_mean, accel_std)
self.stripe_clients_per_week(stripe_mean, stripe_std)
print(self.oneweek_esp_clients, ' ESP clients this week')
# Keep track of the total number of clients over time
self.time_series_total_clients.append(
('Total New clients IN ESP for Week = ', week_n, self.oneweek_esp_clients))
# Total number of clients
## See where the ESP clients end up across the products
for esp_client_n in range(int(self.oneweek_esp_clients)):
# Client id is number for the week + week number
esp_client = Client(str(esp_client_n)+'-'+str(week_n))
# default client lifetime
esp_client.client_age = 0 # new client
# keep track of the total number of clients over time
client_alive = self.esp_client_alive_resource.request()
yield client_alive
esp_client.esp_client_alive_resource_request = client_alive
# Draw a lifetime value from an exponential distribution
esp_client.client_lifetime = self.esp_client_lifetime()
# make a list of all esp clients
self.list_of_all_clients.append(esp_client)
# keep track of cumulative clients
self.time_series_cumulative_clients.append(("ESP cumulative clients\
for week =", week_n, self.esp_client_alive_resource.count))
for idx,client in enumerate(self.list_of_all_clients):
# print client lifetime (see when clients are closing accounts)
if idx % 10 == 0:
print('Client {} lifetime = {}'.format(client.client_id,
client.client_lifetime))
# Yield for the client lifetime
# only span one close account process per client
# Otherwise, SImpy will try to close the same account
# Multiple times
if client.close_account_process == None:
close_accounts = self.env.process(self.close_accounts(client))
client.close_account_process = close_accounts
if client.client_age == 0: ## Don't have any products yet
checking_prob, cmma_prob, mmb_prob, cm_prob, fx_prob ,loc_prob, es_prob = \
ESP_Markov_Model_Joint_Prob(ESP_Joint_Product_Probabilities,
single=True,week_n_one_time=client.client_age,
evidence_ = self.evidence)
# See if a client has each product
open_checking = np.random.choice([1,0],p=np.array(
[checking_prob,(1-checking_prob)]))
open_cmma = np.random.choice([1,0],p=np.array(
[cmma_prob,(1-cmma_prob)]))
open_mmb = np.random.choice([1,0],p=np.array(
[mmb_prob,(1-mmb_prob)]))
open_cm = np.random.choice([1,0],p=np.array(
[cm_prob,(1-cm_prob)]))
open_fx = np.random.choice([1,0],p=np.array(
[fx_prob,(1-fx_prob)]))
open_loc = np.random.choice([1,0],p=np.array(
[loc_prob,(1-loc_prob)]))
open_es = np.random.choice([1,0],p=np.array(
[es_prob,(1-es_prob)]))
# open an account if a client has each product
# Otherwise, add a default event to yield
if open_checking == 1:
if client.have_checking == 0:
open_checking = self.env.process(self.esp_open_checking(client))
else:
open_checking = self.env.timeout(0)
# either open product or
else:
open_checking = self.env.timeout(0)
if open_cmma == 1:
if client.have_cmma == 0:
open_cmma = self.env.process(
self.esp_open_collateral_mma(client))
else:
open_cmma = self.env.timeout(0)
# yield close_accounts |open_cmma
else:
open_cmma = self.env.timeout(0)
if open_mmb ==1:
if client.have_mmb == 0:
open_mmb = self.env.process(
self.esp_open_money_market_bonus(client))
else:
open_mmb = self.env.timeout(0)
else:
open_mmb = self.env.timeout(0)
if open_cm == 1:
if client.have_cm == 0:
open_cm = self.env.process(
self.esp_open_cash_management(client))
else:
open_cm = self.env.timeout(0)
# yield close_accounts | open_cm
else:
open_cm = self.env.timeout(0)
if open_fx == 1:
if client.have_fx == 0:
open_fx = self.env.process(self.esp_open_fx(client))
else:
open_fx = self.env.timeout(0)
# yield close_accounts |open_fx
else:
open_fx = self.env.timeout(0)
if open_loc == 1:
if client.have_loc == 0:
open_loc = self.env.process(
self.esp_open_letters_of_credit(client))
else:
open_loc = self.env.timeout(0)
# yield close_accounts | open_loc
else:
open_loc = self.env.timeout(0)
if open_es == 1:
if client.have_es == 0:
open_es = self.env.process(
self.esp_open_enterprise_sweep(client))
else:
open_es = self.env.timeout(0)
# yield close_accounts | open_es
else:
open_es = self.env.timeout(0)
# either open product or close the account
yield (open_checking &open_cmma & open_mmb & open_cm \
&open_fx & open_loc & open_es) | client.close_account_process
client.client_age +=1 # increment the age of the client
else:
# every client now has an indicator for if they have
# a product or not
checking_prob, cmma_prob, mmb_prob, cm_prob, fx_prob ,loc_prob, es_prob = \
ESP_Markov_Model_Joint_Prob(ESP_Joint_Product_Probabilities,
single=True,week_n_one_time=client.client_age,
evidence_={'money_market_bonus':client.have_mmb,
'collateral_mma':client.have_cmma,
'cash_management':client.have_cm,
'enterprise_sweep':client.have_es,
'fx_products':client.have_fx,
'letters_of_credit':client.have_loc,
'checking_usd':client.have_checking})
# # update if these clients have each product
## See if a client has each product
open_checking = np.random.choice([1,0],p=np.array(
[checking_prob,(1-checking_prob)]))
open_cmma = np.random.choice([1,0],p=np.array(
[cmma_prob,(1-cmma_prob)]))
open_mmb = np.random.choice([1,0],p=np.array(
[mmb_prob,(1-mmb_prob)]))
open_cm = np.random.choice([1,0],p=np.array(
[cm_prob,(1-cm_prob)]))
open_fx = np.random.choice([1,0],p=np.array(
[fx_prob,(1-fx_prob)]))
open_loc = np.random.choice([1,0],p=np.array(
[loc_prob,(1-loc_prob)]))
open_es = np.random.choice([1,0],p=np.array(
[es_prob,(1-es_prob)]))
# open an account if a client has each product
# Otherwise, add a default event to yield
if open_checking == 1:
if client.have_checking == 0:
open_checking = self.env.process(self.esp_open_checking(client))
else:
open_checking = self.env.timeout(0)
# either open product or
elif open_checking == 0:
if client.have_checking ==1 :
open_checking = self.env.process(self.esp_close_checking(client))
else:
open_checking = self.env.timeout(0)
else:
print('Something weird happened')
# close the account for this client product
open_checking = self.env.timeout(0)
if open_cmma == 1:
if client.have_cmma == 0:
open_cmma = self.env.process(
self.esp_open_collateral_mma(client))
else:
open_cmma = self.env.timeout(0)
# yield close_accounts |open_cmma
elif open_cmma == 0 :
if client.have_cmma ==1:
open_cmma = self.env.process(self.esp_close_collateral_mma(client))
else:
open_cmma = self.env.timeout(0)
else:
print('Something weird happened')
if open_mmb ==1:
if client.have_mmb == 0:
open_mmb = self.env.process(
self.esp_open_money_market_bonus(client))
else:
open_mmb = self.env.timeout(0)
elif open_mmb == 0:
if client.have_mmb == 1:
open_mmb = self.env.process(self.esp_close_money_market_bonus(client))
else:
open_mmb = self.env.timeout(0)
else:
pass
if open_cm == 1:
if client.have_cm == 0:
open_cm = self.env.process(
self.esp_open_cash_management(client))
else:
open_cm = self.env.timeout(0)
# yield close_accounts | open_cm
elif open_cm == 0:
if client.have_cm == 1:
open_cm = self.env.process(self.esp_close_cash_management(client))
else:
open_cm = self.env.timeout(0)
else:
pass
if open_fx == 1:
if client.have_fx == 0:
open_fx = self.env.process(self.esp_open_fx(client))
else:
open_fx = self.env.timeout(0)
# yield close_accounts |open_fx
elif open_fx ==0:
if client.have_fx == 1:
open_fx = self.env.process(self.esp_close_fx(client))
else:
open_fx = self.env.timeout(0)
else:
pass
if open_loc == 1:
if client.have_loc == 0:
open_loc = self.env.process(
self.esp_open_letters_of_credit(client))
else:
open_loc = self.env.timeout(0)
# yield close_accounts | open_loc
elif open_loc == 0:
if client.have_loc == 1:
open_loc = self.env.process(self.esp_close_letters_of_credit(client))
else:
open_loc = self.env.timeout(0)
else:
pass
if open_es == 1:
if client.have_es == 0:
open_es = self.env.process(
self.esp_open_enterprise_sweep(client))
else:
open_es = self.env.timeout(0)
# yield close_accounts | open_es
elif open_es == 0:
if client.have_es == 1:
open_es = self.env.process(self.esp_close_enterprise_sweep(client))
else:
open_es = self.env.timeout(0)
else:
pass
# either open product or close the account
yield (open_checking & open_cmma & open_mmb & open_cm \
&open_fx & open_loc & open_es) | client.close_account_process
client.client_age +=1 # increment the age of the client
if idx % 10 == 0 :
## print out stats of every 10th client
print(client.client_id, ' client id ')
print(client.client_age,'client age')
print(client.have_mmb, ' client.have_mmb')
print(client.have_cmma, 'client.have_cmma')
print(client.have_cm, 'client.have_cm')
print( client.have_es, ' client.have_es')
print(client.have_fx,'client.have_fx')
print(client.have_loc,'client.have_loc')
print(client.have_checking,'client.have_checking')
# print the weekly metrics
print()
print('WEEK METRICS {}'.format(week_n))
print(self.esp_money_market_bonus_resource.count,'esp mmb clients ')
print(self.esp_collateral_mma_resource.count, ' esp cmma clients')
print(self.esp_cash_management_resource.count, ' esp cm clients')
print(self.esp_fx_resource.count, 'fx count')
print(self.esp_letters_of_credit_resource.count, ' loc count')
print(self.esp_enterprise_sweep_resource.count, 'es count')
print(self.esp_checking_resource.count , 'checking count')
print(self.esp_client_alive_resource.count, ' total number of clients')
print()
# At the end of each week, record the number of clients per
# product
self.time_series_esp_money_market_bonus.append(("Week = ",
self.env.now,self.esp_money_market_bonus_resource.count))
self.time_series_esp_collateral_mma.append(("Week = ",
self.env.now,self.esp_collateral_mma_resource.count))
self.time_series_esp_cash_management.append(("Week = ",
self.env.now,self.esp_cash_management_resource.count))
self.time_series_esp_fx.append(("Week = ",
self.env.now,self.esp_fx_resource.count))
self.time_series_esp_letters_of_credit.append(("Week = ",
self.env.now,self.esp_letters_of_credit_resource.count))
self.time_series_esp_enterprise_sweep.append(("Week = ",
self.env.now,self.esp_enterprise_sweep_resource.count))
self.time_series_esp_checking.append(("Week = ",
self.env.now,self.esp_checking_resource.count))
# At the end of each week, find the weekly GP and weekly GP per client
# esp money market bonus weekly gp
self.get_weekly_gp(week_n, self.time_series_esp_money_market_bonus,
ESP_revenue_predictions,
self.time_series_esp_money_market_bonus_total_weekly_rev,
self.time_series_esp_money_market_bonus_rev_per_customer,
'mmb')
# esp collateral mma weekly gp
self.get_weekly_gp(week_n, self.time_series_esp_collateral_mma,
ESP_revenue_predictions,
self.time_series_esp_collateral_mma_total_weekly_rev,
self.time_series_esp_collateral_mma_rev_per_customer,
'cmma')
# esp cash management weekly revenue
self.get_weekly_gp(week_n, self.time_series_esp_cash_management,
ESP_revenue_predictions,
self.time_series_esp_cash_management_total_weekly_rev,
self.time_series_esp_cash_management_rev_per_customer,
'cm')
### esp fx weekly gp
self.get_weekly_gp(week_n, self.time_series_esp_fx,
ESP_revenue_predictions,
self.time_series_esp_fx_total_weekly_rev,
self.time_series_esp_fx_rev_per_customer,
'fx')
### esp letters of credit
self.get_weekly_gp(week_n, self.time_series_esp_letters_of_credit,
ESP_revenue_predictions,
self.time_series_esp_letters_of_credit_total_weekly_rev,
self.time_series_esp_letters_of_credit_rev_per_customer,
'loc')
### esp enterprise sweep weekly gp
self.get_weekly_gp(week_n, self.time_series_esp_enterprise_sweep,
ESP_revenue_predictions,
self.time_series_esp_enterprise_sweep_total_weekly_rev,
self.time_series_esp_enterprise_sweep_rev_per_customer,
'es')
### esp checking weekly gp
self.get_weekly_gp(week_n, self.time_series_esp_checking,
ESP_revenue_predictions,
self.time_series_esp_checking_total_weekly_rev,
self.time_series_esp_checking_rev_per_customer,
'checking')
# Increment by one week
one_week_increment = self.env.timeout(1)
yield one_week_increment
def monitor_resource(self, resource, resource_name):
"""Print out monitoring statistics for a given resource.
Number of slots allocated.
Number of people using the resource
Number of queued events for the resource"""
print()
print("MONITORING STATISTICS FOR {}".format(resource_name))
print('{} of {} slots are allocated at time {}.'.format (
resource.count, resource.capacity, self.env.now))
#print(' Users :', resource.users)
print(' Queued events:', resource.queue)
print()
def get_weekly_gp(self,week_n, time_series, gp_data_function, total_rev_week,
rev_per_client_week, product):
"""Get the total revenue for the week, and revenue per client, for a
given product.
Also, adjusts the revenue to be in week zero through net
present value.
Need the weekly return interest rate.
NPV = ∑ {Net Period Cash Flow / (1+R)^T} - Initial Investment """
total_weekly_rev = 0
number_of_customer_for_product_week = time_series[week_n][2]
if number_of_customer_for_product_week == 0:
# No customer this week for this product
total_rev_week.append( ('week = ',week_n, 0))
rev_per_client_week.append( ('week = ',week_n,0))
else: # We have customers!
for esp_customer in range(number_of_customer_for_product_week ):
# Get weekly revenue from distribution
total_weekly_rev += gp_data_function.get_revenue(product)
# total value of the product
print(total_weekly_rev, ' total rev for product {}'.format(product))
# NPV calculation
total_weekly_rev = total_weekly_rev / (1 + self.weekly_interest_rate)**week_n
# Records results
total_rev_week.append( ('week = ',week_n, total_weekly_rev))
# average value per customer
rev_per_client_week.append( ('week = ',week_n,total_weekly_rev / \
time_series[week_n][2]))
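# Worked example of the NPV adjustment above: revenue of 100 booked in week 10
# with a weekly rate of 0.1% is discounted to 100 / 1.001**10, roughly 99.0.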
def esp_open_money_market_bonus(self, client):
"""This is a simpy process for opening a money market bonus account.
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
# opening a money market bonus account
open_mmb = self.esp_money_market_bonus_resource.request()
# Wait until it's our turn or until the customer churns
yield open_mmb
client.have_mmb = 1
client.esp_open_money_market_bonus_request = open_mmb
def esp_open_collateral_mma(self, client):
"""This is a simpy process for opening a open collateral mma accounts
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_cmma = self.esp_collateral_mma_resource.request()
# Wait until it's our turn or until the customer churns
yield open_cmma
client.have_cmma = 1
client.esp_open_collateral_mma_request = open_cmma
def esp_open_cash_management(self, client):
"""This is a simpy process for opening a cash management checking account
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_cmc = self.esp_cash_management_resource.request()
# Wait until it's our turn or until the customer churns
yield open_cmc
client.have_cm = 1
client.esp_open_cash_management_request = open_cmc
def esp_open_fx(self, client):
"""This is a simpy process for opening a fx-account account
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_fx = self.esp_fx_resource.request()
# Wait until it's our turn or until the customer churns
yield open_fx
client.have_fx = 1
client.esp_open_fx_request = open_fx
def esp_open_letters_of_credit(self, client):
"""This is a simpy process for opening a letters of credit
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_letter_credit = self.esp_letters_of_credit_resource.request()
# Wait until it's our turn or until the customer churns
yield open_letter_credit
client.have_loc = 1
client.esp_open_letters_of_credit_request = open_letter_credit
def esp_open_enterprise_sweep(self, client):
"""This is a simpy process for opening a letters of credit
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_es = self.esp_enterprise_sweep_resource.request()
# Wait until it's our turn or until the customer churns
yield open_es
client.have_es = 1
client.esp_open_enterprise_sweep_request = open_es
def esp_open_checking(self, client):
"""This is a simpy process for opening a letters of credit
Also, append the resource request for simpy to the client object.
This will let us release this resource request later"""
open_checking = self.esp_checking_resource.request()
# Wait until it's our turn or until the customer churns
yield open_checking
client.have_checking = 1
client.esp_open_checking_request = open_checking
def esp_close_checking(self, client):
"""This releases the resource request for the SImpy resource representing
checking. """
print('closing checking')
client.have_checking = 0
yield self.esp_checking_resource.release(client.esp_open_checking_request)
def esp_close_cash_management(self, client):
"""This releases the resource request for the Simpy resource
representing cash management."""
print('closing cash management')
client.have_cm = 0
yield self.esp_cash_management_resource.release(client.esp_open_cash_management_request)
def esp_close_collateral_mma(self, client):
"""This releases the resource request for the Simpy resource representing
collateral mma accounts"""
print('closing collateral mma')
client.have_cmma = 0
yield self.esp_collateral_mma_resource.release(client.esp_open_collateral_mma_request)
def esp_close_enterprise_sweep(self, client):
"""This releases the resource request for the SImpy resource representing
enterprise sweep"""
print('closing enterprise sweep')
client.have_es = 0
yield self.esp_enterprise_sweep_resource.release(client.esp_open_enterprise_sweep_request)
def esp_close_letters_of_credit(self, client):
"""This releases the resource request for the Simpy resource representign
letters of credit"""
print('closing letters of credit')
client.have_loc = 0
yield self.esp_letters_of_credit_resource.release(client.esp_open_letters_of_credit_request)
def esp_close_money_market_bonus(self, client):
"""This releases teh resource request for the Simpy resource representing
money market bonus accounts"""
print('closing money market bonus')
client.have_mmb = 0
yield self.esp_money_market_bonus_resource.release(client.esp_open_money_market_bonus_request)
def esp_close_fx(self, client):
"""This releases the resource request for the Simpy resource representing
foreign exchange products"""
print('closing fx')
client.have_fx = 0
yield self.esp_fx_resource.release(client.esp_open_fx_request)
def close_accounts(self, client):
"""Release the simpy process for each of the Simpy Products.
This occurs once a client has churned.
In addition, remove this client from the list of clients that we have"""
yield self.env.timeout(client.client_lifetime)
print()
print('WE are closing accounts for client {}'.format(client.client_id))
print(len(self.list_of_all_clients),' length of client list before')
self.list_of_all_clients.remove(client)
print(len(self.list_of_all_clients), 'length of client list after')
# Drop the clients from each product once they churn
yield self.esp_cash_management_resource.release(client.esp_open_cash_management_request)
yield self.esp_checking_resource.release(client.esp_open_checking_request)
yield self.esp_collateral_mma_resource.release(client.esp_open_collateral_mma_request)
yield self.esp_enterprise_sweep_resource.release(client.esp_open_enterprise_sweep_request)
yield self.esp_letters_of_credit_resource.release(client.esp_open_letters_of_credit_request)
yield self.esp_money_market_bonus_resource.release(client.esp_open_money_market_bonus_request)
yield self.esp_fx_resource.release(client.esp_open_fx_request)
yield self.esp_client_alive_resource.release(
client.esp_client_alive_resource_request)
if __name__ == "__main__":
sys.stdout.flush()
env = simpy.Environment()
start = time.time()
n_weeks_run = 104
trials = 3
federal_funds_rate = .0075 # May 11, 2017
inflation_rate = .025 # March 2017
# Evidence for starting products
starting_evidence = {'money_market_bonus':1,'checking_usd':1}
#starting_evidence = None
# keep a list of the data attributes over time
times_series_all_clients = []
times_series_cumulative_clients = []
# products
time_series_money_market_bonus = []
time_series_esp_collateral_mma = []
time_series_esp_cash_management = []
time_series_esp_fx = []
time_series_esp_letters_of_credit = []
time_series_esp_enterprise_sweep = []
time_series_esp_checking = []
# GP
time_series_esp_money_market_bonus_total_weekly_rev= []
time_series_esp_money_market_bonus_rev_per_customer = []
time_series_esp_collateral_mma_total_weekly_rev = []
time_series_esp_collateral_mma_rev_per_customer = []
time_series_esp_cash_management_total_weekly_rev = []
time_series_esp_cash_management_rev_per_customer = []
time_series_esp_fx_total_weekly_rev = []
time_series_esp_fx_rev_per_customer = []
time_series_esp_letters_of_credit_total_weekly_rev = []
time_series_esp_letters_of_credit_rev_per_customer = []
time_series_esp_enterprise_sweep_total_weekly_rev = []
time_series_esp_enterprise_sweep_rev_per_customer = []
time_series_esp_checking_total_weekly_rev = []
time_series_esp_checking_rev_per_customer = []
#for i in range(3):
# Record data over multiple runs
for i in range(trials):
print()
print('Starting simulation {}'.format(i))
print()
esp_flow = ESP_flow(env,
number_of_weeks_to_run = n_weeks_run,
yearly_interest_rate = federal_funds_rate * inflation_rate,
evidence_ = starting_evidence)
env.process(esp_flow.initiate_week_client_run())
env.run()
# Keep track of the data for each run
times_series_all_clients.append(esp_flow.time_series_total_clients)
times_series_cumulative_clients.append(esp_flow.time_series_cumulative_clients)
# products
time_series_money_market_bonus.append(esp_flow.time_series_esp_money_market_bonus)
time_series_esp_collateral_mma.append(esp_flow.time_series_esp_collateral_mma)
time_series_esp_cash_management.append(esp_flow.time_series_esp_cash_management)
time_series_esp_fx.append(esp_flow.time_series_esp_fx)
time_series_esp_letters_of_credit.append(esp_flow.time_series_esp_letters_of_credit)
time_series_esp_enterprise_sweep.append(esp_flow.time_series_esp_enterprise_sweep)
time_series_esp_checking.append(esp_flow.time_series_esp_checking)
# GP
time_series_esp_money_market_bonus_total_weekly_rev.append(esp_flow.time_series_esp_money_market_bonus_total_weekly_rev)
time_series_esp_money_market_bonus_rev_per_customer.append(esp_flow.time_series_esp_money_market_bonus_rev_per_customer)
time_series_esp_collateral_mma_total_weekly_rev.append(esp_flow.time_series_esp_collateral_mma_total_weekly_rev)
time_series_esp_collateral_mma_rev_per_customer.append(esp_flow.time_series_esp_collateral_mma_rev_per_customer)
time_series_esp_cash_management_total_weekly_rev.append(esp_flow.time_series_esp_cash_management_total_weekly_rev)
time_series_esp_cash_management_rev_per_customer.append(esp_flow.time_series_esp_cash_management_rev_per_customer)
time_series_esp_fx_total_weekly_rev.append(esp_flow.time_series_esp_fx_total_weekly_rev)
time_series_esp_fx_rev_per_customer.append(esp_flow.time_series_esp_fx_rev_per_customer)
time_series_esp_letters_of_credit_total_weekly_rev.append(esp_flow.time_series_esp_letters_of_credit_total_weekly_rev)
time_series_esp_letters_of_credit_rev_per_customer.append(esp_flow.time_series_esp_letters_of_credit_rev_per_customer)
time_series_esp_enterprise_sweep_total_weekly_rev.append(esp_flow.time_series_esp_enterprise_sweep_total_weekly_rev)
time_series_esp_enterprise_sweep_rev_per_customer.append(esp_flow.time_series_esp_enterprise_sweep_rev_per_customer)
time_series_esp_checking_total_weekly_rev.append(esp_flow.time_series_esp_checking_total_weekly_rev)
time_series_esp_checking_rev_per_customer.append(esp_flow.time_series_esp_checking_rev_per_customer)
print()
print("SUMMARY STATISTICS")
print('Finished at time {}'.format(env.now))
print('Time series of total clients over time = {}'.format(
esp_flow.time_series_total_clients
))
print('Time series of cumulative clients over time = {}'.format(
esp_flow.time_series_cumulative_clients
))
print("Time series of esp money market bonus {} ".format(
esp_flow .time_series_esp_money_market_bonus))
print("Time series of esp collateral mma {} ".format(
esp_flow .time_series_esp_collateral_mma))
print("Time series of esp cash management {} ".format(
esp_flow .time_series_esp_cash_management))
print("Time series of esp fx{} ".format(
esp_flow .time_series_esp_fx))
print("Time series of esp letters of credit {} ".format(
esp_flow .time_series_esp_letters_of_credit))
print("Time series of esp enterprise sweep {} ".format(
esp_flow .time_series_esp_enterprise_sweep))
print("Time series of esp checking {} ".format(
esp_flow .time_series_esp_checking))
print("Total rgp for esp money market bonus per week {}".format(
esp_flow .time_series_esp_money_market_bonus_total_weekly_rev))
print("GP per custome rfor esp money market bonus per week {}".format(
esp_flow.time_series_esp_money_market_bonus_rev_per_customer
))
print()
print("GP per total {} and per customer for collateral MMA {}".format(
esp_flow.time_series_esp_collateral_mma_total_weekly_rev,
esp_flow.time_series_esp_collateral_mma_rev_per_customer
))
print()
print('GP for cash management total {} and gp cash management per customer {}'.format(
esp_flow.time_series_esp_cash_management_total_weekly_rev,
esp_flow.time_series_esp_cash_management_rev_per_customer
))
print()
print(' GP for fx total {} and fx per client {}'.format(
esp_flow.time_series_esp_fx_total_weekly_rev,
esp_flow.time_series_esp_fx_rev_per_customer))
print()
print('GP for letters of credit total {} and gp for letters of credit per customer {}'.format(
esp_flow.time_series_esp_letters_of_credit_total_weekly_rev,
esp_flow.time_series_esp_letters_of_credit_rev_per_customer
))
print()
print('GP for enterprise sweep total {} and enterprise sweep gP per client{}'.format(
esp_flow.time_series_esp_enterprise_sweep_total_weekly_rev,
esp_flow.time_series_esp_enterprise_sweep_rev_per_customer
))
print()
print('GP for checking total {} and checking per client per week {} '.format(
esp_flow.time_series_esp_checking_total_weekly_rev,
esp_flow.time_series_esp_checking_rev_per_customer
))
end = time.time()
print('{} weeks took {} seconds'.format(n_weeks_run, end - start))
# Save the generated data
with open('data-evidence-checking-mmb/time_series_all_clients', 'wb') as fp:
pickle.dump(times_series_all_clients, fp)
with open('data-evidence-checking-mmb/times_series_cumulative_clients', 'wb') as fp:
pickle.dump(times_series_cumulative_clients, fp)
# products
with open('data-evidence-checking-mmb/time_series_money_market_bonus', 'wb') as fp:
pickle.dump(time_series_money_market_bonus, fp)
with open('data-evidence-checking-mmb/time_series_esp_collateral_mma', 'wb') as fp:
pickle.dump(time_series_esp_collateral_mma, fp)
with open('data-evidence-checking-mmb/time_series_esp_cash_management', 'wb') as fp:
pickle.dump(time_series_esp_cash_management , fp)
with open('data-evidence-checking-mmb/time_series_esp_fx', 'wb') as fp:
pickle.dump(time_series_esp_fx , fp)
with open('data-evidence-checking-mmb/time_series_esp_letters_of_credit', 'wb') as fp:
pickle.dump(time_series_esp_letters_of_credit , fp)
with open('data-evidence-checking-mmb/time_series_esp_enterprise_sweep', 'wb') as fp:
pickle.dump(time_series_esp_enterprise_sweep , fp)
with open('data-evidence-checking-mmb/time_series_esp_checking', 'wb') as fp:
pickle.dump(time_series_esp_checking , fp)
# GP
with open('data-evidence-checking-mmb/time_series_esp_money_market_bonus_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_money_market_bonus_total_weekly_rev, fp)
with open('data-evidence-checking-mmb/time_series_esp_money_market_bonus_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_money_market_bonus_rev_per_customer,fp)
with open('data-evidence-checking-mmb/time_series_esp_collateral_mma_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_collateral_mma_total_weekly_rev , fp)
with open('data-evidence-checking-mmb/time_series_esp_collateral_mma_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_collateral_mma_rev_per_customer , fp)
with open('data-evidence-checking-mmb/time_series_esp_cash_management_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_cash_management_total_weekly_rev , fp)
with open('data-evidence-checking-mmb/time_series_esp_cash_management_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_cash_management_rev_per_customer , fp)
with open('data-evidence-checking-mmb/time_series_esp_fx_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_fx_total_weekly_rev , fp)
with open('data-evidence-checking-mmb/time_series_esp_fx_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_fx_rev_per_customer, fp)
with open('data-evidence-checking-mmb/time_series_esp_letters_of_credit_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_letters_of_credit_total_weekly_rev, fp)
with open('data-evidence-checking-mmb/time_series_esp_letters_of_credit_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_letters_of_credit_rev_per_customer, fp)
with open('data-evidence-checking-mmb/time_series_esp_enterprise_sweep_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_enterprise_sweep_total_weekly_rev , fp)
with open('data-evidence-checking-mmb/time_series_esp_enterprise_sweep_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_enterprise_sweep_rev_per_customer , fp)
with open('data-evidence-checking-mmb/time_series_esp_checking_total_weekly_rev', 'wb') as fp:
pickle.dump(time_series_esp_checking_total_weekly_rev , fp)
with open('data-evidence-checking-mmb/time_series_esp_checking_rev_per_customer', 'wb') as fp:
pickle.dump(time_series_esp_checking_rev_per_customer, fp)
```
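Every `esp_open_*` / `esp_close_*` method above follows the same SimPy pattern: opening a product requests a slot from a `simpy.Resource` and stashes the request on the client, and closing releases that stored request. The following is a minimal, self-contained sketch of that pattern; the product name and capacity are made up, not taken from the simulation above.
```python
import simpy

class ProductDemo(object):
    """Minimal sketch of the request/release pattern used by the esp_open_* / esp_close_* methods."""

    def __init__(self, env):
        self.env = env
        # hypothetical capacity; the real simulation sizes its resources elsewhere
        self.checking_resource = simpy.Resource(env, capacity=2)

    def open_checking(self, client):
        req = self.checking_resource.request()
        yield req  # wait for a free slot
        client['have_checking'] = 1
        client['open_checking_request'] = req  # keep the request so it can be released later

    def close_checking(self, client):
        client['have_checking'] = 0
        yield self.checking_resource.release(client['open_checking_request'])

env = simpy.Environment()
demo = ProductDemo(env)
client = {}
env.process(demo.open_checking(client))
env.run()
print(client['have_checking'])  # -> 1
```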
#### File: deep_learning/notebooks/algorithm_comparisons.py
```python
import pandas as pd
import numpy as np
from py_geohash_any import geohash as gh
import datetime
import random
import numpy as np
from collections import deque
import time
from keras.layers.normalization import BatchNormalization
import json
from collections import defaultdict
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import InputLayer
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD , Adam
import tensorflow as tf
import pickle
from operator import itemgetter
import sys
sys.path.insert(0, '../data/') ## for running on local
import auxiliary_functions, make_dataset
from auxiliary_functions import convert_miles_to_minutes_nyc, list_of_output_predictions_to_direction
__author__ = ' <NAME>'
class AlgorithmComparison(object):
"""A class used to compare DQN (mlp and lstm), Actor Critic MLP, and a naive approach"""
def __init__(self, args, ACTION_SPACE, OBSERVATION_SPACE,
list_of_unique_geohashes,list_of_time_index, list_of_geohash_index,
list_of_inverse_geohash_index, final_data_structure,
list_of_output_predictions_to_direction):
"""Store the data attributes needed for each algorithm.
Also, compile each model that will be compared."""
self.ACTION_SPACE = ACTION_SPACE
self.OBSERVATION_SPACE = OBSERVATION_SPACE
self.args = args
self.list_of_unique_geohashes = list_of_unique_geohashes
self.list_of_time_index = list_of_time_index
self.list_of_geohash_index = list_of_geohash_index
self.list_of_inverse_geohash_index = list_of_inverse_geohash_index
self.final_data_structure = final_data_structure
self.list_of_output_predictions_to_direction = list_of_output_predictions_to_direction
# Build the various models and load the weights
self.actor_model()
self.build_mlp_dqn_model()
self.build_lstm_dqn_model()
self.first_run = True
def actor_model(self):
"""Build an actor model with mlp.
Code adapted from http://www.rage.net/~greg/2016-07-05-ActorCritic-with-OpenAI-Gym.html
Input time followed by geohash index for predictions.
When making predictions, you do not need the critic network.
The critic network is solely used in training an actor critic model."""
model_mlp = Sequential()
model_mlp.add(Dense(100, input_shape=(self.OBSERVATION_SPACE,)))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(500))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(1000))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(self.ACTION_SPACE, activation='linear'))
# predict which geohash to move to next
print('Loading weights for Actor model')
model_mlp.load_weights(self.args['model_weights_load_actor_mlp'])
print('Weights loaded for Actor model')
adam = Adam(clipnorm=1.0)
model_mlp.compile(loss='mse',optimizer=adam)
self.actor_model = model_mlp
def build_mlp_dqn_model(self):
"""Build a simple MLP model.
Input time followed by the geohash index for predictions."""
model_mlp = Sequential()
model_mlp.add(Dense(100, input_shape=(self.OBSERVATION_SPACE,)))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(500))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(1000))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(self.ACTION_SPACE, activation='linear')) ## predict which geohash to move to next
adam = Adam()
print('Loading weights for MLP DQN')
model_mlp.load_weights(self.args['model_weights_load_dqn_mlp'])
print('Weights loaded for MLP DQN')
model_mlp.compile(loss='mse',optimizer=adam)
self.model_mlp_dqn = model_mlp
def build_lstm_dqn_model(self):
"""Build a simpleLSTM model choosen by hyperparameter selection from hyperas.
Input time follwoed by the geohash index. """
model_lstm = Sequential()
model_lstm.add(LSTM(512, dropout=.24,
                    batch_input_shape=(1, None, 2),
                    recurrent_dropout=.24, return_sequences=True))
model_lstm.add(BatchNormalization())
model_lstm.add(LSTM(1024, dropout=.18,
                    recurrent_dropout=.18,
                    return_sequences=True))
model_lstm.add(BatchNormalization())
model_lstm.add(Dense(512))
model_lstm.add(BatchNormalization())
model_lstm.add(Activation('sigmoid'))
model_lstm.add(Dense(9, activation='linear', name='dense_output'))
adam = Adam(clipnorm=.5, clipvalue=.5)
print('Loading weights for LSTM DQN')
model_lstm.load_weights(self.args['model_weights_load_dqn_lstm'])
print('Weights loaded for LSTM DQN')
model_lstm.compile(loss='mean_squared_error', optimizer=adam)
self.model_lstm_dqn = model_lstm
def output_lat_long_predictions_given_input(self,geohash_start=None,
time_start=None, first_run=None):
"""Give n the starting geohash , and time, see which direction each algorithm
goes to.
If running for the first time, provide a geohash and time to start at.
If running past the first time, the model will have kept the previous geohashah
and time to create a prediction from.
Returns the latitude and longtitude for each algorithm alonside the
fare received for each move the different algorithms made.
If you call this function multiple times, you do not need to providea geohash_start
or time_start as these will be stored for each algorithm by the class.
"""
if first_run != None:
self.first_run = first_run
if self.first_run == True:
start_geohash_index = self.list_of_geohash_index[geohash_start]
start_state = np.array([[time_start, start_geohash_index ]])
start_state_lstm = np.array([[[time_start, start_geohash_index]]])
# predict for DQN MLP
mlp_dqn_predictions = self.model_mlp_dqn.predict(start_state)
# action to take for MLP DQN
mlp_dqn_action = np.argmax(mlp_dqn_predictions)
# predict for DQN LSTM
lstm_dqn_predictions = self.model_lstm_dqn.predict(start_state_lstm)
# action to take for LSTM DQN
lstm_dqn_action = np.argmax(lstm_dqn_predictions)
# predict for actor critic
mlp_ac_predictions = self.actor_model.predict(start_state)
# action to take for actor-critic MLP
mlp_ac_action = np.argmax(mlp_ac_predictions)
# predict for naive
naive_action = np.random.choice([0,1,2,3,4,5,6,7,8])
# Record the information for DQN MLP
self.s_geohash1_dqn_mlp, self.s_time1_dqn_mlp, r_t_dqn_mlp, fare_t_dqn_mlp, \
latitude_s1_dqn_mlp, longtitude_s1_dqn_mlp = \
self.geohash_conversion_given_action_state(
mlp_dqn_action, geohash_start, time_start)
# Record the information for DQN LSTM
self.s_geohash1_dqn_lstm, self.s_time1_dqn_lstm, r_t_dqn_mlp, fare_t_dqn_lstm, \
latitude_s1_dqn_lstm, longtitude_s1_dqn_lstm = \
self.geohash_conversion_given_action_state(
lstm_dqn_action, geohash_start, time_start)
# Record information for Actor-Critic MLP
self.s_geohash1_ac_mlp, self.s_time1_ac_mlp, r_t_dqn_mlp, fare_t_ac_mlp, \
latitude_s1_ac_mlp, longtitude_s1_ac_mlp = \
self.geohash_conversion_given_action_state(
mlp_ac_action, geohash_start, time_start)
# Record information for the Naive implementation
self.s_geohash1_naive, self.s_time1_naive, r_t_dqn_mlp, fare_t_naive, \
latitude_s1_naive, longtitude_s1_naive = \
self.geohash_conversion_given_action_state(
naive_action, geohash_start, time_start)
self.first_run = False
return latitude_s1_dqn_mlp, longtitude_s1_dqn_mlp, fare_t_dqn_mlp,\
latitude_s1_dqn_lstm, longtitude_s1_dqn_lstm, fare_t_dqn_lstm,\
latitude_s1_ac_mlp, longtitude_s1_ac_mlp, fare_t_ac_mlp,\
latitude_s1_naive, longtitude_s1_naive, fare_t_naive
else:
## convert index geohash to string geohash
geohash_dqn_mlp = self.list_of_inverse_geohash_index[self.s_geohash1_dqn_mlp]
geohash_dqn_lstm = self.list_of_inverse_geohash_index[self.s_geohash1_dqn_lstm]
geohash_ac_mlp = self.list_of_inverse_geohash_index[self.s_geohash1_ac_mlp]
geohash_naive = self.list_of_inverse_geohash_index[self.s_geohash1_naive]
start_state_dqn_mlp = np.array([[self.s_time1_dqn_mlp, self.s_geohash1_dqn_mlp ]])
start_state_ac_mlp = np.array([[self.s_time1_ac_mlp, self.s_geohash1_ac_mlp]])
start_state_lstm_dqn = np.array([[[self.s_time1_dqn_lstm, self.s_geohash1_dqn_lstm]]])
# predict for DQN MLP
mlp_dqn_predictions = self.model_mlp_dqn.predict(start_state_dqn_mlp)
# action to take for MLP DQN
mlp_dqn_action = np.argmax(mlp_dqn_predictions)
# predict for DQN LSTM
lstm_dqn_predictions = self.model_lstm_dqn.predict(start_state_lstm_dqn)
# action to take for LSTM DQN
lstm_dqn_action = np.argmax(lstm_dqn_predictions)
# predict for actor critic
mlp_ac_predictions = self.actor_model.predict(start_state_ac_mlp)
# action to take for actor-critic MLP
mlp_ac_action = np.argmax(mlp_ac_predictions)
# predict for naive
naive_action = np.random.choice([0,1,2,3,4,5,6,7,8])
# Record the information for DQN MLP
self.s_geohash1_dqn_mlp, self.s_time1_dqn_mlp, r_t_dqn_mlp, fare_t_dqn_mlp, \
latitude_s1_dqn_mlp, longtitude_s1_dqn_mlp = \
self.geohash_conversion_given_action_state(
mlp_dqn_action, geohash_dqn_mlp, self.s_time1_dqn_mlp)
# Record the information for DQN LSTM
self.s_geohash1_dqn_lstm, self.s_time1_dqn_lstm, r_t_dqn_mlp, fare_t_dqn_lstm, \
latitude_s1_dqn_lstm, longtitude_s1_dqn_lstm = \
self.geohash_conversion_given_action_state(
lstm_dqn_action, geohash_dqn_lstm, self.s_time1_dqn_lstm,)
# Record information for Actor-Critic MLP
self.s_geohash1_ac_mlp, self.s_time1_ac_mlp, r_t_dqn_mlp, fare_t_ac_mlp, \
latitude_s1_ac_mlp, longtitude_s1_ac_mlp = \
self.geohash_conversion_given_action_state(
mlp_ac_action, geohash_ac_mlp, self.s_time1_ac_mlp)
# Record information for the Naive implementation
self.s_geohash1_naive, self.s_time1_naive, r_t_dqn_mlp, fare_t_naive, \
latitude_s1_naive, longtitude_s1_naive = \
self.geohash_conversion_given_action_state(
naive_action, geohash_naive , self.s_time1_naive)
return latitude_s1_dqn_mlp, longtitude_s1_dqn_mlp, fare_t_dqn_mlp,\
latitude_s1_dqn_lstm, longtitude_s1_dqn_lstm, fare_t_dqn_lstm,\
latitude_s1_ac_mlp, longtitude_s1_ac_mlp, fare_t_ac_mlp,\
latitude_s1_naive, longtitude_s1_naive, fare_t_naive
self.first_run = False
def geohash_conversion_given_action_state(self,action, start_geohash, start_time):
"""Go through the process of converting an actions from a state into a
geohash and corresponding latitude and longtitude.
Returns geohash, time, reward ratio (fare / time), fare, lat, and longtitude"""
#Get the neighbors from the current geohash - convert back to string
#current_geohash_string = self.list_of_inverse_geohash_index[start_geohash]
#print(current_geohash_string,' current eohash string')
neighbors = gh.neighbors(start_geohash)
# Get the direction we should go
direction_to_move_to = self.list_of_output_predictions_to_direction[action]
# Get the geohash of the direction we moved to
if direction_to_move_to =='stay':
new_geohash = start_geohash # stay in current geohash, get the index of said geohash
possible_rewards = np.array(self.final_data_structure[start_time][new_geohash])
# hash with the letters of the geohash above
new_geohash = self.list_of_geohash_index[start_geohash]
else:
new_geohash = neighbors[direction_to_move_to]## give us the geohash to move to next
# get the reward of the geohash we just moved to (this is the ratio of fare /time of trip)
# time, geohash, list of tuple ( fare, time ,ratio)
possible_rewards = np.array(self.final_data_structure[start_time][new_geohash])
if len (possible_rewards) ==0:
r_t = -.1 # we do not have information for this time and geohash, don't go here. waste gas
fare_t = 0 # no information so the fare = 0
s_time1 = start_time+10 # assume this took ten minutes
else:
reward_option = np.random.randint(0,len(possible_rewards))
r_t = possible_rewards[reward_option][2] # get the ratio of fare / trip time
fare_t = possible_rewards[reward_option][0]
# get the trip length
s_time1 = start_time + possible_rewards[reward_option][1]
s_geohash1 = self.list_of_geohash_index[new_geohash]
# decode the geohash into latitude and longitude
decoded_geohash_s1 = gh.decode(
(self.list_of_inverse_geohash_index[s_geohash1]))
latitude_s1 = decoded_geohash_s1['lat']
longtitude_s1 = decoded_geohash_s1['lon']
# return the latitude and longtitude, fare, geohash, and time
return s_geohash1, s_time1, r_t, fare_t, latitude_s1, longtitude_s1
#
# args = {'model_weights_load_actor_mlp':None,
# 'model_weights_load_dqn_mlp':'mlp_model_dqn/model_mlp_linear_2million.h5',
# 'model_weights_load_dqn_lstm':'lstm_model_dqn/lstm_weight_200k.h5'}
```
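The action-to-location conversion used throughout the class above (and in the DQN training code that follows) is the same three-step lookup: map the argmax over the 9 network outputs to a compass direction, ask `gh.neighbors` for the geohash in that direction, and `gh.decode` the result back into a latitude/longitude. A small sketch of just that conversion; the starting geohash and action index below are made up.
```python
from py_geohash_any import geohash as gh

# Mapping from the 9 network outputs to compass directions, as defined in this repo.
list_of_output_predictions_to_direction = {0: 'nw', 1: 'n', 2: 'ne', 3: 'w', 4: 'stay',
                                           5: 'e', 6: 'sw', 7: 's', 8: 'se'}
start_geohash = 'dr5ru'   # hypothetical NYC-area geohash, not taken from the dataset
action = 5                # pretend the model's argmax picked action 5 -> 'e'

direction = list_of_output_predictions_to_direction[action]
new_geohash = start_geohash if direction == 'stay' else gh.neighbors(start_geohash)[direction]
decoded = gh.decode(new_geohash)   # dict with 'lat' and 'lon', as used above
print(new_geohash, decoded['lat'], decoded['lon'])
```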
#### File: src/models-DQN/model_mlp.py
```python
import numpy as np
import datetime
import random
import numpy as np
from collections import deque
import json
from collections import defaultdict
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD , Adam
import tensorflow as tf
from keras.layers.normalization import BatchNormalization
import time
import sys
import pickle
import auxiliary_functions, make_dataset
from py_geohash_any import geohash as gh
from keras import backend as K
from auxiliary_functions import convert_miles_to_minutes_nyc, \
list_of_output_predictions_to_direction
__author__ = '<NAME>'
#parameters
ACTIONS = 9 # number of valid actions
GAMMA = 0.99 # decay rate of past observations
OBSERVATION = 10000. # timesteps to observe before training
EXPLORE = 3000000 # frames over which to anneal epsilon
FINAL_EPSILON = 0.001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
TRAINING_EPSILON = .0001
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 1e-1
class RLNYCTaxiCab(object):
"""Creates an mlp model with DQN to train on NYC taxi data from January 2016.
Uses a MLP model with DQN."""
def __init__(self, list_of_unique_geohashes,list_of_time_index, list_of_geohash_index,
list_of_inverse_heohash_index, final_data_structure, return_metrics=False):
"""Sotre the data attributes needed to train out model."""
self.list_of_unique_geohashes = list_of_unique_geohashes
self.list_of_time_index = list_of_time_index
self. list_of_geohash_index = list_of_geohash_index
self.list_of_inverse_heohash_index = list_of_inverse_heohash_index
self.final_data_structure = final_data_structure
self.build_mlp_model()
self.return_metrics = return_metrics
def build_mlp_model(self):
"""Build a simple MLP model.
Input time followed by the geohash index."""
model_mlp = Sequential()
model_mlp.add(Dense(100, input_shape=(2,)))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(500))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(1000))
model_mlp.add(BatchNormalization())
model_mlp.add(Activation('relu'))
model_mlp.add(Dropout(.3))
model_mlp.add(Dense(9, activation='linear')) ## predict which geohash to move to next
adam = Adam(lr=LEARNING_RATE)
model_mlp.compile(loss='mse',optimizer=adam)
self.model_mlp = model_mlp
def NaiveApproach(self, s_time_, s_geohash_,starting_geo,
input_fare_list = None, historic_current_fare = None):
"""Assign the same probability to every state and keep track of the
total fare received, total fare over time,
and geohashes visited.
Terminates after a 'day' is finished"""
## parameters to track where we are and at what time
starting_geohash = starting_geo
s_time = s_time_
s_geohash = s_geohash_
list_of_geohashes_visited = []
## check and see if we have old fare to continue adding to
if input_fare_list == None:
total_fare = 0
total_fare_over_time = []
else:
total_fare = historic_current_fare
total_fare_over_time = input_fare_list
while True:
a_t = np.zeros([ACTIONS])
action_index = random.randrange(ACTIONS)
a_t[action_index] = 1
#Get the neighbors from the current geohash - convert back to string
current_geohash_string = self.list_of_inverse_heohash_index[s_geohash]
neighbors = gh.neighbors(current_geohash_string)
# Get the direction we should go
direction_to_move_to = list_of_output_predictions_to_direction[action_index]
# Get the geohash of the direction we moved to
if direction_to_move_to =='stay':
new_geohash = starting_geohash # stay in current geohash, get the index of said geohash
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
# hash with the letters of the geohash above
new_geohash = self.list_of_geohash_index[starting_geohash]
else:
new_geohash = neighbors[direction_to_move_to]## give us the geohash to move to next
# get the reward of the geohash we just moved to (this is the ratio of fare /time of trip)
# time, geohash, list of tuple ( fare, time ,ratio)
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
if len (possible_rewards) ==0:
r_t = -.1 # we do not have information for this time and geohash, don't go here. waste gas
fare_t = 0 # no information so the fare = 0
s_time1 = s_time+10 # assume this took ten minutes
else:
reward_option = np.random.randint(0,len(possible_rewards))
r_t = possible_rewards[reward_option][2] # get the ratio of fare / trip time
fare_t = possible_rewards[reward_option][0]
# get the trip length
s_time1 = s_time + possible_rewards[reward_option][1]
s_geohash1 = self.list_of_geohash_index[new_geohash]
# store the transition in D
if s_time1 <= 2350: # The last possible time for a trip
terminal = 0
# get the naive implementation per day
else: # the day is over, pick a new starting geohash and time
break # the day is over
total_fare += fare_t
total_fare_over_time.append(total_fare)
list_of_geohashes_visited.append(starting_geohash)
# increment the state and time information
s_time = s_time1
s_geohash = s_geohash1
starting_geohash = new_geohash #update the starting geohash in case we stay here
return total_fare, total_fare_over_time, list_of_geohashes_visited
def trainNetworkNeuralNetworkTaxicab(self, args, training_length=1000,
return_training_data = False, save_model = False):
# Code adapted from https://github.com/yanpanlau/Keras-FlappyBird/blob/master/qlearn.py
"""Train a DQN algorithm to learn how the best geohashes to go to throughout the day.
Each geohash is about
3803 x 3803 meters (~15 minutes of driving time to traverse in NYC).
This algoirthm incorporates experience replay to stablize the training procedure
for the DQN algorithm. Due to the large size of the input features,
you need to train for a long time (1-2million iterations) .
This implementation also uses a Naive approach which has both the DQN and
Naive implementation start at the same geohash and same time. Then,
each algorithm will run until the day is finished keeping track
of the geohashes visited and fare received.
This information is finally returned."""
self.return_training_data = return_training_data
# store the previous observations in replay memory
D = deque()
# get the first state by randomly choosing a geohash to start at and a random time to start at
# Assume that the first state has no reward associated with it
# Over multiple steps, starting geohash becomes the previous geohash we visited
starting_geohash = np.random.choice(self.list_of_unique_geohashes)
s_time = np.random.choice(self.list_of_time_index)
s_geohash = self.list_of_geohash_index[starting_geohash]
s_t = np.array([[s_time,
s_geohash]])
if args['mode'] == 'Run':
OBSERVE = 1000 #We keep observe, never train
epsilon = TRAINING_EPSILON
print ("Now we load weight")
self.model_mlp.load_weights(args['model_weights_load'])
adam = Adam(lr=LEARNING_RATE)
self.model_mlp.compile(loss='mse',optimizer=adam)
print ("Weight load successfully")
else: #We go to training mode
OBSERVE = OBSERVATION
epsilon = INITIAL_EPSILON
#start your observations
t = 0
total_days_driven = 0
loss_list = []
total_fare_received = 0
total_fare_received_over_time = []
list_of_geohashes_visited = []
total_naive_fare = 0
total_naive_fare_over_time =[]
list_of_naive_geohashes_visited = []
if return_training_data == True:
self.training_data_X = np.zeros((training_length+1,2))
self.training_data_y = np.zeros((training_length+1,ACTIONS))
if self.return_metrics == True: ## Compare to a naive approach, only train / observe
if t > OBSERVE:
total_naive_fare, total_naive_fare_over_time, list_of_naive_geohashes_visited = \
self.NaiveApproach(s_time, s_geohash,starting_geohash)
start_time = time.time()
while (True):
loss = 0
Q_sa = 0
action_index = 0
r_t = 0
a_t = np.zeros([ACTIONS])
# choose a random action (epsilon greedy)
if t % FRAME_PER_ACTION == 0: ## will always choose this if frame per action is 1
if random.random() <= epsilon:
print("----------Random Action----------")
action_index = random.randrange(ACTIONS) # Randomlly choose another geohash to go to
a_t[action_index] = 1
else:
#print("------------Predicted Action___________")
q = self.model_mlp.predict(s_t) #input the time followed by the geohash index
max_Q = np.argmax(q) # find the position of the highest probability (which direction to go in)
action_index = max_Q
#print('Action {}'.format(action_index))
a_t[max_Q] = 1
# We reduce epsilon gradually so that fewer random actions are taken over time
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
# run the selected action and observed next state and reward
# We need to find the neighbors to the geohash that we started at
# Get the neighbors from the current geohash - convert back to string
current_geohash_string = self.list_of_inverse_heohash_index[s_geohash]
neighbors = gh.neighbors(current_geohash_string)
# Get the direction we should go
direction_to_move_to = list_of_output_predictions_to_direction[action_index]
# Get the geohash of the direction we moved to
if direction_to_move_to =='stay':
new_geohash = starting_geohash # stay in current geohash, get the index of said geohash
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
# hash with the letters of the geohash above
new_geohash = self.list_of_geohash_index[starting_geohash]
else:
new_geohash = neighbors[direction_to_move_to]## give us the geohash to move to next
# get the reward of the geohash we just moved to (this is the ratio of fare /time of trip)
# time, geohash, list of tuple ( fare, time ,ratio)
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
if len (possible_rewards) ==0:
r_t = -.1 # we do not have information for this time and geohash, don't go here. waste gas
fare_t = 0 # no information so the fare = 0
s_time1 = s_time+10 # assume this took ten minutes
else:
reward_option = np.random.randint(0, len(possible_rewards))
r_t = possible_rewards[reward_option][2] # get the ratio of fare / trip time
fare_t = possible_rewards[reward_option][0]
# get the trip length
s_time1 = s_time + possible_rewards[reward_option][1]
#r_t = np.random.choice(possible_rewards)
s_geohash1 = self.list_of_geohash_index[new_geohash]
# store the transition in D
if s_time1 <= 2350: # The last possible time for a trip
terminal = 0
else: # the day is over, pick a new starting geohash and time
print('We finished a day!')
terminal = 1
total_days_driven +=1
# Choose a new starting time and geohash
s_time1 = np.random.choice(self.list_of_time_index)
s_geohash1 = self.list_of_geohash_index[np.random.choice(
self.list_of_unique_geohashes)]
# Check the naive approach on the new geohashes and time
if self.return_metrics == False: ## don't benchmark to the naive approach
pass
else:
if t > OBSERVE: # only record after observations
total_naive_fare, total_naive_fare_over_time, naive_geohashes_visited = \
self.NaiveApproach(s_time1, s_geohash1,
starting_geohash, total_naive_fare_over_time,total_naive_fare )
if return_training_data == True:
list_of_naive_geohashes_visited.extend(naive_geohashes_visited)
# Terminal should be a one if the day is over or a zero otherwise
# time, geohash, action index, reward, time1, geohash 1, terminal
D.append((s_time,s_geohash, action_index, r_t, s_time1, s_geohash1, terminal))
if return_training_data == True: # append training data
if r_t >0: ## normalize the values for hyperas
self.training_data_X[t,:] = np.array([s_time, s_geohash])
self.training_data_y[t,action_index] = np.array([r_t])
else:
self.training_data_X[t,:] = np.array([s_time, s_geohash])
# action index for the reward
self.training_data_y[t,action_index] = r_t
if len(D) > REPLAY_MEMORY: ## don't store a huge replay memory
D.popleft()
######### NEXT SECTION #########
# only train if done observing
if t > OBSERVE:
#sample a minibatch to train on
minibatch = random.sample(D, BATCH)
inputs = np.zeros((BATCH, s_t.shape[1])) # BATCH x 2
targets = np.zeros((inputs.shape[0], ACTIONS)) # BATCH x ACTIONS
# Now we do the experience replay
for i in range(0, len(minibatch)):
s_time_t = minibatch[i][0]
s_geohash_t = minibatch[i][1]
action_t = minibatch[i][2] # action index
reward_t = minibatch[i][3]
s_time_t1 = minibatch[i][4]
s_geohash_t1 = minibatch[i][5]
terminal = minibatch[i][6]
# if terminated, only equals reward
for col in range(inputs.shape[1]-1):
inputs[i,col] = s_time_t
# Save the time and geohash in the inputs to the model
inputs[i,col+1] = s_geohash_t
state_t = np.array([[s_time_t, s_geohash_t]])
state_t1 = np.array([[s_time_t1,s_geohash_t1]])
targets[i] = self.model_mlp.predict(state_t)
# update entire row
Q_sa = self.model_mlp.predict(state_t1)
if terminal==1:
# The day ended, pick a new starting geohash and time
targets[i, action_t] = reward_t
else:
targets[i, action_t] = reward_t + GAMMA * np.max(Q_sa)
# exponential discounting for each memory
loss += self.model_mlp.train_on_batch(inputs, targets)
loss_list.append(loss)
if self.return_metrics is True:
# only record fares once we start training
total_fare_received += fare_t
total_fare_received_over_time.append(total_fare_received)
# print info
state = ""
if t <= OBSERVE:
state = "observe"
elif t > OBSERVE and t <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
if save_model is True:
if t % 1000 == 0:
print("Now we save model")
self.model_mlp.save_weights(args['save_model_weights'],
overwrite=True)
with open("model.json", "w") as outfile:
json.dump(self.model_mlp.to_json(), outfile)
if t % 500 == 0:
print("TIMESTEP", t, "/ STATE", state, \
"/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, \
"/ Q_MAX " , np.max(Q_sa), "/ Loss ", loss, "/ Total fare RL ",\
total_fare_received,
"/ Total fare naive", total_naive_fare)
now_time = time.time()
print('500 steps took {}'.format(now_time - start_time))
start_time = now_time
if t ==training_length: ### end training
if self.return_metrics == True and save_model == True:
print("Now we save model")
self.model_mlp.save_weights(args['save_model_weights'],
overwrite=True)
with open("model.json", "w") as outfile:
json.dump(self.model_mlp.to_json(), outfile)
return loss_list, total_fare_received_over_time, \
list_of_geohashes_visited, total_naive_fare_over_time,\
total_days_driven, list_of_naive_geohashes_visited
elif self.return_metrics == True:
return loss_list, total_fare_received_over_time, \
list_of_geohashes_visited, total_naive_fare_over_time,\
total_days_driven, list_of_naive_geohashes_visited
elif self.return_training_data ==True:
return self.training_data_X, self.training_data_y
elif save_model == True:
print("Now we save model")
self.model_mlp.save_weights(args['save_model_weights'],
overwrite=True)
with open("model.json", "w") as outfile:
json.dump(self.model_mlp.to_json(), outfile)
break
else: # something weird happened
break
# increment the state and time information
s_time = s_time1
s_geohash = s_geohash1
if self.return_metrics == True:
list_of_geohashes_visited.append(starting_geohash)
starting_geohash = new_geohash
# update the starting geohash in case we stay here
t = t + 1
def data_attributes(taxi_yellowcab_df):
"""Some random data objects needed to train the RL algorithm.
Includes a conversion from direction index (0-8) to a
direction (n,s,w,e,...etc). Therefore, we can use the
gh.neighbors attribute to find the geohashes associated with each
direction.
Also, has a dict for geohash : geohash_index
Contains a dict for geohash_index : geohash
Contains a list of all times
Contains a list of all unique geohashes"""
list_of_output_predictions_to_direction =\
{0:'nw',1:'n',2:'ne',3:'w',4:'stay',5:'e',6:'sw',7:'s',8:'se'}
list_of_unique_geohashes = taxi_yellowcab_df.geohash_pickup.unique()
list_of_geohash_index = defaultdict(int)
for idx,hash_n in enumerate(list_of_unique_geohashes):
list_of_geohash_index [hash_n] = idx
list_of_inverse_heohash_index = defaultdict(str)
for idx,hash_n in enumerate(list_of_unique_geohashes):
list_of_inverse_heohash_index[idx] = hash_n
hours = [str(_) for _ in range(24)]
minutes = [str(_) for _ in range(0,60,10)]
minutes.append('00')
list_of_time_index = []
for h in hours:
for m in minutes:
list_of_time_index.append(int(str(h)+str(m)))
list_of_time_index = list(set(list_of_time_index))
return list_of_output_predictions_to_direction, list_of_unique_geohashes, \
list_of_geohash_index, list_of_time_index , list_of_inverse_heohash_index
if __name__ =="__main__":
import gc; gc.collect()
with K.get_session(): # TF session
# open up the data
taxi_yellowcab_df, final_data_structure= make_dataset.main()
# get the data structures needed for the RL class
list_of_output_predictions_to_direction, list_of_unique_geohashes, \
list_of_geohash_index, list_of_time_index,list_of_inverse_heohash_index\
= data_attributes(taxi_yellowcab_df)
arg = {'mode':'Run','save_model':True,'model_weights_load':'model_mlp_linear.h5',
'save_model_weights':'mlp_linear.h5'}
train_rl_taxi = RLNYCTaxiCab(list_of_unique_geohashes,list_of_time_index,list_of_geohash_index,\
list_of_inverse_heohash_index, final_data_structure, return_metrics=True)
if arg['save_model']==True:
loss_list, total_fare_received_over_time, list_of_geohashes_visited,\
naive_fare_over_time, days_driven, naive_geohashes \
=train_rl_taxi.trainNetworkNeuralNetworkTaxicab(arg, training_length=1000000,
return_training_data =False, save_model= True)
# save your metrics
with open('loss_over_time', 'wb') as fp:
pickle.dump(loss_list, fp)
with open('rl_total_fare_time','wb') as fp:
pickle.dump(total_fare_received_over_time, fp)
with open('naive_fare_time','wb') as fp:
pickle.dump(naive_fare_over_time, fp)
with open('total_day','wb') as fp:
pickle.dump(days_driven, fp)
else:
train_rl_taxi.trainNetworkNeuralNetworkTaxicab(arg, training_length=1000000,
return_training_data =False, save_model= False)
```
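The core of the experience-replay loop above is the Bellman target construction: the minibatch targets start as the network's own Q-value predictions, and only the entry for the action actually taken is overwritten with `reward` (terminal) or `reward + GAMMA * max Q(s')`. A numpy-only sketch of that step, detached from Keras and the taxi data, makes the shapes explicit; the toy Q-function and transition below are made up.
```python
import numpy as np

GAMMA = 0.99
ACTIONS = 9

def build_targets(model_predict, minibatch):
    """Sketch of the target construction inside trainNetworkNeuralNetworkTaxicab.

    model_predict maps a (1, 2) array of (time, geohash_index) to a (1, ACTIONS) array
    of Q-values; minibatch holds (s_time, s_geohash, action, reward, s_time1, s_geohash1,
    terminal) tuples, mirroring the deque entries above."""
    inputs = np.zeros((len(minibatch), 2))
    targets = np.zeros((len(minibatch), ACTIONS))
    for i, (s_time, s_geohash, action, reward, s_time1, s_geohash1, terminal) in enumerate(minibatch):
        inputs[i] = [s_time, s_geohash]
        targets[i] = model_predict(np.array([[s_time, s_geohash]]))[0]   # start from current Q-values
        q_next = model_predict(np.array([[s_time1, s_geohash1]]))[0]
        if terminal == 1:
            targets[i, action] = reward                                  # day ended: no bootstrap
        else:
            targets[i, action] = reward + GAMMA * np.max(q_next)         # Bellman backup
    return inputs, targets

# toy check with a constant Q-function and one made-up transition
inputs, targets = build_targets(lambda s: np.ones((1, ACTIONS)), [(800, 3, 4, 0.5, 810, 7, 0)])
print(targets[0, 4])  # 0.5 + 0.99 * 1.0 = 1.49
```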
#### File: Natural_Language_Processing/code/Evaluation_Metrics_word2vec.py
```python
__author__='<NAME>'
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import re
import textblob
from textblob import TextBlob
import gensim
def accuracy_score_test(list_of_lists_of_queries,story_chunk_responses,item_keyword,type_of_evaluation):
"""Calculate the accuracy of return the correct response from each story chunk given the keyword of the item the user input"""
if type_of_evaluation=='tfidf':
number_correct=0
number_incorrect=0
incorrect = []
correct = []
for idx,query in enumerate(list_of_lists_of_queries):
tfidf= TfidfVectorizer() #create a model
tf_idf_story= tfidf.fit_transform(story_chunk_responses) ##tf-idf on the possible story chunks in storychunk_one
query_tfidf = tfidf.transform(query) ## turn the user sentence into a vector
cosine_sim = linear_kernel(tf_idf_story,query_tfidf).flatten() ## cosine similarity between the story chunks and user sentence
section = re.sub(r'(\t)','',story_chunk_responses[cosine_sim.argsort()[::-1][0]] )
if item_keyword in section:
number_correct+=1
correct.append(idx)
else:
number_incorrect+=1
incorrect.append(idx)
if number_incorrect !=0:
return 'The accuracy is',float(number_correct/(number_correct+number_incorrect)),'The query that failed was :',[list_of_lists_of_queries[i] for i in incorrect]
else:
return 'The accuracy is',float(number_correct/(number_correct+number_incorrect)),'The correct queries are',[list_of_lists_of_queries[i] for i in correct]
#return "The accuracy is :{}".format(number_correct/(number_incorrect+number_incorrect))
elif type_of_evaluation =='jaccard':
number_correct=0
number_incorrect=0
incorrect = []
correct = []
for query_idx,query in enumerate(list_of_lists_of_queries):
tokens = TextBlob(query[0]).tokens
query_set = set(tokens)
len_query = len(query_set)
top_score = 0
response_idx = 0
for idx,chunk in enumerate(story_chunk_responses):
story_tokens = TextBlob(chunk).tokens
jaccard_story= set(story_tokens) ##create a set of the words in the story
len_chunk = len(jaccard_story)
len_intersection = len(jaccard_story.intersection(query_set))
query_score = len_intersection / (len_query + len_chunk - len_intersection) ## jaccard similarity (intersection over union)
if query_score > top_score:
top_score=query_score ## replace with the best new match
response_idx=idx
if item_keyword in story_chunk_responses[response_idx]:
number_correct+=1
correct.append(list_of_lists_of_queries[query_idx])
else:
number_incorrect +=1
incorrect.append(list_of_lists_of_queries[query_idx])
if number_incorrect !=0:
return 'The accuracy is',float(number_correct/(number_correct+number_incorrect)),'The query that failed was :',incorrect
else:
return 'The accuracy is',float(number_correct/(number_correct+number_incorrect)),'The correct queries are',correct
# See top related words from word2vec model
def word2vec(story_chunk,user_query):
"""Return the top related words to a user's query"""
try:
one = ''
for i in story_chunk:
one+=i
blob_one= textblob.TextBlob(one)
sent_one = [i.tokens.lower() for i in blob_one.sentences]
one = one.lower()
membership = blob_one.tokens.lower()
query_words_one = user_query[0].lower().split(' ')
final_query_one=[]
count_one = 0
### Check if the user input words are in our word2vec model, if not, we can not use them.
for input_one in query_words_one:
if input_one in membership:
final_query_one.append(input_one)
model_one = gensim.models.Word2Vec(sent_one, min_count=1)
print('The words most similar (in the given story chunk) to the following query words (vectors added together):',final_query_one)
return model_one.most_similar(positive=final_query_one)
except:
return 'There are no words in the story chunk that match your input.'
```
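The tfidf branch above scores each candidate story chunk by fitting a `TfidfVectorizer` on the chunks, transforming the query with the same vocabulary, and taking the chunk with the highest cosine similarity via `linear_kernel`. A stripped-down sketch of that retrieval step; the chunks and query below are made up.
```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

# made-up story chunks and user query, purely to illustrate the retrieval step
story_chunks = ["You find a rusty sword near the door.",
                "A torch flickers on the cave wall.",
                "The river blocks your path to the north."]
query = ["where is the sword"]

tfidf = TfidfVectorizer()
chunk_vectors = tfidf.fit_transform(story_chunks)   # one row per candidate chunk
query_vector = tfidf.transform(query)               # reuse the fitted vocabulary
cosine_sim = linear_kernel(chunk_vectors, query_vector).flatten()
print(story_chunks[cosine_sim.argsort()[::-1][0]])  # -> the chunk mentioning the sword
```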
#### File: Natural_Language_Processing/code/sentence_generator.py
```python
__author__ = '<NAME>'
#encoding utf-8
from textblob import TextBlob
from collections import Counter
from collections import defaultdict
import numpy as np
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
import re
# cond_prob_bigram / cond_prob_trigram code inspired by Brian Spiering
class SentenceGenerator():
"""Probabilistic language generation using a trigram model."""
def __init__(self,story_text):
blob_text = TextBlob(story_text)
text_tokens = blob_text.tokens
self.text_counter_unigram = Counter(text_tokens)
self.text_counter_bigram= Counter(nltk.bigrams(text_tokens))
self.text_counter_trigram= Counter(nltk.trigrams(text_tokens))
#Probability distributions for different text combinations
#This takes ~10 minutes to run
self.prob_dist_unigram = self.prob_distribution(self.text_counter_unigram)
self.prob_dist_bigram = self.prob_distribution(self.text_counter_bigram)
self.prob_dist_trigram = self.prob_distribution(self.text_counter_trigram)
def prob_distribution(self,counter):
"""Return the probability based upon a counter dictionary"""
return {k:value/sum(counter.values()) for k,value in counter.items()}
def cond_prob_bigram(self,end_word, first_word):
"""Conditional probability of word, given previous word. Predicting end word"""
#Inspiration from http://norvig.com/spell-correct.html
bigram = (first_word , end_word)
try: # might not have the keys for a bigram
if self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[first_word] > 0:
return self.prob_dist_bigram[bigram] / self.prob_dist_unigram[first_word]
except:
# Average the back-off value and zero. This is the smoothing
return self.prob_dist_unigram[end_word] / 2
def cond_prob_trigram(self,start,middle,end):
"""Find the conditional probability of a trigram model given the two previous words. Predicting the end word."""
#Inspiration from http://norvig.com/spell-correct.html
trigram = (start ,middle , end)
bigram = ( start,middle)
try: ## might not have the keys for a trigram
if self.prob_dist_trigram[trigram] > 0 and self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[start] >0:
return self.prob_dist_trigram[trigram] / self.prob_dist_bigram[bigram] ##return prob of trigram over first two words
except:
try: #might not find a bigram
if self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[start] >0: # Back off to bigram model
return self.prob_dist_bigram[bigram] / self.prob_dist_unigram[start]
except: # back off to unigram
#back off to unigram model (three words). This is the smoothing
return self.prob_dist_unigram[end] / 3
def sentence_generate(self,number_of_sentences):
"""Generate random sentences based upon the probabilities given the the probability_dict"""
number=0
generated_text=[]
character_counter = 0
#Find starting word of sentences using unigram prob
sentence_endings = ["?","!","."]
starting_chars = [item[1] for item in list(self.prob_dist_bigram.keys()) if item[0] in sentence_endings]
starting_chars_counter = Counter(starting_chars)
#find list of unigram probabilities for starting characters
starting_prob = self.prob_distribution(starting_chars_counter)
#Pick an initial starting word according to its unigram probability
starting_words = list(starting_prob.keys())
start_word = np.random.choice(starting_words, 1, p=[starting_prob[w] for w in starting_words])[0]
generated_text.append(start_word)
while number !=number_of_sentences: #make sure we have this number of sentences. Keep generating sentences if not.
if len(generated_text)<3:
words_list = list(self.prob_dist_bigram.keys())
prev_character=generated_text[character_counter]
current_word_options = [i[1] for i in self.text_counter_bigram if i[0]==prev_character] #Find bigrams with prev char
prob_bigram_list = []
for curr_word in current_word_options:
prob_bigram_list.append(self.cond_prob_bigram(curr_word,prev_character))
# weighted choice algorithm
# http://stackoverflow.com/questions/22722079/choosing-elements-from-python-list-based-on-probability
# 1) pick random number between 0 and 1
# 2) walk through the list, subtracting each item from your number as your go
# 3 ) when you go to 0 or below, pick the current item
weight = np.random.random()
bigram_word_index = 0
for index, prob in enumerate(prob_bigram_list):
    weight -= prob
    if weight < 0:
        bigram_word_index = index
        break
word = current_word_options[bigram_word_index]
generated_text.append(word)
character_counter+=1 ## go to the next character
elif len(generated_text)>2: ###trigram
words_list = list(self.prob_dist_trigram.keys()) ## list of all trigram
first_character=generated_text[character_counter] # find the previous word (one index away)
second_character=generated_text[character_counter-1] #find the previous word to the previous word
current_triword_options= []
prob_trigram_list = [] #list of conditional probabilities associated with the last word in the trigram
for i in self.text_counter_trigram:
if i[1]==first_character and i[0]==second_character: ##the first two words of our trigram
curr_word = i[2] #the current word to predict
prob_trigram_list.append(self.cond_prob_trigram(second_character,first_character,curr_word)) ##add prob
current_triword_options.append(curr_word) ##add the possible word to our list
if len(current_triword_options)==0: ## we do not have any options to continue the sentence.
break
weight = np.random.randint(1,sum(prob_trigram_list)*100000000)##need to change the weight because the triword options don't sum to 100%
weight = weight/100000000# back to probabilities
for index,prob in enumerate(prob_trigram_list):
weight -=prob
if weight <0: ## pick this as the word to add to the sentence
word = current_triword_options[index]
break
generated_text.append(word)
character_counter+=1 ## go to the next character
if word in (".","!","?"): ##end of the sentence
number+=1
first_word = generated_text[0][0].upper()+generated_text[0][1:]
sentence = first_word+' '
for index,word in enumerate(generated_text[1:]):
if generated_text[index] in (".","!","?"):
sentence +=word[0].upper()+word[1:]+' '
else:
sentence +=word +' '
return sentence
``` |
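`SentenceGenerator` is never instantiated in this file, so the following usage sketch is illustrative only; the story text is a tiny made-up stand-in for the real corpus (building the n-gram counts on a full story is noted above as slow).
```python
# Illustrative only: assumes the SentenceGenerator class above is defined/imported.
story_text = ("The fox ran into the forest. The fox found a river. "
              "The river was cold. The forest was dark and the fox slept.")

generator = SentenceGenerator(story_text)
print(generator.sentence_generate(2))  # ask for two sentences from the trigram model
```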
{
"source": "jonhilgart22/lifelines",
"score": 3
} |
#### File: lifelines/fitters/weibull_fitter.py
```python
from __future__ import print_function, division
import numpy as np
import pandas as pd
from numpy.linalg import solve, norm, inv
from lifelines.fitters import UnivariateFitter
from lifelines.utils import inv_normal_cdf
def _negative_log_likelihood(lambda_rho, T, E):
if np.any(lambda_rho < 0):
return np.inf
lambda_, rho = lambda_rho
return - np.log(rho * lambda_) * E.sum() - (rho - 1) * (E * np.log(lambda_ * T)).sum() + ((lambda_ * T) ** rho).sum()
def _lambda_gradient(lambda_rho, T, E):
lambda_, rho = lambda_rho
return - rho * (E / lambda_ - (lambda_ * T) ** rho / lambda_).sum()
def _rho_gradient(lambda_rho, T, E):
lambda_, rho = lambda_rho
return - E.sum() / rho - (np.log(lambda_ * T) * E).sum() + (np.log(lambda_ * T) * (lambda_ * T) ** rho).sum()
# - D/p - D Log[m t] + (m t)^p Log[m t]
def _d_rho_d_rho(lambda_rho, T, E):
lambda_, rho = lambda_rho
return (1. / rho ** 2 * E + (np.log(lambda_ * T) ** 2 * (lambda_ * T) ** rho)).sum()
# (D/p^2) + (m t)^p Log[m t]^2
def _d_lambda_d_lambda_(lambda_rho, T, E):
lambda_, rho = lambda_rho
return (rho / lambda_ ** 2) * (E + (rho - 1) * (lambda_ * T) ** rho).sum()
def _d_rho_d_lambda_(lambda_rho, T, E):
lambda_, rho = lambda_rho
return (-1. / lambda_) * (E - (lambda_ * T) ** rho - rho * (lambda_ * T) ** rho * np.log(lambda_ * T)).sum()
class WeibullFitter(UnivariateFitter):
"""
This class implements a Weibull model for univariate data. The model has parameterized
form:
S(t) = exp(-(lambda*t)**rho), lambda >0, rho > 0,
which implies the cumulative hazard rate is
H(t) = (lambda*t)**rho,
and the hazard rate is:
h(t) = rho*lambda(lambda*t)**(rho-1)
After calling the `.fit` method, you have access to properties like:
`cumulative_hazard_', 'survival_function_', 'lambda_' and 'rho_'.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None,
label='Weibull_estimate', alpha=None, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
timeline: return the estimate at the values in timeline (positively increasing)
event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated observations, i.e the birth event was not observed.
If None, defaults to all 0 (all birth events observed.)
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like `cumulative_hazard_', 'survival_function_', 'lambda_' and 'rho_'.
"""
self.durations = np.asarray(durations, dtype=float)
# check for negative or 0 durations - these are not allowed in a weibull model.
if np.any(self.durations <= 0):
raise ValueError('This model does not allow for non-positive durations. Suggestion: add a small positive value to zero elements.')
self.event_observed = np.asarray(event_observed, dtype=int) if event_observed is not None else np.ones_like(self.durations)
self.timeline = np.sort(np.asarray(timeline)) if timeline is not None else np.arange(int(self.durations.min()), int(self.durations.max()) + 1)
self._label = label
alpha = alpha if alpha is not None else self.alpha
# estimation
self.lambda_, self.rho_ = self._newton_rhaphson(self.durations, self.event_observed)
self.survival_function_ = pd.DataFrame(self.survival_function_at_times(self.timeline), columns=[self._label], index=self.timeline)
self.hazard_ = pd.DataFrame(self.hazard_at_times(self.timeline), columns=[self._label], index=self.timeline)
self.cumulative_hazard_ = pd.DataFrame(self.cumulative_hazard_at_times(self.timeline), columns=[self._label], index=self.timeline)
self.confidence_interval_ = self._bounds(alpha, ci_labels)
self.median_ = 1. / self.lambda_ * (np.log(2)) ** (1. / self.rho_)
# estimation functions - Cumulative hazard takes priority.
self.predict = self._predict("cumulative_hazard_", self._label)
self.subtract = self._subtract("cumulative_hazard_")
self.divide = self._divide("cumulative_hazard_")
# plotting - Cumulative hazard takes priority.
self.plot = self._plot_estimate("cumulative_hazard_")
self.plot_cumulative_hazard = self.plot
return self
def hazard_at_times(self, times):
return self.lambda_ * self.rho_ * (self.lambda_ * times) ** (self.rho_ - 1)
def survival_function_at_times(self, times):
return np.exp(-self.cumulative_hazard_at_times(times))
def cumulative_hazard_at_times(self, times):
return (self.lambda_ * times) ** self.rho_
def _newton_rhaphson(self, T, E, precision=1e-5):
from lifelines.utils import _smart_search
def jacobian_function(parameters, T, E):
return np.array([
[_d_lambda_d_lambda_(parameters, T, E), _d_rho_d_lambda_(parameters, T, E)],
[_d_rho_d_lambda_(parameters, T, E), _d_rho_d_rho(parameters, T, E)]
])
def gradient_function(parameters, T, E):
return np.array([_lambda_gradient(parameters, T, E), _rho_gradient(parameters, T, E)])
# initialize the parameters. This shows dramatic improvements.
parameters = _smart_search(_negative_log_likelihood, 2, T, E)
iter = 1
step_size = 1.
converging = True
while converging and iter < 50:
# Do not override hessian and gradient in case of garbage
j, g = jacobian_function(parameters, T, E), gradient_function(parameters, T, E)
delta = solve(j, - step_size * g.T)
if np.any(np.isnan(delta)):
raise ValueError("delta contains nan value(s). Convergence halted.")
parameters += delta
# Save these as pending result
jacobian = j
if norm(delta) < precision:
converging = False
iter += 1
self._jacobian = jacobian
return parameters
def _bounds(self, alpha, ci_labels):
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
var_lambda_, var_rho_ = inv(self._jacobian).diagonal()
def _dH_d_lambda(lambda_, rho, T):
return rho / lambda_ * (lambda_ * T) ** rho
def _dH_d_rho(lambda_, rho, T):
return np.log(lambda_ * T) * (lambda_ * T) ** rho
def sensitivity_analysis(lambda_, rho, var_lambda_, var_rho_, T):
return var_lambda_ * _dH_d_lambda(lambda_, rho, T) ** 2 + var_rho_ * _dH_d_rho(lambda_, rho, T) ** 2
std_cumulative_hazard = np.sqrt(sensitivity_analysis(self.lambda_, self.rho_, var_lambda_, var_rho_, self.timeline))
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = self.cumulative_hazard_at_times(self.timeline) + alpha2 * std_cumulative_hazard
df[ci_labels[1]] = self.cumulative_hazard_at_times(self.timeline) - alpha2 * std_cumulative_hazard
return df
def _compute_standard_errors(self):
var_lambda_, var_rho_ = inv(self._jacobian).diagonal()
return pd.DataFrame([[np.sqrt(var_lambda_), np.sqrt(var_rho_)]],
index=['se'], columns=['lambda_', 'rho_'])
def _compute_confidence_bounds_of_parameters(self):
se = self._compute_standard_errors().loc['se']
alpha2 = inv_normal_cdf((1. + self.alpha) / 2.)
return pd.DataFrame([
np.array([self.lambda_, self.rho_]) + alpha2 * se,
np.array([self.lambda_, self.rho_]) - alpha2 * se,
], columns=['lambda_', 'rho_'], index=['upper-bound', 'lower-bound'])
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : pd.DataFrame
Contains columns coef, se(coef), lower, upper"""
lower_upper_bounds = self._compute_confidence_bounds_of_parameters()
df = pd.DataFrame(index=['lambda_', 'rho_'])
df['coef'] = [self.lambda_, self.rho_]
df['se(coef)'] = self._compute_standard_errors().loc['se']
df['lower %.2f' % self.alpha] = lower_upper_bounds.loc['lower-bound']
df['upper %.2f' % self.alpha] = lower_upper_bounds.loc['upper-bound']
return df
def print_summary(self):
"""
Print summary statistics describing the fit.
"""
df = self.summary
# Print information about data first
print('n={}, number of events={}'.format(self.durations.shape[0],
np.where(self.event_observed)[0].shape[0]),
end='\n\n')
print(df.to_string(float_format=lambda f: '{:.3e}'.format(f)))
return
``` |
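A minimal usage sketch for the Weibull fit method above, assuming it belongs to a lifelines-style `WeibullFitter` (the class name and import path are assumptions, since the class header is not shown in this snippet):
```python
# Hypothetical usage; WeibullFitter and its import are assumed, not shown above.
import numpy as np
from lifelines import WeibullFitter

durations = np.array([5., 6., 7., 8., 9., 10.])  # must be strictly positive
event_observed = np.array([1, 1, 0, 1, 0, 1])    # 1 = event seen, 0 = right-censored

wf = WeibullFitter()
wf.fit(durations, event_observed=event_observed, label='weibull')

print(wf.lambda_, wf.rho_)  # fitted parameters
print(wf.median_)           # median survival time from the closed-form expression
wf.print_summary()          # n, number of events, and the coefficient table
```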
{
"source": "jonhillmtl/passman",
"score": 3
} |
#### File: passman/passman/commands.py
```python
from .repo import Repo, RepoAlreadyExistsError, RepoNotFoundError
from .vault import (Vault, VaultNotFoundError, VaultAlreadyExistsError,
VaultWrongPasswordError, VaultEntryAlreadyExistsError, VaultWeakPasswordError)
from .utils import error_exit, smart_choice
from termcolor import colored
import pprint
import pyperclip
def add_vault_entry(args):
vault = Vault(args.vault_name, args.vault_password)
try:
entry = vault.add_entry(name=args.name, username=args.username, password=<PASSWORD>)
entry['password'] = '<<PASSWORD>>'
vault.add_history_entry('add_entry', entry, entry['timestamp'])
except VaultEntryAlreadyExistsError:
error_exit("entry for {} ({}) already exists. try update_vault_entry instead".format(
args.name,
args.username
))
def change_vault_password(args):
vault = Vault(args.name, args.old_password)
try:
vault.change_password(args.new_password)
except VaultWeakPasswordError as e:
error_exit("the password is too weak: {}".format(e.error))
def create_vault(args):
vault = Vault(args.vault_name, args.vault_password)
try:
vault.create()
vault.add_history_entry('create_vault', None, None)
# TODO JHILL: write cached password to cache file
except VaultAlreadyExistsError as e:
error_exit("vault already exists")
except RepoNotFoundError as e:
error_exit("repo does not exist")
except VaultWeakPasswordError as e:
error_exit("the password is too weak: {}".format(e.error))
def delete_vault_entry(args):
vault = Vault(args.vault_name, args.vault_password)
vault_data = vault.read()
entry_id = smart_choice(
[
dict(
choice_data=entry['id'],
description="{}: {} {} {}".format(
entry['id'],
entry['name'],
entry['username'],
entry['password']
)
) for entry in vault_data['entries']]
)
if entry_id != -1:
vault.delete_entry(entry_id)
vault.add_history_entry('delete_vault_entry', entry_id, None)
def dump_vault(args):
vault = Vault(args.vault_name, args.vault_password)
vault.add_history_entry('dump_vault', None, None)
pprint.pprint(vault.read())
def init(args):
repo = Repo()
try:
repo.init()
print(colored("created repo", "green"))
except RepoAlreadyExistsError as e:
error_exit("repo already exists")
def list_vaults(args):
repo = Repo()
if repo.exists:
print(repo.vaults)
else:
error_exit("no repo exists")
def merge_vaults(args):
target = Vault(args.v1, args.v1pw)
source = Vault(args.v2, args.v2pw)
target.merge_vault(source)
# TODO JHILL: add_history_entry
def password(args):
vault = Vault(args.vault_name, args.vault_password)
vault_data = vault.read()
matches = []
if args.search == '':
matches = vault_data['entries']
else:
for entry in vault_data['entries']:
if args.search.lower() in entry['name'].lower():
matches.append(entry)
if len(matches) == 0:
error_exit("no matches found for {}".format(args.search))
entry_id = smart_choice(
[
dict(
choice_data=index,
description="{} {}".format(
entry['name'],
entry['username']
)
) for (index, entry) in enumerate(matches)
]
)
if entry_id != -1:
pyperclip.copy(matches[entry_id]['password'])
print(colored("copied to clipboard", "green"))
vault.add_history_entry(
'password',
dict(
entry_id=matches[entry_id]['id'],
search=args.search
)
)
def security_audit(args):
vault = Vault(args.vault_name, args.vault_password)
# TODO JHILL: use entry audits
password_audits, entry_audits = vault.security_audit()
secure = True
for password, data in password_audits.items():
if len(data['entries']) > 1:
secure = False
print("{} accounts have the same password: {}".format(
len(data['entries']),
", ".join("{} ({})".format(e['name'], e['username']) for e in data['entries'])
))
if data['password_secure'] is False:
secure = False
print("{} accounts have weak passwords: {}".format(
len(data['entries']),
", ".join("{} ({})".format(e['name'], e['username']) for e in data['entries'])
))
# TODO JHILL: check the age of the password by their timestamp!!!
if secure is True:
print(colored("secure", "green"))
vault.add_history_entry('security_audit', None, None)
def update_vault_entry(args):
vault = Vault(args.vault_name, args.vault_password)
vault_data = vault.read()
entry_id = smart_choice([
dict(
choice_data=entry['id'],
description="{}: {} {} {}".format(
entry['id'],
entry['name'],
entry['username'],
entry['password']
)
) for entry in vault_data['entries']
])
if entry_id != -1:
entry = vault.update_entry(entry_id, name=args.name, username=args.username, password=args.password)
entry['password'] = '<<PASSWORD>>'
vault.add_history_entry('update_vault_entry', entry, None)
``` |
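The commands above all read their inputs from an `args` namespace; a hedged sketch of how they could be wired to `argparse` subcommands follows (the real passman entry point is not shown in this snippet, so the parser layout is an assumption):
```python
# Hypothetical CLI wiring; the actual passman entry point may differ.
import argparse
from passman import commands

parser = argparse.ArgumentParser(prog='passman')
subparsers = parser.add_subparsers()

create = subparsers.add_parser('create_vault')
create.add_argument('vault_name')
create.add_argument('vault_password')
create.set_defaults(func=commands.create_vault)

add = subparsers.add_parser('add_vault_entry')
for name in ('vault_name', 'vault_password', 'name', 'username', 'password'):
    add.add_argument(name)
add.set_defaults(func=commands.add_vault_entry)

args = parser.parse_args()
args.func(args)
```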
{
"source": "JonHolman/access-undenied-aws",
"score": 2
} |
#### File: access-undenied-aws/access_undenied_aws/cli.py
```python
from __future__ import annotations
import logging
from typing import IO
import boto3
import click
import click_log
import colorlog
import json
from access_undenied_aws import analysis
from access_undenied_aws import common
from access_undenied_aws import logger
from access_undenied_aws import organizations
def _initialize_logger() -> None:
click_log.basic_config(logger)
root_handler = logger.handlers[0]
formatter = colorlog.ColoredFormatter(
"%(log_color)s[%(asctime)s,%(msecs)d %(levelname)-8s"
" %(filename)s:%(lineno)d - %(funcName)20s()]%(reset)s"
" %(white)s%(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
"DEBUG": "blue",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
root_handler.setFormatter(formatter)
def initialize_config_from_user_input(
config: common.Config,
output_file: IO[str],
management_account_role_arn: str,
suppress_output: bool,
cross_account_role_name: str,
) -> None:
config.cross_account_role_name = cross_account_role_name
config.management_account_role_arn = management_account_role_arn
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
config.output_file = output_file
config.suppress_output = suppress_output
_initialize_logger()
pass_config = click.make_pass_decorator(common.Config, ensure=True)
@click.group()
@click_log.simple_verbosity_option(logger)
@click.option(
"--profile",
help="the AWS profile to use (default is default profile)",
default=None,
)
@pass_config
def access_undenied_aws(config: common.Config, profile: str) -> None:
"""
Parses AWS AccessDenied CloudTrail events, explains the reasons for them, and offers actionable fixes.
"""
config.session = boto3.Session(profile_name=profile)
config.account_id = config.session.client("sts").get_caller_identity()["Account"]
config.iam_client = config.session.client("iam")
@access_undenied_aws.command()
@click.option(
"--events-file",
help="input file of CloudTrail events",
required=True,
type=click.File("r"),
)
@click.option(
"--scp-file",
help="Service control policy data file generated by the get_scps command.",
default=None,
type=click.File("r"),
)
@click.option(
"--management-account-role-arn",
help=(
"a cross-account role in the management account of the organization "
"that must be assumable by your credentials."
),
default=None,
)
@click.option(
"--cross-account-role-name",
help=(
"The name of the cross-account role for AccessUndenied to assume."
" default: AccessUndeniedRole"
),
default="AccessUndeniedRole",
)
@click.option(
"--output-file",
help="output file for results (default: no output to file)",
default=None,
type=click.File("w"),
)
@click.option(
"--suppress-output/--no-suppress-output",
help="should output to stdout be suppressed (default: not suppressed)",
default=False,
)
@pass_config
def analyze(
config: common.Config,
events_file: click.File,
scp_file: IO[str],
management_account_role_arn: str,
cross_account_role_name: str,
output_file: IO[str],
suppress_output: bool,
) -> None:
"""
Analyzes AWS CloudTrail events and explains the reasons for AccessDenied
"""
initialize_config_from_user_input(
config,
output_file,
management_account_role_arn,
suppress_output,
cross_account_role_name,
)
organizations.initialize_organization_data(config, scp_file.read() if scp_file else None)
analysis.analyze_cloudtrail_events(config, events_file)
@access_undenied_aws.command()
@click.option(
"--output-file",
help="output file for scp data (default: scp_data.json)",
default="scp_data.json",
type=click.File("w"),
)
@pass_config
def get_scps(
config: common.Config,
output_file: IO[str],
) -> None:
"""
Writes the organization's SCPs and organizational tree to a file
"""
logger.info("Gathering Service Control Policy data...")
organizations.initialize_organization_data(config, None)
json.dump(config.organization_nodes, output_file, default=vars, indent=2)
logger.info(f"Finished writing Service Control Policy data to {output_file.name}.")
```
#### File: lambda-cdk/fns/lambda_handler.py
```python
import sys
import boto3
from os import environ
import access_undenied_aws
import access_undenied_aws.analysis
import access_undenied_aws.cli
import access_undenied_aws.common
import access_undenied_aws.organizations
ACCESS_UNDENIED_ROLE = "accessUndenied"
ACCOUNT = "123456789012"
client = boto3.client('sns')
def lambda_handler(event, context):
config = access_undenied_aws.common.Config()
config.session = boto3.Session()
config.account_id = config.session.client("sts").get_caller_identity()["Account"]
config.iam_client = config.session.client("iam")
access_undenied_aws.cli.initialize_config_from_user_input(
config=config,
cross_account_role_name=(ACCESS_UNDENIED_ROLE),
management_account_role_arn=(f"arn:aws:iam::{ACCOUNT}:role/{ACCESS_UNDENIED_ROLE}"),
output_file=sys.stdout,
suppress_output=True)
access_undenied_aws.organizations.initialize_organization_data(
config=config,
scp_file_content=''
)
result = access_undenied_aws.analysis.analyze(config, event.get("detail", event))
client.publish(TargetArn=environ['SNS_TOPIC_ARN'], Message=str(result))
return {
'statusCode': 200,
'body': str(result)
}
```
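A hedged sketch of invoking the handler locally, as if appended to the module above; the event body is a made-up, minimal EventBridge-wrapped CloudTrail record, and it assumes AWS credentials plus the `SNS_TOPIC_ARN` environment variable are configured:
```python
# Hypothetical local invocation; field values are placeholders, not real data.
sample_event = {
    "detail": {
        "eventSource": "s3.amazonaws.com",
        "eventName": "GetObject",
        "errorCode": "AccessDenied",
        "userIdentity": {"arn": "arn:aws:iam::123456789012:user/alice"},
    }
}

if __name__ == "__main__":
    print(lambda_handler(sample_event, context=None))
```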
#### File: python/access_undenied_aws/simulate_custom_policy_context_generator.py
```python
from __future__ import annotations
from typing import (
Dict,
List,
Callable,
Sequence,
TYPE_CHECKING,
Optional,
Iterable,
)
import boto3
import botocore.exceptions
from access_undenied_aws import event
from access_undenied_aws import event_permission_data
from access_undenied_aws import logger
if TYPE_CHECKING:
from mypy_boto3_iam.type_defs import ContextEntryTypeDef
else:
ContextEntryTypeDef = object
class SimulateCustomPolicyContextGenerator(object):
def __init__(
self,
session: boto3.Session,
event_permission_data_: event_permission_data.EventPermissionData,
cloudtrail_event_: event.Event,
):
self.session = session
self.iam_client = session.client("iam")
self.event_permission_data = event_permission_data_
self.cloudtrail_event = cloudtrail_event_
def _get_aws_event_time(self) -> Optional[ContextEntryTypeDef]:
return {
"ContextKeyName": "aws:CurrentTime",
"ContextKeyValues": (self.cloudtrail_event.event_time,),
"ContextKeyType": "string",
}
def _get_aws_principal_arn(self) -> Optional[ContextEntryTypeDef]:
return {
"ContextKeyName": "aws:PrincipalArn",
"ContextKeyValues": (self.event_permission_data.principal.arn,),
"ContextKeyType": "string",
}
def _get_aws_principal_arn_caps(self) -> Optional[ContextEntryTypeDef]:
context_entry = {
"ContextKeyName": "aws:PrincipalARN",
"ContextKeyValues": (self.event_permission_data.principal.arn,),
"ContextKeyType": "string",
}
return context_entry
def _get_aws_principal_tag(self, tag_key: str) -> Optional[ContextEntryTypeDef]:
principal_tags = []
try:
if self.event_permission_data.principal.type == "AssumedRole":
principal_tags = self.iam_client.list_role_tags(
RoleName=(self.event_permission_data.principal.name)
)["Tags"]
elif self.event_permission_data.principal.type == "IAMUser":
principal_tags = self.iam_client.list_user_tags(
UserName=(self.event_permission_data.principal.name)
)["Tags"]
except botocore.exceptions.ClientError as list_tags_error:
logger.error(
f"[Error:{repr(list_tags_error)}] when getting" " aws:PrincipalTag value"
)
return None
for tag in principal_tags:
if tag["Key"] == tag_key:
return {
"ContextKeyName": f"aws:PrincipalTag/{tag_key}",
"ContextKeyValues": (tag["Value"],),
"ContextKeyType": "string",
}
return None
def _get_aws_requested_region(self) -> Optional[ContextEntryTypeDef]:
if not self.cloudtrail_event.region:
return None
return {
"ContextKeyName": "aws:RequestedRegion",
"ContextKeyValues": (self.cloudtrail_event.region,),
"ContextKeyType": "string",
}
def _get_aws_service_name(self) -> Optional[ContextEntryTypeDef]:
if not self.cloudtrail_event.event_source:
return None
return {
"ContextKeyName": "iam:AWSServiceName",
"ContextKeyValues": (self.cloudtrail_event.event_source,),
"ContextKeyType": "string",
}
def _get_aws_source_ip(self) -> Optional[ContextEntryTypeDef]:
if not self.cloudtrail_event.source_ip_address:
return None
return {
"ContextKeyName": "aws:SourceIp",
"ContextKeyValues": (self.cloudtrail_event.source_ip_address,),
"ContextKeyType": "string",
}
def _get_aws_source_vpce(self) -> Optional[ContextEntryTypeDef]:
if not self.cloudtrail_event.vpc_endpoint_id:
return None
return {
"ContextKeyName": "aws:sourceVpce",
"ContextKeyValues": (self.cloudtrail_event.vpc_endpoint_id,),
"ContextKeyType": "string",
}
def _get_aws_username(self) -> Optional[ContextEntryTypeDef]:
return {
"ContextKeyName": "aws:username",
"ContextKeyValues": (self.event_permission_data.principal.name,),
"ContextKeyType": "string",
}
def generate_context(
self, context_keys: Iterable[str]
) -> Sequence[ContextEntryTypeDef]:
context_entries = []
for context_key in context_keys:
context_generation_result = None
if context_key in SimulateCustomPolicyContextGenerator.KEY_FUNCTION_DICT:
context_generation_result = (
SimulateCustomPolicyContextGenerator.KEY_FUNCTION_DICT[context_key](
self
)
)
elif (
"/" in context_key
and context_key.split("/", 1)[0]
in SimulateCustomPolicyContextGenerator.KEY_WITH_SUBKEY_FUNCTION_DICT
):
subkey = context_key.split("/", 1)[1]
context_generation_result = (
SimulateCustomPolicyContextGenerator.KEY_WITH_SUBKEY_FUNCTION_DICT[
context_key.split("/", 1)[0]
](self, subkey)
)
if context_generation_result:
context_entries.append(context_generation_result)
else:
logger.warning(
"Unable to find value for condition context key"
f" [context_key: {context_key}]"
)
return context_entries
KEY_FUNCTION_DICT: Dict[
str,
Callable[
[SimulateCustomPolicyContextGenerator],
Optional[ContextEntryTypeDef],
],
] = {
"aws:username": _get_aws_username,
"aws:CurrentTime": _get_aws_event_time,
"aws:PrincipalArn": _get_aws_principal_arn,
"aws:PrincipalARN": _get_aws_principal_arn_caps,
"aws:SourceVpce": _get_aws_source_vpce,
"aws:SourceIp": _get_aws_source_ip,
"aws:RequestedRegion": _get_aws_requested_region,
"iam:AWSServiceName": _get_aws_service_name,
}
KEY_WITH_SUBKEY_FUNCTION_DICT: Dict[
str,
Callable[
[SimulateCustomPolicyContextGenerator, str],
Optional[ContextEntryTypeDef],
],
] = {
"aws:PrincipalTag": _get_aws_principal_tag,
}
```
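A hedged usage sketch for the dispatch tables above; the event and permission objects are SimpleNamespace stand-ins carrying only the attributes the generator reads, not the real access_undenied_aws classes:
```python
# Hypothetical driver; SimpleNamespace stand-ins replace the real event classes.
import types
import boto3

principal = types.SimpleNamespace(arn="arn:aws:iam::123456789012:role/demo",
                                  name="demo", type="AssumedRole")
epd = types.SimpleNamespace(principal=principal)
evt = types.SimpleNamespace(event_time="2022-01-01T00:00:00Z",
                            region="us-east-1",
                            event_source="s3.amazonaws.com",
                            source_ip_address="198.51.100.7",
                            vpc_endpoint_id=None)

generator = SimulateCustomPolicyContextGenerator(
    boto3.Session(region_name="us-east-1"), epd, evt)
print(generator.generate_context(["aws:RequestedRegion", "aws:PrincipalArn"]))
# -> [{'ContextKeyName': 'aws:RequestedRegion', 'ContextKeyValues': ('us-east-1',),
#      'ContextKeyType': 'string'}, ...]
```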
#### File: python/access_undenied_aws/simulate_custom_policy_helper.py
```python
import copy
import json
import re
from typing import (
List,
Sequence,
Optional,
TYPE_CHECKING,
Dict,
Any,
Set,
Iterable,
)
from access_undenied_aws import simulate_custom_policy_result_analyzer
from access_undenied_aws import (
event_permission_data,
iam_utils,
event,
iam_policy_data,
common,
results,
utils,
logger,
)
from access_undenied_aws.iam_policy_data import IamPolicyData
if TYPE_CHECKING:
from mypy_boto3_iam import IAMClient
from mypy_boto3_iam.type_defs import (
ContextEntryTypeDef,
SimulateCustomPolicyRequestRequestTypeDef,
)
else:
ContextEntryTypeDef = object
IAMClient = object
SimulateCustomPolicyRequestRequestTypeDef = object
def _add_resource_field(resource_policy, resource_arn) -> str:
"""
Policy simulator requires resource policies to have the
resource field explicitly stated. That is not the case for all resource
policies (e.g. IAM Trust Policies)
"""
resource_policy_dict = json.loads(resource_policy)
for statement in resource_policy_dict.get("Statement", []):
statement.pop("NotResource", None)
statement["Resource"] = resource_arn
return json.dumps(resource_policy_dict)
def _get_context_keys_for_custom_policy(
policy_input_list: List[str],
) -> Set[str]:
context_keys = set()
for policy_document in policy_input_list:
for statement in json.loads(policy_document).get("Statement", []):
for _, condition_type_map in statement.get("Condition", {}).items():
for context_key in condition_type_map.keys():
context_keys.add(context_key)
return context_keys
def _simulate_custom_policy(
iam_client: IAMClient,
cloudtrail_event_: event.Event,
event_permission_data_: event_permission_data.EventPermissionData,
iam_policy_data_: iam_policy_data.IamPolicyData,
guardrail_policy: Optional[common.Policy],
simulate_custom_policy_arguments_template: SimulateCustomPolicyRequestRequestTypeDef,
) -> Optional[results.AnalysisResult]:
if guardrail_policy:
simulate_custom_policy_arguments = copy.copy(
simulate_custom_policy_arguments_template
)
simulate_custom_policy_arguments["PermissionsBoundaryPolicyInputList"] = [
guardrail_policy.policy_document
]
else:
simulate_custom_policy_arguments = simulate_custom_policy_arguments_template
simulate_custom_policy_response = iam_client.simulate_custom_policy(
**simulate_custom_policy_arguments
)["EvaluationResults"][0]
if simulate_custom_policy_response["EvalDecision"] in [
"explicitDeny",
"implicitDeny",
]:
return simulate_custom_policy_result_analyzer.SimulateCustomPolicyResultAnalyzer(
simulate_custom_policy_request=simulate_custom_policy_arguments,
simulate_custom_policy_response=simulate_custom_policy_response,
event_=cloudtrail_event_,
event_permission_data_=event_permission_data_,
iam_policy_data_=iam_policy_data_,
guardrail_policy=guardrail_policy,
).analyze()
return None
def generate_context_key_list_for_simulate_custom_policy(
iam_policy_data_: IamPolicyData, iam_client: IAMClient
) -> Iterable[str]:
policy_input_list = [
identity_policy.policy_document
for identity_policy in iam_policy_data_.identity_policies
] + [
boundary_policy.policy_document
for boundary_policy in iam_policy_data_.guardrail_policies
]
if iam_policy_data_.resource_policy:
policy_input_list.append(iam_policy_data_.resource_policy.policy_document)
return _get_context_keys_for_custom_policy(policy_input_list)
def generate_simulate_custom_policy_request(
iam_policy_data_: IamPolicyData,
event_permission_data_: event_permission_data.EventPermissionData,
context: Sequence[ContextEntryTypeDef],
) -> SimulateCustomPolicyRequestRequestTypeDef:
simulate_custom_policy_request = {
"PolicyInputList": [
policy.policy_document for policy in iam_policy_data_.identity_policies
],
"ActionNames": (event_permission_data_.iam_permission,),
"ResourceOwner": get_resource_owner_parameter_from_account_arn(
resource_arn=event_permission_data_.resource.arn,
resource_account_id=event_permission_data_.resource.account_id,
iam_permission=event_permission_data_.iam_permission,
),
"CallerArn": iam_policy_data_.caller_arn_placeholder,
"ResourceArns": (event_permission_data_.resource.arn,),
"ContextEntries": context,
}
if iam_policy_data_.resource_policy:
# We can only perform one principal replacement,
# but the principal parameter in the resource policy can be
# arn:aws:iam::account:role/role-name or it can be
# arn:aws:sts::account:assumed-role/role-name/role-session-name
# We need to find out which principal, if any,
# is used in the resource policy. :(
iam_policy_data_.caller_arn = (
event_permission_data_.principal.session_name
if event_permission_data_.principal.session_name
in iam_policy_data_.resource_policy.policy_document
else event_permission_data_.principal.arn
)
simulate_custom_policy_request[
"ResourcePolicy"
] = iam_utils.replace_principal_in_policy(
original_principal=iam_policy_data_.caller_arn,
replacement_principal=iam_policy_data_.caller_arn_placeholder,
policy=_add_resource_field(
iam_policy_data_.resource_policy.policy_document,
event_permission_data_.resource.arn,
),
)
return simulate_custom_policy_request
def get_resource_owner_parameter_from_account_arn(
resource_arn: str,
resource_account_id: str,
iam_permission: str,
) -> str:
arn_match = re.match(common.RESOURCE_ARN_PATTERN, resource_arn)
if (
utils.get_regex_match_group_or_none(arn_match, "resource_type") == "key"
or "AssumeRole" in iam_permission
):
logger.debug(
"IAM Role trust policies and KMS Key Policies are"
" anomalous and are evaluated like cross-account"
" policies."
" Listing placeholder account id 123456789012..."
)
return f"arn:aws:iam::123456789012:root"
return f"arn:aws:iam::{resource_account_id}:root"
def simulate_custom_policies(
iam_client: IAMClient,
cloudtrail_event_: event.Event,
event_permission_data_: event_permission_data.EventPermissionData,
iam_policy_data_: iam_policy_data.IamPolicyData,
simulate_custom_policy_arguments_base: SimulateCustomPolicyRequestRequestTypeDef,
) -> Optional[results.AnalysisResult]:
for guardrail_policy in iam_policy_data_.guardrail_policies or [None]:
deny_result = _simulate_custom_policy(
iam_client,
cloudtrail_event_,
event_permission_data_,
iam_policy_data_,
guardrail_policy,
simulate_custom_policy_arguments_base,
)
if deny_result:
return deny_result
raise common.AccessUndeniedError(
message="AccessUndenied could not find a reason for AccessDenied.",
access_denied_reason=common.AccessDeniedReason.ALLOWED,
)
``` |
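For illustration, `_get_context_keys_for_custom_policy` walks every statement's `Condition` block and collects the context key names; a small hedged example follows (the policy text is invented):
```python
# Hypothetical input showing which condition keys get extracted.
policy = """{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": "s3:GetObject",
    "Resource": "*",
    "Condition": {
      "StringEquals": {"aws:RequestedRegion": "us-east-1"},
      "Bool": {"aws:SecureTransport": "true"}
    }
  }]
}"""

print(_get_context_keys_for_custom_policy([policy]))
# -> {'aws:RequestedRegion', 'aws:SecureTransport'}
```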
{
"source": "jonhoo/django-coex",
"score": 2
} |
#### File: jonhoo/django-coex/check-symex-zoobar.py
```python
verbose = 1
import os
import re
import symex.fuzzy as fuzzy
import time
# NOTE(jon): This needs to come before we start the rewriter
cov = None
import sys
if len(sys.argv) > 1 and sys.argv[-1] == '-c':
from coverage import coverage
cov = True
from symex.symdjango import SymDjango, post_data
import symex.symeval
settings = "zoobar.settings"
appviews = {
"zapp.views.index": (lambda p: p == "/"),
"zapp.views.users": (lambda p: p == "users/"),
"zapp.views.transfer": (lambda p: p == "transfer/"),
"zlogio.views.login": (lambda p: p == "accounts/login/"),
"zlogio.views.logout": (lambda p: p == "accounts/logout/")
#"url.parameter.example": (lambda p: (p == "/", {name: "this"}))
}
appdir = os.path.abspath(os.path.dirname(__file__) + '/app')
d = SymDjango(settings, appdir, appviews)
if cov is not None:
cov = coverage(auto_data = True, source = [os.path.realpath(appdir)])
from zapp.models import Person, Transfer
from django.contrib.auth.models import User
from symex.symqueryset import AllSymQuerySet, SQLSymQuerySet, MutationSymQuerySet
d.setup_models([
{'model': User, 'queryset': AllSymQuerySet},
{'model': Person, 'queryset': AllSymQuerySet},
{'model': Transfer, 'queryset': AllSymQuerySet}
])
# Only safe to load now that it's been patched and added to import path
import zoobar
def report_balance_mismatch():
print("WARNING: Balance mismatch detected")
def report_zoobar_theft():
print("WARNING: Zoobar theft detected")
def adduser(username):
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
u = User.objects.create_user(username, '', 'password')
u.save()
return u
# TODO(jon): This currently only test single-request actions
def test_stuff():
method = fuzzy.mk_str('method')
if not method == 'get' and not method == 'post':
return
req = d.new()
from django.contrib.auth.models import User
User.objects.all().delete()
alice = adduser('alice')
bob = adduser('bob')
balance1 = sum([u.person.zoobars for u in User.objects.all()])
from zapp.models import Transfer
Transfer.objects.all().delete()
#User.objects.get(username = 'alice')
## In two cases, we over-restrict the inputs in order to reduce the
## number of paths that "make check" explores, so that it finishes
## in a reasonable amount of time. You could pass unconstrained
## concolic values for both REQUEST_METHOD and PATH_INFO, but then
## zoobar generates around 2000 distinct paths, and that takes many
## minutes to check.
path = fuzzy.mk_str('path') + '/'
if path[0] == '/':
return
data = {}
if method == 'post':
if path == 'transfer/':
data = post_data(
zoobars = fuzzy.mk_int('transfer.zoobars'),
recipient = fuzzy.mk_str('transfer.recipient')
)
logged_in = False
user = fuzzy.mk_str('user')
if user == 'alice' or user == 'bob':
if verbose > 0:
print('==> accessing %s as %s' % (path, user))
if user == 'alice':
req.login(username='alice', password='password')
elif user == 'bob':
req.login(username='bob', password='password')
logged_in = True
else:
if verbose > 0:
print('==> accessing %s anonymously' % path)
if cov is not None:
cov.start()
response = None
if method == 'get':
response = req.get(path)
elif method == 'post':
response = req.post(path, data=data)
if cov is not None:
cov.stop()
cov.save()
if verbose == 1 and response.status_code == 404:
print(" -> 404 not found...")
elif verbose == 1:
print(' -> %d %s' % (response.status_code, response.reason_phrase))
elif verbose > 1:
print(' -> %d %s\n -> %s' % (
response.status_code,
response.reason_phrase,
response.items())
)
if verbose > 2 or response.status_code == 500:
print(80 * "-")
print(re.sub("^", "\t", response.content))
print(80 * "-")
if logged_in and path == "transfer/":
if verbose > 0:
if "Log out" in response.content:
print(" -> login works. that's nice.")
else:
print(" -> login doesn't work :(")
if method == "post":
if "warning" in response.content:
if verbose > 0:
# success is also notified using a warning span
wtext = re.search('<span class="warning">([^<]*)</span>', response.content).group(1)
print(" -> transfer warning: %s" % wtext)
else:
print(" -> NO TRANSFER WARNING?!")
print(80 * "-")
print(re.sub("^", "\t", response.content))
print(80 * "-")
if User.objects.all().count() == 2:
balance2 = sum([u.person.zoobars for u in User.objects.all()])
if balance1 != balance2:
report_balance_mismatch()
utransfers = [t.sender.user.username for t in Transfer.objects.all()]
for p in User.objects.all():
if p.username not in utransfers:
if p.person.zoobars < 10:
report_zoobar_theft()
# technically, this check could be fooled if an attacker could insert
# rows into the transfer db. Instead, we should keep a log of all
# requests, and which user the request was issued as, but this seems
# outside the scope of the exercise?
start = time.time()
fuzzy.concolic_test(test_stuff, maxiter=2000, v=verbose,
uniqueinputs = True,
removeredundant = True,
usecexcache = True)
end = time.time()
print "%.2f seconds" %(end-start)
if cov is not None:
print "Coverage report stored in covhtml/"
cov.html_report(directory = 'covhtml')
os.remove('.coverage')
```
#### File: django-coex/symex/fuzzy.py
```python
import z3str
import z3
import multiprocessing
import sys
import collections
import Queue
import signal
import operator
import inspect
import __builtin__
## Our AST structure
class sym_ast(object):
def __str__(self):
return str(self._z3expr(True))
class sym_func_apply(sym_ast):
def __init__(self, *args):
for a in args:
if not isinstance(a, sym_ast):
raise Exception("Passing a non-AST node %s %s as argument to %s" % \
(a, type(a), type(self)))
self.args = args
def __eq__(self, o):
if type(self) != type(o):
return False
if len(self.args) != len(o.args):
return False
return all(sa == oa for (sa, oa) in zip(self.args, o.args))
def __hash__(self):
return reduce(operator.xor, [hash(a) for a in self.args], 0)
class sym_unop(sym_func_apply):
def __init__(self, a):
super(sym_unop, self).__init__(a)
@property
def a(self):
return self.args[0]
class sym_binop(sym_func_apply):
def __init__(self, a, b):
super(sym_binop, self).__init__(a, b)
@property
def a(self):
return self.args[0]
@property
def b(self):
return self.args[1]
class sym_triop(sym_func_apply):
def __init__(self, a, b, c):
super(sym_triop, self).__init__(a, b, c)
@property
def a(self):
return self.args[0]
@property
def b(self):
return self.args[1]
@property
def c(self):
return self.args[2]
def z3expr(o, printable = False):
assert isinstance(o, sym_ast)
return o._z3expr(printable)
class const_str(sym_ast):
def __init__(self, v):
self.v = v
def __eq__(self, o):
if not isinstance(o, const_str):
return False
return self.v == o.v
def __ne__(self, o):
return not self.__eq__(o)
def __hash__(self):
return hash(self.v)
def _z3expr(self, printable):
## z3str has a weird way of encoding string constants.
## for printing, we make strings look like nice constants,
## but otherwise we use z3str's encoding plan.
if printable:
return z3.Const('"%s"' % self.v, z3str.StringSort())
enc = "__cOnStStR_" + "".join(["_x%02x" % ord(c) for c in self.v])
return z3.Const(enc, z3str.StringSort())
class const_int(sym_ast):
def __init__(self, i):
self.i = i
def __eq__(self, o):
if not isinstance(o, const_int):
return False
return self.i == o.i
def __ne__(self, o):
return not self.__eq__(o)
def __hash__(self):
return hash(self.i)
def _z3expr(self, printable):
return self.i
class const_bool(sym_ast):
def __init__(self, b):
self.b = b
def __eq__(self, o):
if not isinstance(o, const_bool):
return False
return self.b == o.b
def __ne__(self, o):
return not self.__eq__(o)
def __hash__(self):
return hash(self.b)
def _z3expr(self, printable):
return self.b
def ast(o):
if hasattr(o, '_sym_ast'):
return o._sym_ast()
if isinstance(o, bool):
return const_bool(o)
if isinstance(o, int):
return const_int(o)
if isinstance(o, str) or isinstance(o, unicode):
return const_str(o)
raise Exception("Trying to make an AST out of %s %s" % (o, type(o)))
## Logic expressions
class sym_eq(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) == z3expr(self.b, printable)
class sym_and(sym_func_apply):
def _z3expr(self, printable):
return z3.And(*[z3expr(a, printable) for a in self.args])
class sym_or(sym_func_apply):
def _z3expr(self, printable):
return z3.Or(*[z3expr(a, printable) for a in self.args])
class sym_not(sym_unop):
def _z3expr(self, printable):
return z3.Not(z3expr(self.a, printable))
## Arithmetic
class sym_int(sym_ast):
def __init__(self, id):
self.id = id
def __eq__(self, o):
if not isinstance(o, sym_int):
return False
return self.id == o.id
def __hash__(self):
return hash(self.id)
def _z3expr(self, printable):
return z3.Int(self.id)
class sym_lt(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) < z3expr(self.b, printable)
class sym_lte(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) <= z3expr(self.b, printable)
class sym_gt(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) > z3expr(self.b, printable)
class sym_gte(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) >= z3expr(self.b, printable)
class sym_plus(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) + z3expr(self.b, printable)
class sym_minus(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) - z3expr(self.b, printable)
class sym_mul(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) * z3expr(self.b, printable)
class sym_div(sym_binop):
def _z3expr(self, printable):
return z3expr(self.a, printable) / z3expr(self.b, printable)
## String operations
class sym_str(sym_ast):
def __init__(self, id):
self.id = id
def __eq__(self, o):
if not isinstance(o, sym_str):
return False
return self.id == o.id
def __hash__(self):
return hash(self.id)
def _z3expr(self, printable):
return z3.Const(self.id, z3str.StringSort())
class sym_concat(sym_binop):
def _z3expr(self, printable):
return z3str.Concat(z3expr(self.a, printable),
z3expr(self.b, printable))
class sym_length(sym_unop):
def _z3expr(self, printable):
return z3str.Length(z3expr(self.a, printable))
class sym_substring(sym_triop):
def _z3expr(self, printable):
return z3str.SubString(z3expr(self.a, printable),
z3expr(self.b, printable),
z3expr(self.c, printable))
class sym_indexof(sym_binop):
def _z3expr(self, printable):
return z3str.Indexof(z3expr(self.a, printable),
z3expr(self.b, printable))
class sym_contains(sym_binop):
def _z3expr(self, printable):
return z3str.Contains(z3expr(self.a, printable),
z3expr(self.b, printable))
class sym_startswith(sym_binop):
def _z3expr(self, printable):
return z3str.StartsWith(z3expr(self.a, printable),
z3expr(self.b, printable))
class sym_endswith(sym_binop):
def _z3expr(self, printable):
return z3str.EndsWith(z3expr(self.a, printable),
z3expr(self.b, printable))
class sym_replace(sym_triop):
def _z3expr(self, printable):
return z3str.Replace(z3expr(self.a, printable),
z3expr(self.b, printable),
z3expr(self.c, printable))
## Symbolic simplifications
class patname(sym_ast):
def __init__(self, name, pattern = None):
self.name = name
self.pattern = pattern
simplify_patterns_strings = [
(sym_substring(patname("a",
sym_substring(patname("b"),
patname("c"),
sym_minus(sym_length(patname("b")),
patname("c")))),
patname("d"),
sym_minus(sym_length(patname("a")),
patname("d"))),
sym_substring(patname("b"),
sym_plus(patname("c"), patname("d")),
sym_minus(sym_length(patname("b")),
sym_plus(patname("c"), patname("d"))))
),
(sym_concat(patname("a"), const_str("")),
patname("a")
),
]
simplify_patterns_logic = [
(sym_not(sym_not(patname("a"))),
patname("a")
),
(sym_not(sym_eq(patname("a"), const_bool(False))),
sym_eq(patname("a"), const_bool(True))
),
(sym_not(sym_eq(patname("a"), const_bool(True))),
sym_eq(patname("a"), const_bool(False))
),
]
simplify_patterns_arithmetic = [
(sym_plus(patname("x"), const_int(0)),
patname("x")
),
(sym_minus(patname("x"), const_int(0)),
patname("x")
),
(sym_mul(patname("x"), const_int(1)),
patname("x")
),
(sym_div(patname("x"), const_int(1)),
patname("x")
),
(sym_plus(sym_mul(patname("a"), patname("x")),
sym_mul(patname("b"), patname("x"))),
sym_mul(sym_plus(patname("a"), patname("b")), patname("x"))
),
(sym_minus(sym_mul(patname("a"), patname("x")),
sym_mul(patname("b"), patname("x"))),
sym_mul(sym_minus(patname("a"), patname("b")), patname("x"))
),
]
simplify_patterns = []
simplify_patterns += simplify_patterns_strings
simplify_patterns += simplify_patterns_logic
# simplify_patterns += simplify_patterns_arithmetic
def pattern_match(expr, pat, vars):
if isinstance(pat, patname):
if pat.name in vars:
return expr == vars[pat.name]
else:
vars[pat.name] = expr
if pat.pattern is None:
return True
return pattern_match(expr, pat.pattern, vars)
if type(expr) != type(pat):
return False
if not isinstance(expr, sym_func_apply):
return expr == pat
if len(expr.args) != len(pat.args):
return False
return all(pattern_match(ea, pa, vars)
for (ea, pa) in zip(expr.args, pat.args))
def pattern_build(pat, vars):
if isinstance(pat, patname):
return vars[pat.name]
if isinstance(pat, sym_func_apply):
args = [pattern_build(pa, vars) for pa in pat.args]
return type(pat)(*args)
return pat
def simplify(e):
matched = True
while matched:
matched = False
for (src, dst) in simplify_patterns:
vars = {}
if not pattern_match(e, src, vars):
continue
e = pattern_build(dst, vars)
matched = True
if isinstance(e, sym_func_apply):
t = type(e)
args = [simplify(a) for a in e.args]
return t(*args)
return e
## Current path constraint
cur_path_constr = None
cur_path_constr_callers = None
def get_caller():
frame = inspect.currentframe()
back = []
try:
while True:
info = inspect.getframeinfo(frame)
## Skip stack frames inside the symbolic execution engine,
## as well as in the rewritten replacements of dict, %, etc.
if not info.filename.endswith('fuzzy.py') and\
not info.filename.endswith('rewriter.py'):
back.append((info.filename, info.lineno))
frame = frame.f_back
finally:
del frame
return back
def add_constr(e):
global cur_path_constr, cur_path_constr_callers
cur_path_constr.append(simplify(e))
cur_path_constr_callers.append(get_caller())
## This exception is thrown when a required symbolic condition
## is not met; the symbolic execution engine should retry with
## a different input to go down another path instead.
class RequireMismatch(Exception):
pass
def require(e):
if not e:
raise RequireMismatch()
## Creating new symbolic names
namectr = 0
def uniqname(id):
global namectr
namectr += 1
return "%s_%d" % (id, namectr)
## Helper for printing Z3-indented expressions
def indent(s, spaces = '\t'):
return spaces + str(s).replace('\n', ' ')
## Support for forking because z3str uses lots of global variables
## timeout for Z3, in seconds
z3_timeout = 5
def fork_and_check_worker(constr, conn):
z3e = z3expr(constr)
(ok, z3m) = z3str.check_and_model(z3e)
m = {}
if ok == z3.sat:
for k in z3m:
v = z3m[k]
if v.sort() == z3.IntSort():
m[str(k)] = v.as_long()
elif v.sort() == z3str.StringSort():
# print "Model string %s: %s" % (k, v)
vs = str(v)
if not vs.startswith('__cOnStStR_'):
if not str(k).startswith('_t_'):
print 'Undecodable string constant (%s): %s' % (k, vs)
continue
hexbytes = vs.split('_x')[1:]
bytes = [int(h, 16) for h in hexbytes]
m[str(k)] = ''.join(chr(x) for x in bytes)
else:
raise Exception("Unknown sort for %s=%s: %s" % (k, v, v.sort()))
conn.send((ok, m))
conn.close()
def fork_and_check(constr):
constr = simplify(constr)
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(target=fork_and_check_worker,
args=(constr, child_conn))
p.start()
child_conn.close()
## timeout after a while..
def sighandler(signo, stack):
print "Timed out.."
# print z3expr(constr, True).sexpr()
p.terminate()
signal.signal(signal.SIGALRM, sighandler)
signal.alarm(z3_timeout)
try:
res = parent_conn.recv()
except EOFError:
res = (z3.unknown, None)
finally:
signal.alarm(0)
p.join()
return res
## Symbolic type replacements
def concolic_bool(sym, v):
## Python claims that 'bool' is not an acceptable base type,
## so it seems difficult to subclass bool. Luckily, bool has
## only two possible values, so whenever we get a concolic
## bool, add its value to the constraint.
add_constr(sym_eq(sym, ast(v)))
return v
class concolic_int(int):
def __new__(cls, sym, v):
self = super(concolic_int, cls).__new__(cls, v)
self.__v = v
self.__sym = sym
return self
def concrete_value(self):
return self.__v
def __eq__(self, o):
if not isinstance(o, int):
return False
if isinstance(o, concolic_int):
res = (self.__v == o.__v)
else:
res = (self.__v == o)
return concolic_bool(sym_eq(ast(self), ast(o)), res)
def __ne__(self, o):
return not self.__eq__(o)
def __cmp__(self, o):
res = long(self.__v).__cmp__(long(o))
if concolic_bool(sym_lt(ast(self), ast(o)), res < 0):
return -1
if concolic_bool(sym_gt(ast(self), ast(o)), res > 0):
return 1
return 0
def __add__(self, o):
if isinstance(o, concolic_int):
res = self.__v + o.__v
else:
res = self.__v + o
return concolic_int(sym_plus(ast(self), ast(o)), res)
def __radd__(self, o):
res = o + self.__v
return concolic_int(sym_plus(ast(o), ast(self)), res)
def __sub__(self, o):
res = self.__v - o
return concolic_int(sym_minus(ast(self), ast(o)), res)
def __mul__(self, o):
res = self.__v * o
return concolic_int(sym_mul(ast(self), ast(o)), res)
def __div__(self, o):
res = self.__v / o
return concolic_int(sym_div(ast(self), ast(o)), res)
def _sym_ast(self):
return self.__sym
class concolic_str(str):
def __new__(cls, sym, v):
assert type(v) == str or type(v) == unicode
self = super(concolic_str, cls).__new__(cls, v)
self.__v = v
self.__sym = sym
return self
def concrete_value(self):
return self.__v
def __eq__(self, o):
if not isinstance(o, str) and not isinstance(o, unicode):
return False
if isinstance(o, concolic_str):
res = (self.__v == o.__v)
else:
res = (self.__v == o)
return concolic_bool(sym_eq(ast(self), ast(o)), res)
def __ne__(self, o):
return not self.__eq__(o)
def __add__(self, o):
if isinstance(o, concolic_str):
res = self.__v + o.__v
else:
res = self.__v + o
return concolic_str(sym_concat(ast(self), ast(o)), res)
def __radd__(self, o):
res = o + self.__v
return concolic_str(sym_concat(ast(o), ast(self)), res)
def __len__(self):
res = len(self.__v)
return concolic_int(sym_length(ast(self)), res)
def __contains__(self, o):
res = o in self.__v
return concolic_bool(sym_contains(ast(self), ast(o)), res)
def startswith(self, o):
res = self.__v.startswith(o)
return concolic_bool(sym_startswith(ast(self), ast(o)), res)
def endswith(self, o):
res = self.__v.endswith(o)
return concolic_bool(sym_endswith(ast(self), ast(o)), res)
def __getitem__(self, i):
res = self.__v[i]
return concolic_str(sym_substring(ast(self), ast(i), ast(1)), res)
def __getslice__(self, i, j):
if j == 9223372036854775807 or j == 2147483647:
## Python passes in INT_MAX when there's no upper bound.
## Unfortunately, this differs depending on whether you're
## running in a 32-bit or a 64-bit system.
j = self.__len__()
res = self.__v[i:j]
return concolic_str(sym_substring(ast(self), ast(i), ast(j-i)), res)
def find(self, ch):
res = self.__v.find(ch)
return concolic_int(sym_indexof(ast(self), ast(ch)), res)
def decode(self, encoding = sys.getdefaultencoding(), errors = 'strict'):
## XXX hack: we restrict z3str to just 7-bit ASCII (see call to
## setAlphabet7bit) and then pretend that str and unicode objects
## are the same.
return self
def encode(self, encoding = sys.getdefaultencoding(), errors = 'strict'):
## XXX same hack as for decode().
return self
def __unicode__(self):
## XXX same hack as for decode().
return self
def lstrip(self, chars = ' \t\n\r'):
for ch in chars:
if self.startswith(chars):
return self[1:].lstrip(chars)
return self
def rsplit(self, sep = None, maxsplit = -1):
if maxsplit != 1 or type(sep) != str:
return self.__v.rsplit(sep, maxsplit)
name = 'rsplit_%s_%s' % (self.__sym, sep)
l = mk_str(name + '_l')
r = mk_str(name + '_r')
if l + sep + r != self:
require(sep not in self)
return self
require(sep not in l)
require(sep not in r)
return (l, r)
def upper(self):
## XXX an incorrect overloading that gets us past werkzeug's use
## of .upper() on the HTTP method name..
return self
def _sym_ast(self):
return self.__sym
## Override some builtins..
old_len = __builtin__.len
def xlen(o):
if isinstance(o, concolic_str):
return o.__len__()
return old_len(o)
__builtin__.len = xlen
## Track inputs that should be tried later
class InputQueue(object):
def __init__(self):
## "inputs" is a priority queue storing inputs we should try.
## The inputs are stored as a dictionary, from symbolic variable
## name to the value we should try. If a value is not present,
## mk_int() and mk_str() below will pick a default value. Each
## input also has a priority (lower is "more important"), which
## is useful when there's too many inputs to process.
self.inputs = Queue.PriorityQueue()
self.inputs.put((0, {'values': {}, 'path_condition': None}))
self.input_history = []
## "branchcount" is a map from call site (filename and line number)
## to the number of branches we have already explored at that site.
## This is used to choose priorities for inputs.
self.branchcount = collections.defaultdict(int)
def empty(self):
return self.inputs.empty()
def get(self):
(prio, values) = self.inputs.get()
return (values['values'], values['path_condition'])
def add(self, new_values, caller, path_condition, uniqueinputs = False):
if uniqueinputs:
if self.check_input_history(new_values):
if verbose > 1:
print "SKIPPING INPUT"
return
prio = self.branchcount[caller[0]]
self.branchcount[caller[0]] += 1
self.inputs.put((prio, {'values': new_values, 'path_condition': path_condition}))
if uniqueinputs:
self.input_history.append((prio, new_values))
def check_input_history(self, new_values):
## Return True if new_values has been added to the input queue before.
for (prio, values) in self.input_history:
if self.value_dicts_match(values, new_values):
return True
return False
def value_dicts_match(self, old_values, new_values):
if len(old_values) != len(new_values):
return False
if len(old_values) == 0:
return True
for k in old_values:
if k not in new_values:
return False
if old_values[k] != new_values[k]:
return False
return True
## Actual concolic execution API
concrete_values = {}
def mk_int(id, value = 0):
global concrete_values
if id not in concrete_values:
concrete_values[id] = value
return concolic_int(sym_int(id), concrete_values[id])
def mk_str(id, value = ''):
global concrete_values
if id not in concrete_values:
concrete_values[id] = value
return concolic_str(sym_str(id), concrete_values[id])
verbose = 0
def concolic_test(testfunc, maxiter = 100, v = 0,
uniqueinputs = True,
removeredundant = True,
usecexcache = True):
# globally available 'verbose' flag
verbose = v
## "checked" is the set of constraints we already sent to Z3 for
## checking. use this to eliminate duplicate paths.
checked_paths = set()
## list of inputs we should try to explore.
inputs = InputQueue()
## cache of solutions to previously checked path conditions,
## or lack thereof, being a counterexample.
## a dictionary that maps path conditions to value assignments.
cexcache = {}
iter = 0
while iter < maxiter and not inputs.empty():
iter += 1
global concrete_values
global path_condition
(concrete_values, path_condition) = inputs.get()
global cur_path_constr, cur_path_constr_callers
cur_path_constr = []
cur_path_constr_callers = []
if verbose > 0:
# print 'Trying concrete values:', ["%s = %s" % (k, concrete_values[k]) for k in concrete_values if not k.startswith('_t_')]
print 'Trying concrete values:', ["%s = %s" % (k, concrete_values[k]) for k in concrete_values]
try:
testfunc()
except RequireMismatch:
pass
if verbose > 1:
print 'Test generated', len(cur_path_constr), 'branches:'
for (c, caller) in zip(cur_path_constr, cur_path_constr_callers):
if verbose > 2:
print indent(z3expr(c, True)), '@'
for c in caller:
print indent(indent('%s:%d' % (c[0], c[1])))
else:
print indent(z3expr(c, True)), '@', '%s:%d' % (caller[0][0], caller[0][1])
## for each branch, invoke Z3 to find an input that would go
## the other way, and add it to the list of inputs to explore.
partial_path = []
for (branch_condition, caller) in \
zip(cur_path_constr, cur_path_constr_callers):
## Identify a new branch forked off the current path,
## but skip it if it has been solved before.
if removeredundant:
new_branch = extend_and_prune(partial_path, sym_not(branch_condition))
partial_path = extend_and_prune(partial_path, branch_condition)
else:
new_branch = partial_path + [sym_not(branch_condition)]
partial_path = partial_path + [branch_condition]
new_path_condition = sym_and(*new_branch)
if new_path_condition in checked_paths:
continue
## Solve for a set of inputs that goes down the new branch.
## Avoid solving the branch again in the future.
(ok, model) = (None, None)
if usecexcache:
(ok, model) = check_cache(new_path_condition, cexcache)
if ok != None:
if verbose > 1:
print "USED CEXCACHE"
else:
(ok, model) = fork_and_check(new_path_condition)
else:
(ok, model) = fork_and_check(new_path_condition)
checked_paths.add(new_path_condition)
## If a solution was found, put it on the input queue,
## (if it hasn't been inserted before).
if ok == z3.sat:
new_values = {}
for k in model:
if k in concrete_values:
new_values[k] = model[k]
inputs.add(new_values, caller, new_path_condition, uniqueinputs)
if usecexcache:
cexcache[new_path_condition] = new_values
else:
if usecexcache:
cexcache[new_path_condition] = None
if verbose > 0:
print 'Stopping after', iter, 'iterations'
def check_cache(path_condition, cache):
## return (ok, model) where
## ok = z3.unsat if a subset of path_condition has no solution.
## ok = z3.sat if a superset of path_condition has a solution.
## ok = None if neither of the above can be ascertained.
for old_path in cache:
if cache[old_path] is None and \
issubset(old_path.args, path_condition.args):
return (z3.unsat, None)
if cache[old_path] is not None and \
issubset(path_condition.args, old_path.args):
return (z3.sat, cache[old_path])
return (None, None)
# (ok, model) = fork_and_check(path_condition)
# return (ok, model)
def issubset(candidate_set, context_set):
for elem in candidate_set:
if elem not in context_set:
return False
return True
def extend_and_prune(partial_path, branch_condition):
branch_condition = simplify(branch_condition)
branch_condition = simplify_StartsWith(branch_condition)
## Remove any constraints in partial_path that are
## implied by branch_condition.
prune_set = []
for constraint in partial_path:
# resultZ3 = Z3implies(branch_condition, constraint)
# result = implies(branch_condition, constraint)
# if resultZ3 and not result:
# print "MISSED IMPLICATION"
# print " ", branch_condition
# print " ", constraint
# if not resultZ3 and result:
# print "FALSE IMPLICATION"
# print " ", branch_condition
# print " ", constraint
if implies(branch_condition, constraint):
prune_set.append(constraint)
if len(prune_set) > 0:
for constraint in prune_set:
partial_path.remove(constraint)
return partial_path + [branch_condition]
## If none are removed above, see if any constraints
## in partial_path imply branch_condition.
for constraint in partial_path:
# resultZ3 = Z3implies(constraint, branch_condition)
# result = implies(constraint, branch_condition)
# if resultZ3 and not result:
# print "MISSED IMPLICATION"
# print " ", constraint
# print " ", branch_condition
# if not resultZ3 and result:
# print "FALSE IMPLICATION"
# print " ", constraint
# print " ", branch_condition
if implies(constraint, branch_condition):
return partial_path
## Otherwise return the standard append.
return partial_path + [branch_condition]
def simplify_StartsWith(expr):
if isinstance(expr, sym_eq) and \
isinstance(expr.args[0], sym_startswith):
startswithfn = expr.args[0]
if isinstance(startswithfn.args[1], const_str):
subexpr = startswithfn.args[0]
value = startswithfn.args[1]
return sym_eq(sym_eq(sym_substring(subexpr,
const_int(0),
const_int(len(value.v))),
value),
expr.args[1])
return expr
def Z3implies(antecedent, consequent):
## Want to prove Antecedent --> Consequent, or (not A) OR (C).
## So try to find a counterexample, solve for (A) AND (not C).
## If no solution (unsat), then the implication is true; otherwise false.
(ok, _) = fork_and_check(sym_and(antecedent, sym_not(consequent)))
return (ok == z3.unsat)
def implies(antecedent, consequent):
## If both sides are equal, then trivially true.
if antecedent == consequent:
return True
## Identify whether the antecedent is an equality assignment.
if equalityImplies(antecedent, consequent):
return True
## Try proving the contra-positive: (not C) IMPLIES (not A)
if isinstance(antecedent, sym_eq) and \
isinstance(antecedent.args[1], const_bool) and \
isinstance(consequent, sym_eq) and \
isinstance(consequent.args[1], const_bool):
if equalityImplies(
sym_eq(consequent.args[0], const_bool(not consequent.args[1].b)),
sym_eq(antecedent.args[0], const_bool(not antecedent.args[1].b))):
return True
## Last resort: make an expensive call to Z3.
## Want to prove Antecedent IMPLIES Consequent, that is (not A) OR (C).
## So try to find a counterexample, solve for (A) AND (not C).
## If no solution (unsat), then the implication is true; otherwise false.
# (ok, _) = fork_and_check(sym_and(antecedent, sym_not(consequent)))
# if ok == z3.unsat:
# print "Z3 says", antecedent, "IMPLIES", consequent
# return (ok == z3.unsat)
return False
def equalityImplies(a, c):
if isinstance(a, sym_eq) and \
isinstance(a.args[0], sym_eq) and \
a.args[1] == const_bool(True):
var1 = a.args[0].args[0]
value1 = a.args[0].args[1]
if isinstance(c, sym_eq) and \
isinstance(c.args[0], sym_eq) and \
c.args[1] == const_bool(False):
var2 = c.args[0].args[0]
value2 = c.args[0].args[1]
if var2 == var1 and value2 != value1:
return True
if isinstance(value1, const_str) and \
isinstance(c, sym_eq) and \
c.args[1] == const_bool(False) and \
isinstance(c.args[0], sym_eq) and \
isinstance(c.args[0].args[0], sym_substring):
substringfn = c.args[0].args[0]
substringval = c.args[0].args[1]
if substringfn.args[0] == var1:
start = substringfn.args[1].i
end = substringfn.args[2].i
if value1.v[start:end] != substringval.v:
return True
return False
def isrelevant(ast):
global concrete_values
if isinstance(ast, sym_int) or isinstance(ast, sym_str):
if ast.id in concrete_values:
return True
if isinstance(ast, sym_func_apply):
# Recurse on the ast's arguments.
for arg in ast.args:
if isrelevant(arg):
return True
return False
```
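A minimal, hedged driver for the concolic API defined above (`mk_int`, `mk_str`, `concolic_test`); it is written in Python 2 to match fuzzy.py, and the test function is invented for illustration:
```python
# Hypothetical standalone test; concolic_test repeatedly re-runs it with new
# inputs solved from the recorded branch constraints.
import symex.fuzzy as fuzzy

def test_me():
    x = fuzzy.mk_int('x')
    name = fuzzy.mk_str('name')
    if x > 5:
        if name == 'admin':
            print 'reached x > 5 and name == admin'
        else:
            print 'x > 5 but name != admin'
    else:
        print 'x <= 5'

fuzzy.concolic_test(test_me, maxiter=20, v=1)
```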
#### File: django-coex/symex/symdjango.py
```python
import sys
import os
import fuzzy
# patch Django where needed
from mock import patch
# Dynamic imports
import importlib
# use our Django (currently irrelevant)
ourdjango = os.path.dirname(os.path.abspath(__file__)) + '/../../django-concolic'
if ourdjango not in sys.path:
sys.path.insert(1, ourdjango)
# Mock out force_str and relatives
from django.utils.encoding import force_bytes
class NewForceBytes():
def __call__(self, s, *args, **kwargs):
if isinstance(s, fuzzy.concolic_str):
return s
if isinstance(s, fuzzy.concolic_int):
return s
return force_bytes(s, *args, **kwargs)
patcher = patch('django.utils.encoding.force_bytes', new_callable=NewForceBytes)
patcher.start()
patcher = patch('django.test.client.force_bytes', new_callable=NewForceBytes)
patcher.start()
# END
# Preserve symbolic values across POST data serialization (gah..)
# First, we do a bit of a trick when asked to create POST data by replacing
# concolic variables with a tagged key containing the symbolic identifier of
# the variable instead.
def post_data(**kwargs):
data = {}
tagged_key = lambda k: 'CoNcOlIc::' + type(k).__name__ + ':' + k._sym_ast().id
for k in kwargs:
v = kwargs[k]
if type(v).__name__ in ("concolic_str", "concolic_int"):
v = tagged_key(v)
data[k] = v
return data
# Then, we wrap django.http.MultiPartParser.parse so that it restores symbolic
# nature of tagged parts (look through self._post, first returned value).
from django.http.request import MultiPartParser
from django.http import QueryDict
class MPP(MultiPartParser):
def parse(self):
post, files = super(MPP, self).parse()
newpost = QueryDict('', mutable=True)
for k, vs in post.iterlists():
if len(vs) == 1 and vs[0].startswith('CoNcOlIc::'):
v = vs[0][len('CoNcOlIc::'):]
ts = v.split(':', 2)
if ts[0] == "concolic_int":
vs = [fuzzy.mk_int(ts[1])]
elif ts[0] == "concolic_str":
vs = [fuzzy.mk_str(ts[1])]
else:
print("UNKNOWN CONCOLIC TYPE %s" % ts[0])
newpost.setlist(k, vs)
return newpost, files
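# Hypothetical illustration (not part of the original module): post_data() above turns
# a concolic value into a tagged marker, e.g.
#   post_data(zoobars=fuzzy.mk_int('transfer.zoobars'))['zoobars']
#   == 'CoNcOlIc::concolic_int:transfer.zoobars'
# and MPP.parse() rebuilds fuzzy.mk_int('transfer.zoobars') from that marker, so the
# POST value stays symbolic across Django's multipart serialization.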
patcher = patch('django.http.request.MultiPartParser', new=MPP)
patcher.start()
# There's also another type forcing happening in QueryDict that we need to
# override
from django.http.request import bytes_to_text
class NewBytes2Text():
def __call__(self, s, encoding):
if isinstance(s, fuzzy.concolic_str):
return s
if isinstance(s, fuzzy.concolic_int):
return s
return bytes_to_text(s, encoding)
patcher = patch('django.http.request.bytes_to_text', new_callable=NewBytes2Text)
patcher.start()
# END
# Mock DB queries so they play nicely with concolic execution
import django.db.models.query
from django.db.models import Model
notdict = {}
oldget = django.db.models.QuerySet.get
def newget(self, *args, **kwargs):
import django.contrib.sessions.models
if self.model is not django.contrib.sessions.models.Session:
if len(kwargs) == 1:
            key = next(iter(kwargs))
if '_' not in key:
if key == 'pk':
key = self.model._meta.pk.name
kwargs[key] = kwargs['pk']
del kwargs['pk']
for m in self.model.objects.all():
v = kwargs[key]
# support model attribute passthrough
if isinstance(v, Model) and hasattr(v, key):
v = getattr(v, key)
if getattr(m, key) == v:
real = oldget(self, *args, **kwargs)
assert m == real
return m
# this should raise an exception, or we've done something wrong
oldget(self, *args, **kwargs)
assert False
else:
e = "newget: special keys like %s not yet supported" % key
if e not in notdict:
print(e)
notdict[e] = True
else:
e = "newget: multi-key lookups not yet supported: %s" % kwargs
if e not in notdict:
print(e)
notdict[e] = True
return oldget(self, *args, **kwargs)
#django.db.models.QuerySet.get = newget
import symex.importwrapper as importwrapper
import symex.rewriter as rewriter
importwrapper.rewrite_imports(rewriter.rewriter)
# It's only safe to use SymDjango as a singleton!
class SymDjango():
def __init__(self, settings, path, viewmap):
self.settings = settings
self.path = path
self.viewmap = viewmap
# search for modules inside application under test
sys.path.append(path)
# Make sure Django reads the correct settings
os.environ.update({
"DJANGO_SETTINGS_MODULE": settings
})
django.setup()
def setup_models(self, models):
from symqueryset import SymManager
# This could patch every model used by django, but we are really only
# interested in the application's models (it's also less expensive)
for m in models:
__objects = m['model'].objects
m['model'].objects = SymManager(__objects, m['queryset'])
def new(self):
return SymClient(self, SERVER_NAME='concolic.io')
# Mock requests by mocking routing + url parsing
from django.test.client import Client
class SymClient(Client):
def __init__(self, symdjango, **defaults):
super(SymClient, self).__init__(False, **defaults)
self.symdjango = symdjango
def request(self, **request):
with patch('django.core.urlresolvers.RegexURLResolver', new=SymResolver) as mock:
mock.symdjango = self.symdjango
return super(SymClient, self).request(**request)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False, **extra):
environ = self._base_environ(PATH_INFO=path, **extra)
from urlparse import ParseResult
with patch('django.test.client.urlparse') as mock:
mock.return_value = ParseResult(
scheme = environ['wsgi.url_scheme'],
netloc = environ['SERVER_NAME'],
path = environ['PATH_INFO'],
params = '',
query = 'QUERY_STRING' in environ and environ['QUERY_STRING'] or '',
fragment = ''
)
return super(SymClient, self).generic(method, path, data,
content_type=content_type, secure=secure, **extra)
class SymResolver():
symdjango = None
def __init__(self, regex, conf):
self.reverseDict = {}
for m in SymResolver.symdjango.viewmap:
ind = m.find('.')
self.reverseDict[m[:ind]] = ("", self)
def resolve(self, path):
from django.core.urlresolvers import Resolver404
for v in SymResolver.symdjango.viewmap:
s = SymURL(SymResolver.symdjango, v)
r = s.resolve(path)
if r is not None:
return r
raise Resolver404({'path': path})
def _reverse_with_prefix(self, v, _prefix, *args, **kwargs):
return "<reverse: %s>" % v
@property
def namespace_dict(self):
return self.reverseDict
@property
def app_dict(self):
return {}
class SymURL():
def __init__(self, symdjango, v):
self.symdjango = symdjango
self.view = v
@property
def callback(self):
return self.symdjango.viewmap[self.view]
def resolve(self, path):
from django.core.urlresolvers import ResolverMatch
match = self.callback(path)
if match:
if not isinstance(match, tuple):
match = (match, {}, [])
if len(match) == 1:
match = (match[0], {})
if len(match) == 2:
match = (match[0], match[1], [])
# From core/urlresolvers.py (:222 in 1.7 stable):
# If there are any named groups, use those as kwargs, ignoring non-named
# groups. Otherwise, pass all non-named arguments as positional
# arguments.
kwargs = match[1]
if kwargs:
args = ()
else:
args = match[2]
kwargs.update({}) # TODO: extra args passed to view from urls.py
            ind = self.view.rfind('.')
mod = self.view[:ind]
method = self.view[(ind+1):]
            views = importlib.import_module(mod)
return ResolverMatch(getattr(views, method), args, kwargs, method)
```
#### File: django-coex/symex/symflask.py
```python
import fuzzy
import flask
import werkzeug
class SymbolicRule(werkzeug.routing.Rule):
def __init__(self, string, **kwargs):
super(SymbolicRule, self).__init__(string, **kwargs)
self.symvarnames = {}
for converter, arguments, variable in werkzeug.routing.parse_rule(string):
            if converter == 'default':
self.symvarnames[variable] = fuzzy.uniqname(variable)
def match(self, path):
# print 'match', path, 'rule', self.rule
orig = super(SymbolicRule, self).match(path)
expectpath = "|"
res = {v: fuzzy.mk_str(n) for (v, n) in self.symvarnames.items()}
for converter, arguments, variable in werkzeug.routing.parse_rule(self.rule):
if arguments is not None:
return orig
if converter is None:
expectpath += variable
            elif converter == 'default':
expectpath += res[variable]
fuzzy.require('/' not in res[variable])
else:
return orig
if expectpath == path:
return res
else:
return orig
class SymbolicRequest(flask.Request):
@werkzeug.utils.cached_property
def cookies(self):
hdr = self.environ.get('HTTP_COOKIE', '')
name = fuzzy.mk_str('cookie_name')
val = fuzzy.mk_str('cookie_val')
fuzzy.require(hdr == name + '=' + val)
res = {name: val}
return res
@werkzeug.utils.cached_property
def form(self):
## Maybe make a concolic_dict() that would eliminate the need
## to enumerate all the keys of interest here?
res = {}
for k in ('recipient', 'zoobars'):
if fuzzy.mk_int('form_%s_present' % k) == 0:
continue
res[k] = fuzzy.mk_str('form_%s_val' % k)
return res
flask.Flask.url_rule_class = SymbolicRule
flask.Flask.request_class = SymbolicRequest
```
#### File: django-coex/symex/symqueryset.py
```python
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import six
from django.db.models import Model
from django.db import IntegrityError
from random import randrange
from fuzzy import ast, sym_eq, sym_not, sym_or, sym_and, sym_gt, sym_lt, concolic_int
import fuzzy
import traceback
import base64
import string
class SymMixin():
def _old_get(self, cls, *args, **kwargs):
return super(cls, self).get(*args, **kwargs)
# SymQuerySet that creates concolic variables from DB object fields. Example:
# - user = User.objects.get(username='alice') will return an object
# where user.person.zoobars and the other fields are concolic
# - z3 solves constraints and finds concrete values to test
# - Concrete values are then included in the query:
# user = User.objects.get(username='alice', person.zoobars=1)
# - If no such user exists, create it
# This makes it possible to test branches that would not otherwise be covered
# when there are no objects in the DB with the required fields. But it makes it
# more difficult to test invariants since the DB will change
class SQLSymQuerySet(QuerySet, SymMixin):
    cache = {}
_id = 0
def get(self, *args, **kwargs):
import django.contrib.sessions.models
if self.model is django.contrib.sessions.models.Session or len(kwargs) != 1:
return self._old_get(SQLSymQuerySet, *args, **kwargs)
# If this query has already been called, some or all of its object's
# properties may be symbolic and have constraints
query_id = self._create_query_id()
# Get any concrete values that are available for this query
index = len(query_id)
concrete = {}
for id in fuzzy.concrete_values:
if not id.startswith(query_id):
continue
concrete[id[index:]] = fuzzy.concrete_values[id]
if len(concrete) > 0:
for field in concrete:
kwargs[field] = concrete[field]
unique = self._get_unique_fields()
try:
obj = self._old_get(SQLSymQuerySet, *args, **kwargs)
except self.model.DoesNotExist:
# Django does not allow empty strings in 'unique' fields
for field in unique:
if field.name in kwargs and kwargs[field.name] == '':
raise self.model.DoesNotExist()
# If searching by primary key and a row exists, update the row
# to avoid errors with duplicate primary keys
obj = None
for field in unique:
if field.name in kwargs:
newkwargs = {field.name: kwargs[field.name]}
try:
obj = self._old_get(SQLSymQuerySet, *args, **newkwargs)
break
except self.model.DoesNotExist:
pass
if obj is None:
obj = self.model()
setattr(obj, self.model._meta.pk.name, hash(str(self._id)))
self._id = self._id + 1
obj.save()
for arg in kwargs:
if arg != self.model._meta.pk.name:
obj = self._set_attr(obj, arg, kwargs[arg])
try:
obj.save()
                    print(obj)
                    print(self.all())
except IntegrityError:
raise self.model.DoesNotExist()
obj = self._make_fields_concolic(query_id, obj)
return obj
def _set_attr(self, obj, key, value):
if not isinstance(key, str) or not '__' in key:
setattr(obj, key, value)
return obj
keys = str.split(key, '__', 1)
setattr(obj, keys[0], self._set_attr(getattr(obj, keys[0]), keys[1], value))
return obj
def _get_unique_fields(self):
return [f for f in self.model._meta.fields if f.unique]
def _make_fields_concolic(self, query_id, obj, blacklist = set(), prefix = ''):
blacklist.add('_' + type(obj)._meta.model_name + '_cache')
for prop in vars(obj):
# Ignore private fields
if (prop.startswith('_') and not prop.endswith('_cache')) or prop in blacklist:
continue
value = getattr(obj, prop)
if isinstance(value, fuzzy.concolic_int) or isinstance(value, fuzzy.concolic_str):
continue
if hasattr(value, '__dict__'):
setattr(obj, prop, self._make_fields_concolic(query_id, value, blacklist, type(value)._meta.model_name))
if isinstance(value, int):
setattr(obj, prop, fuzzy.mk_int(query_id + prefix + '__' + prop, value))
elif isinstance(value, str) or isinstance(value, unicode):
setattr(obj, prop, fuzzy.mk_str(query_id + prefix + '__' + prop, value))
return obj
# Each SymQuerySet has a unique ID based on where it was created (i.e. call
# stack contents when it was created)
def _create_query_id(self):
return base64.b64encode(str(hash(''.join(traceback.format_stack()))))
# If the query returns DoesNotExist, then it is probably the case that we are
# looking up the DB with an empty key (e.g. at the beginning of testing when
# 'username' has a default value of ''), so we create a synthetic branch to
# ensure that on a subsequent iteration we actually get a real object
def _create_synthetic_branch(self, **kwargs):
obj = self._get_random_object()
for obj in self.model.objects.all():
if len(kwargs) == 1:
                key = next(iter(kwargs))
if key == 'pk':
key = self.model._meta.pk.name
kwargs[key] = kwargs['pk']
del kwargs['pk']
value = kwargs[key]
if isinstance(value, Model) and hasattr(value, key):
value = getattr(value, key)
if getattr(obj, key) == value:
pass
def _get_random_object(self):
return self.model.objects.all()[randrange(self.count())]
def _exists(self, query_id):
return query_id in self.cache
def _add_query_constraints(self):
pass
# SymQuerySet that just iterates through every row in the DB
class AllSymQuerySet(QuerySet, SymMixin):
def _convert_pk(self, **kwargs):
for key in kwargs:
if key != 'pk':
continue
newkey = self.model._meta.pk.name
kwargs[newkey] = kwargs['pk']
del kwargs['pk']
return kwargs
def _is_match(self, real, obj, **kwargs):
for key in kwargs:
value = kwargs[key]
lookups, parts, reffed_aggregate = self.query.solve_lookup_type(key)
self._create_branch(value, obj, lookups, parts)
if obj == real:
return True
return False
def _create_branch(self, value, obj, lookup, props):
if len(lookup) != 1:
return
obj_attr = self._get_attr(obj, props)
op = lookup[0]
if op == 'gt' and obj_attr > value:
pass
if op == 'gte' and obj_attr >= value:
pass
if op == 'lt' and obj_attr < value:
pass
if op == 'lte' and obj_attr <= value:
pass
if op == 'exact' and obj_attr == value:
pass
def _get_attr(self, obj, props):
result = obj
for prop in props:
if hasattr(obj, prop):
result = getattr(obj, prop)
return result
def get(self, *args, **kwargs):
import django.contrib.sessions.models
if self.model is django.contrib.sessions.models.Session:
return self._old_get(AllSymQuerySet, *args, **kwargs)
kwargs = self._convert_pk(**kwargs)
real = None
try:
real = self._old_get(AllSymQuerySet, *args, **kwargs)
except self.model.DoesNotExist:
pass
for m in self.model.objects.all():
if self._is_match(real, m, **kwargs):
return m
return self._old_get(AllSymQuerySet, *args, **kwargs)
# SymQuerySet that creates mutations based on ConSMutate
class MutationSymQuerySet(AllSymQuerySet, SymMixin):
operators = ['lte', 'gte', 'gt', 'lt', 'exact']
condition_cache = set()
def filter(self, *args, **kwargs):
(op, value, mutations) = self._mutate(*args, **kwargs)
actual = self._apply_filter(*args, **kwargs)
if not isinstance(value, concolic_int):
return actual
mutations = self._remove_dead_mutations(actual, mutations)
self._create_constraints(op, value, mutations)
return actual
def _apply_filter(self, *args, **kwargs):
from django.core.exceptions import FieldError
try:
return super(MutationSymQuerySet, self).filter(*args, **kwargs)
except FieldError:
return None
#
# Based on ConSMutate: SQL mutants for guiding concolic testing of database
# applications (<NAME>, <NAME>, and <NAME>, 2012)
#
# Mutate the current queryset when it is filtered
#
# Suppose the filter is Transfer.objects.filter(zoobars__gt=10) (all
# transfers of more than 10 zoobars)
#
# 1. Split the input string: filter_column = zoobars, operator = gt,
# filter_value = 10
# 2. Create possible mutations:
# i. Create mutated querysets by varying the 'operator', e.g. create
# querysets with operator = 'lt' (less than), 'gte' (greater than or
# equal), etc.
# ii.Should end up with several mutations: e.g.
# Transfer.objects.filter(zoobars__lt=10),
# Transfer.objects.filter(zoobars__gte=10), etc.
# 3. Run original filter
# 4. For each mutation:
# i. Run it and compare with original
# ii.If result is different (called 'dead' mutations in the paper): discard
# iii.If result is the same: Add the symmetric difference of the original
# and the mutation to the path constraints
#
def _mutate(self, *args, **kwargs):
mutations = {}
for arg in kwargs:
lookups, parts, reffed_aggregate = self.query.solve_lookup_type(arg)
if len(lookups) != 1:
continue
mutated_filters = {}
operator = lookups[0]
filter_column = '_'.join(parts)
filter_value = kwargs[arg]
mutate_operators = [op for op in self.operators if op != operator]
for op in mutate_operators:
mutated_filters[op] = {filter_column + '__' + op: filter_value}
# TODO: currently only handles filters with single column queries
# e.g. username='alice'. Ideally, this would handle filters over
# multiple columns e.g. find the transfers of more than 10 zoobars
# to alice recipient='alice' && zoobars > 10
#break
return (operator, filter_value, self._create_mutated_querysets(mutated_filters, *args))
#mutations.append(mutation_set)
return mutations
def _create_mutated_querysets(self, mutated_filters, *args):
mutations = {}
for op in mutated_filters:
filter_kv = mutated_filters[op]
mutated_queryset = self._apply_filter(*args, **filter_kv)
mutations[op] = mutated_queryset
return mutations
def _remove_dead_mutations(self, original_queryset, mutations):
unique_mutations = {}
items = list(six.moves.map(repr, original_queryset))
for op in mutations:
mutation = mutations[op]
if self._is_equal(items, mutation):
unique_mutations[op] = mutation
return unique_mutations
def _is_equal(self, values, other_queryset):
items = list(six.moves.map(repr, other_queryset))
return items == values
def _create_constraints(self, original_op, sym, mutations):
original = self._create_condition(original_op, sym)
t_original = sym_eq(original, ast(True))
f_original = sym_eq(original, ast(False))
for op in mutations:
mutant = self._create_condition(op, sym)
if mutant is None:
return None
t_mutant = sym_eq(mutant, ast(True))
f_mutant = sym_eq(mutant, ast(False))
condition = sym_not(sym_or(sym_and(t_original, f_mutant), sym_and(f_original, t_mutant)))
if self._in_cache(condition):
continue
fuzzy.cur_path_constr = []
fuzzy.cur_path_constr_callers = []
fuzzy.add_constr(sym_and(sym_not(fuzzy.path_condition), condition))
self._add_to_cache(condition)
return
def _hash_condition(self, condition):
return str(condition)
def _add_to_cache(self, condition):
return self.condition_cache.add(self._hash_condition(condition))
def _in_cache(self, condition):
return self._hash_condition(condition) in self.condition_cache
def _create_condition(self, op, sym):
sym_type = None
if op == 'gt':
sym_type = sym_gt
elif op == 'lt':
sym_type = sym_lt
elif op == 'exact':
sym_type = sym_eq
elif op == 'gte':
sym_type = sym_gte
elif op == 'lte':
sym_type = sym_lte
if sym_type is None:
return None
return sym_type(ast(sym), ast(sym.concrete_value()))
class SymManager(Manager, SymMixin):
def __init__(self, manager, queryset_cls):
self.manager = manager
self.queryset_cls = queryset_cls
def __getattr__(self, attr):
#print 'getattr' + attr
return getattr(self.manager, attr)
def get_queryset(self):
#import pdb; pdb.set_trace()
if self.queryset_cls == AllSymQuerySet:
return AllSymQuerySet(self.model, using=self._db, hints=self._hints)
if self.queryset_cls == SQLSymQuerySet:
return SQLSymQuerySet(self.model, using=self._db, hints=self._hints)
if self.queryset_cls == MutationSymQuerySet:
return MutationSymQuerySet(self.model, using=self._db, hints=self._hints)
        print('No SymQuerySet selected')
return QuerySet(self.model, using=self._db, hints=self._hints)
``` |
{
"source": "jonhoo/django-zoobar",
"score": 2
} |
#### File: django-zoobar/zapp/views.py
```python
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseNotFound
from zapp.models import Person, Transfer
from django.contrib.auth.models import User
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from zapp.profile import run_profile
import __builtin__
if "symint" not in dir(__builtin__):
def symint(v):
return int(v)
@login_required
def index(request):
try:
request.user.person.profile = request.POST['profile_update']
request.user.person.save()
except KeyError:
pass
return render(request, 'zapp/index.html', {'user': request.user})
@login_required
def transfer(request):
try:
recipient = request.POST['recipient']
zoobars = symint(request.POST['zoobars'])
transfer_impl(request.user, recipient, zoobars)
return render(request, 'zapp/transfer.html', {
'warning': 'Sent %d zoobars' % zoobars
})
except KeyError:
return render(request, 'zapp/transfer.html')
except (User.DoesNotExist, ValueError):
return render(request, 'zapp/transfer.html', {
'warning': 'Transfer to %s failed' % request.POST['recipient']
})
def transfer_impl(sender, recipient, zoobars):
try:
recipientp = User.objects.get(username = recipient)
sender_balance = sender.person.zoobars - zoobars
recipient_balance = recipientp.person.zoobars + zoobars
if sender_balance < 0 or recipient_balance < 0:
raise ValueError()
sender.person.zoobars = sender_balance
recipientp.person.zoobars = recipient_balance
sender.person.save()
recipientp.person.save()
transfer = Transfer()
transfer.sender = sender.person
transfer.recipient = recipientp.person
transfer.amount = zoobars
transfer.save()
except User.DoesNotExist:
raise User.DoesNotExist()
@login_required
def users(request):
try:
req_user = request.GET['user']
try:
user = User.objects.get(username = req_user)
transfers = Transfer.objects.filter(Q(sender = user) | Q(recipient = user))
if user.person.has_executable_profile():
user.person.profile = run_profile(user, req_user)
return render(request, 'zapp/users.html', {
'req_user': req_user,
'user': user,
'transfers': transfers
})
except User.DoesNotExist:
return render(request, 'zapp/users.html', {
'req_user': req_user,
'warning': 'Cannot find that user.'
})
except KeyError:
return render(request, 'zapp/users.html', {'req_user':''})
@login_required
def zoobarjs(request):
return render(request, 'zapp/zoobars.js', {'user': request.user})
```
#### File: django-zoobar/zlogio/views.py
```python
from django.shortcuts import render
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
def login(request):
login_error = ""
nexturl = request.GET.get('next', '/')
try:
username = request.POST['login_username']
password = request.POST['login_password']
nexturl = request.POST['next']
if 'submit_registration' in request.POST:
if not username:
login_error = "You must supply a username to register."
elif not password:
login_error = "You must supply a password to register."
else:
user = User.objects.create_user(username, '', password)
user.save()
user = authenticate(username=username, password=password)
auth_login(request, user)
return HttpResponseRedirect(request.POST.get('next', '/'))
elif 'submit_login' in request.POST:
if not username:
login_error = "You must supply a username to log in."
elif not password:
login_error = "You must supply a password to log in."
else:
user = authenticate(username=username, password=password)
if user is not None:
auth_login(request, user)
return HttpResponseRedirect(request.POST.get('next', '/'))
else:
login_error = "Invalid username or password."
except KeyError:
pass
return render(request, "zlogio/login.html", {'login_error': login_error, 'next': nexturl})
def logout(request):
auth_logout(request)
return HttpResponseRedirect('/')
``` |
{
"source": "jonhoo/salvo",
"score": 2
} |
#### File: salvo/salvo/topology.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import ( # NOQA
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
import json
class Topology:
clusters = []
def __init__(self, clusters):
self.clusters = clusters
@staticmethod
def load_file(handle, parameters):
t = json.load(handle)
for c in t['clusters']:
assert c['name'] != 'hq'
return Topology([
Cluster(c['name'], c, parameters)
for c in t['clusters']
])
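    # Illustrative topology file (keys follow the Cluster defaults below; the
    # concrete values are made up):
    #
    #   {
    #     "clusters": [
    #       {"name": "workers", "count": "$workers", "itype": "t2.micro"},
    #       {"name": "db", "expose": false, "internet": true}
    #     ]
    #   }
    #
    # "$workers" is looked up in the `parameters` dict passed to load_file.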
class Cluster:
def __init__(self, name, attrs, params):
self.name = name
self.attrs = {
"expose": False,
"internet": True,
"image": "ami-d05e75b8", # Ubuntu Server 14.04 LTS
"itype": "t2.nano",
"count": 1,
}
for k, v in attrs.items():
if k in self.attrs:
if isinstance(v, str) and v.startswith('$'):
self.attrs[k] = params[attrs[k].lstrip('$')]
else:
self.attrs[k] = attrs[k]
elif k != "name":
raise KeyError("Unknown cluster attribute '{}'".format(k))
assert not self.attrs['expose'] or self.attrs['internet']
def __getattr__(self, name):
if name in self.attrs:
return self.attrs[name]
raise AttributeError(name)
``` |
{
"source": "jonhpark7966/skia",
"score": 2
} |
#### File: bots/recipes/recreate_skps.py
```python
DEPS = [
'checkout',
'depot_tools/gclient',
'flavor',
'infra',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'run',
'vars',
]
TEST_BUILDERS = {
'client.skia.compile': {
'skiabot-linux-swarm-000': [
'Housekeeper-Nightly-RecreateSKPs_Canary',
'Housekeeper-Weekly-RecreateSKPs',
],
},
}
def RunSteps(api):
# Check out Chrome.
api.vars.setup()
checkout_root = api.checkout.default_checkout_root
extra_gclient_env = {
'CPPFLAGS': '-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1'}
api.checkout.bot_update(
checkout_root=checkout_root,
checkout_chromium=True,
extra_gclient_env=extra_gclient_env)
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
api.flavor.setup()
src_dir = checkout_root.join('src')
skia_dir = checkout_root.join('skia')
out_dir = src_dir.join('out', 'Release')
with api.context(cwd=src_dir):
# Call GN.
platform = 'linux64' # This bot only runs on linux; don't bother checking.
gn = src_dir.join('buildtools', platform, 'gn')
gn_env = {'CPPFLAGS': '-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1',
'GYP_GENERATORS': 'ninja'}
with api.context(env=gn_env):
api.run(api.step, 'GN', cmd=[gn, 'gen', out_dir])
# Build Chrome.
api.run(api.step, 'Build Chrome', cmd=['ninja', '-C', out_dir, 'chrome'])
# Clean up the output dir.
output_dir = api.path['start_dir'].join('skp_output')
if api.path.exists(output_dir):
api.run.rmtree(output_dir)
api.file.ensure_directory('makedirs skp_output', output_dir)
# Capture the SKPs.
asset_dir = skia_dir.join('infra', 'bots', 'assets', 'skp')
cmd = ['python', asset_dir.join('create.py'),
'--chrome_src_path', src_dir,
'--browser_executable', src_dir.join('out', 'Release', 'chrome'),
'--target_dir', output_dir]
# TODO(rmistry): Uncomment the below after skbug.com/6797 is fixed.
# if 'Canary' not in api.properties['buildername']:
# cmd.append('--upload_to_partner_bucket')
with api.context(cwd=skia_dir):
api.run(api.step, 'Recreate SKPs', cmd=cmd)
# Upload the SKPs.
if 'Canary' not in api.properties['buildername']:
cmd = ['python',
skia_dir.join('infra', 'bots', 'upload_skps.py'),
'--target_dir', output_dir]
with api.context(cwd=skia_dir, env=api.infra.go_env):
api.run(api.step, 'Upload SKPs', cmd=cmd)
def GenTests(api):
builder = 'Housekeeper-Nightly-RecreateSKPs_Canary'
yield (
api.test(builder) +
api.properties(buildername=builder,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(api.path['start_dir'].join('skp_output'))
)
builder = 'Housekeeper-Weekly-RecreateSKPs'
yield (
api.test(builder) +
api.properties(buildername=builder,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(api.path['start_dir'].join('skp_output'))
)
yield (
api.test('failed_upload') +
api.properties(buildername=builder,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(api.path['start_dir'].join('skp_output')) +
api.step_data('Upload SKPs', retcode=1)
)
``` |
{
"source": "JonHub/stacasso",
"score": 4
} |
#### File: stacasso/src/hlf.py
```python
import numpy as np
import cirq
def make_interesting_circuit( n_qubits = 3 ):
""" Create smaller version of the HLF2D problem (3, 4 and 5 qubits)
q is number of qubits,
l is min_L_size
Every HLF2D problem has zero as an answer (???),
    and can have additional answers.
    Interesting versions of the problem have only a few answers.
"""
# original code uses 'q'
q = n_qubits
# depending on the size of the problem (number of qubits),
# l must be set appropriately, to have a small number of solutions
if q == 3:
# three qubits
l = 4
elif q == 4:
# four qubits
l = 5
elif q == 5:
l = 12
elif q == 10:
# ten qubits (number from original google code)
l = 4
else:
assert False,'need to find a good l for this q'
print('Creating an HLF 2D problem instance with')
print(' ', q, 'qubits', '\n ', l, 'min size of L subspace\n')
problem = None
while problem is None:
# careful! this never exits ...
# just make sure q and l are set correctly
print('finding interesting problem ... ', end='')
problem = find_interesting_problem(q, l)
if problem is None:
print('(not found)')
else:
print('done!')
    # the original google code uses 10 qubits
print("Size of subspace L:", len(problem.L))
print("Number of solutions: %d" % len(problem.all_zs))
hlf_circuit = generate_circuit_for_problem(problem)
return hlf_circuit
class HiddenLinearFunctionProblem:
"""Instance of Hidden Linear Function problem.
The problem is defined by matrix A and vector b, which are
the coefficients of quadratic form, in which linear function
is "hidden".
"""
def __init__(self, A, b):
self.n = A.shape[0]
assert A.shape == (self.n, self.n)
assert b.shape == (self.n, )
for i in range(self.n):
for j in range(i+1):
assert A[i][j] == 0, 'A[i][j] can be 1 only if i<j'
self.A = A
self.b = b
def q(self, x):
"""Action of quadratic form on binary vector (modulo 4).
Corresponds to `q(x)` in problem definition.
"""
assert x.shape == (self.n, )
return (2 * (x @ self.A @ x) + (self.b @ x)) % 4
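        # Worked example: with n = 2, A = [[0, 1], [0, 0]] and b = [1, 0],
        # the vector x = [1, 1] gives
        # q(x) = (2 * (x @ A @ x) + b @ x) % 4 = (2 * 1 + 1) % 4 = 3.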
def bruteforce_solve(self):
"""Calculates, by definition, all vectors `z` which are solutions to the problem."""
# All binary vectors of length `n`.
all_vectors = [np.array([(m >> i) % 2 for i in range(self.n)])
for m in range(2**self.n)]
def vector_in_L(x):
for y in all_vectors:
if self.q((x + y) % 2) != (self.q(x) + self.q(y)) % 4:
return False
return True
# L is subspace to which we restrict domain of quadratic form.
# Corresponds to `L_q` in the problem definition.
self.L = [x for x in all_vectors if vector_in_L(x)]
# All vectors `z` which are solutions to the problem.
self.all_zs = [z for z in all_vectors if self.is_z(z)]
def is_z(self, z):
"""Checks by definition, whether given vector `z` is solution to this problem."""
assert z.shape == (self.n, )
assert self.L is not None
for x in self.L:
if self.q(x) != 2 * ((z @ x) % 2):
return False
return True
# end class, functions here
def random_problem(n, seed=None):
"""Generates instance of the problem with given `n`.
Args:
n: dimension of the problem.
"""
if seed is not None:
np.random.seed(seed)
A = np.random.randint(0, 2, size=(n, n))
for i in range(n):
for j in range(i+1):
A[i][j] = 0
b = np.random.randint(0, 2, size=n)
problem = HiddenLinearFunctionProblem(A, b)
return problem
def find_interesting_problem(n, min_L_size):
"""Generates "interesting" instance of the problem.
Returns instance of problem with given `n`, such that size of
subspace `L_q` is at least `min_L_size`.
Args:
n: dimension of the problem.
min_L_size: minimal cardinality of subspace L.
"""
for _ in range(1000):
problem = random_problem(n)
problem.bruteforce_solve()
if len(problem.L) >= min_L_size and not np.max(problem.A) == 0:
return problem
return None
# quantum solution starts here
def edge_coloring(A):
"""Solves edge coloring problem.
Args:
A: adjacency matrix of a graph.
Returns list of lists of edges, such as edges in each list
do not have common vertex.
Tries to minimize length of this list.
"""
A = np.copy(A)
n = A.shape[0]
ans = []
while np.max(A) != 0:
edges_group = []
        used = np.zeros(n, dtype=bool)
for i in range(n):
for j in range(n):
if A[i][j] == 1 and not used[i] and not used[j]:
edges_group.append((i, j))
A[i][j] = 0
used[i] = used[j] = True
ans.append(edges_group)
return ans
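# Worked example: for the triangle graph with upper-triangular adjacency matrix
#   A = np.array([[0, 1, 1],
#                 [0, 0, 1],
#                 [0, 0, 0]])
# edge_coloring(A) returns [[(0, 1)], [(0, 2)], [(1, 2)]] -- three singleton
# groups, since every pair of edges in a triangle shares a vertex.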
def generate_circuit_for_problem(problem):
"""Generates `cirq.Circuit` which solves instance of Hidden Linear Function problem."""
qubits = cirq.LineQubit.range(problem.n)
circuit = cirq.Circuit()
# Hadamard gates at the beginning (creating equal superposition of all states).
circuit += cirq.Moment([cirq.H(q) for q in qubits])
# Controlled-Z gates encoding the matrix A.
for layer in edge_coloring(problem.A):
for i, j in layer:
circuit += cirq.CZ(qubits[i], qubits[j])
# S gates encoding the vector b.
circuit += cirq.Moment([cirq.S.on(qubits[i])
for i in range(problem.n) if problem.b[i] == 1])
# Hadamard gates at the end.
circuit += cirq.Moment([cirq.H(q) for q in qubits])
# Measurements.
circuit += cirq.Moment([cirq.measure(qubits[i], key=str(i))
for i in range(problem.n)])
return circuit
def solve_problem(problem, print_circuit=False):
"""Solves instance of Hidden Linear Function problem.
Builds quantum circuit for given problem and simulates
it with the Clifford simulator.
Returns measurement result as binary vector, which is
guaranteed to be a solution to given problem.
"""
circuit = generate_circuit_for_problem(problem)
if print_circuit:
print(circuit)
sim = cirq.CliffordSimulator()
result = sim.simulate(circuit)
z = np.array([result.measurements[str(i)][0] for i in range(problem.n)])
return z
def test1():
problem = find_interesting_problem(10, 4)
print("Size of subspace L: %d" % len(problem.L))
print("Number of solutions: %d" % len(problem.all_zs))
# these variables are used in multiple tests (brute force and quantum)
A = np.array([[0, 1, 1, 0, 0, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
b = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 1])
problem_10_64 = HiddenLinearFunctionProblem(A, b)
def test2():
# solve, using brute force
problem_10_64.bruteforce_solve()
print("Size of subspace L: %d" % len(problem_10_64.L))
print("Number of solutions: %d" % len(problem_10_64.all_zs))
def test3():
# solve, using quantum computer simulator
solve_problem(problem_10_64, print_circuit=True)
# there are additional tests in the original google source
``` |
{
"source": "jonhue/bachelors-thesis",
"score": 3
} |
#### File: analysis/lib/utils.py
```python
from typing import List, Optional
from dataclasses import dataclass
from scipy.stats import median_abs_deviation, mode
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import tikzplotlib
import pandas as pd
@dataclass
class TimeDelta:
d: float
h: float
m: float
s: float
def timedelta(seconds: int) -> TimeDelta:
d, r = divmod(seconds, 24 * 60 * 60)
h, r = divmod(r, 60 * 60)
m, r = divmod(r, 60)
return TimeDelta(d, h, m, r)
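# Small usage sketch: 90061 seconds is one day, one hour, one minute and one
# second, so
#   timedelta(90061) == TimeDelta(d=1, h=1, m=1, s=1)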
@dataclass
class DistributionSummary:
mean: float
median: float
modes: List[float]
std: float
mad: float
min_: float
p25: float
p75: float
max_: float
def summarize_distribution(series: np.array) -> DistributionSummary:
return DistributionSummary(
series.mean(),
np.quantile(series, 0.5),
mode(series).mode,
series.std(),
median_abs_deviation(series),
series.min(),
np.quantile(series, 0.25),
np.quantile(series, 0.75),
series.max(),
)
def distance_distribution(series: np.array) -> np.array:
result = series - np.roll(series, 1)
result[0] = series[0]
return result
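# Example: for cumulative positions [1, 3, 6] the gaps between consecutive
# entries are
#   distance_distribution(np.array([1, 3, 6]))  # -> array([1, 2, 3])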
def plot_cdf(
samples: np.array, label: str, name: str, xaxis_factor: Optional[float] = None
):
fig = sb.ecdfplot(samples)
plt.xlabel(label)
plt.ylabel("proportion")
# if xaxis_factor is not None:
# scale_xaxis(fig, xaxis_factor)
tikzplotlib.save(f"out/figures/{name}.tex")
def plot(x: np.array, y: np.array, xlabel: str, ylabel: str, name: str):
df = pd.DataFrame({"x": x, "y": y})
sb.lineplot(x="x", y="y", data=df)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
tikzplotlib.save(f"out/figures/{name}.tex")
def barplot(x: np.array, y: np.array, xlabel: str, ylabel: str, name: str):
df = pd.DataFrame({"x": x, "y": y})
sb.barplot(x="x", y="y", data=df)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
tikzplotlib.save(f"out/figures/{name}.tex")
def scale_xaxis(fig, factor: float):
ticks = ticker.FuncFormatter(lambda x, pos: "{0:g}".format(round(x * factor, 2)))
fig.xaxis.set_major_formatter(ticks)
``` |
{
"source": "jonhue/osp",
"score": 2
} |
#### File: osp/analysis/main.py
```python
from enum import Enum
from typing import List, Literal, TypedDict, Union
# WARNING
# This data is accurate as of Thursday, 11 March 2021, 11:43 (CET)
class ThreadKind(Enum):
Issue = 0
PullRequest = 1
class Thread(TypedDict):
kind: ThreadKind
id: int
is_scheduled: bool
is_assigned: bool
class ActionKind(Enum):
Message = 0
Review = 1
Scheduling = 2
ChangesPushed = 3
Close = 4
Discussion = 5
class ChangesPushedAction(TypedDict):
kind: Literal[ActionKind.ChangesPushed]
id: str
class CloseAction(TypedDict):
kind: Literal[ActionKind.Close]
class DiscussionAction(TypedDict):
kind: Literal[ActionKind.Discussion]
id: int
class MessageAction(TypedDict):
kind: Literal[ActionKind.Message]
id: int
class ReviewAction(TypedDict):
kind: Literal[ActionKind.Review]
id: int
class SchedulingAction(TypedDict):
kind: Literal[ActionKind.Scheduling]
Action = Union[CloseAction, ChangesPushedAction, DiscussionAction, MessageAction, ReviewAction, SchedulingAction]
class ResponseKind(Enum):
Immediate = 0
Later = 1
class Response(TypedDict):
kind: ResponseKind
action: Action
thread: Thread
# no. of days between the previous action and this response
days: int
# whether the response was by an external contributor
is_external: bool
# my actions per thread to calculate the reponse rate
class ParticipatingThread(TypedDict):
thread: Thread
# no. of my actions
count: int
# whether my final action was responded to
is_addressed: bool
issue_41991 = {
'kind': ThreadKind.Issue,
'id': 41991,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42031 = {
'kind': ThreadKind.PullRequest,
'id': 42031,
'is_scheduled': False,
'is_assigned': False,
}
issue_41317 = {
'kind': ThreadKind.Issue,
'id': 41317,
'is_scheduled': True,
'is_assigned': True,
}
pull_request_41928 = {
'kind': ThreadKind.PullRequest,
'id': 41928,
'is_scheduled': True,
'is_assigned': True,
}
pull_request_42530 = {
'kind': ThreadKind.PullRequest,
'id': 42530,
'is_scheduled': False,
'is_assigned': False,
}
issue_41775 = {
'kind': ThreadKind.Issue,
'id': 41775,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_969 = {
'kind': ThreadKind.PullRequest,
'id': 969,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42952 = {
'kind': ThreadKind.PullRequest,
'id': 42952,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42952_a = {
'kind': ThreadKind.PullRequest,
'id': 42952,
'is_scheduled': False,
'is_assigned': True,
}
pull_request_42382 = {
'kind': ThreadKind.PullRequest,
'id': 42382,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42602 = {
'kind': ThreadKind.PullRequest,
'id': 42602,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42835 = {
'kind': ThreadKind.PullRequest,
'id': 42835,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_42835_a = {
'kind': ThreadKind.PullRequest,
'id': 42835,
'is_scheduled': False,
'is_assigned': True,
}
issue_43096 = {
'kind': ThreadKind.Issue,
'id': 43096,
'is_scheduled': False,
'is_assigned': False,
}
pull_request_43097 = {
'kind': ThreadKind.PullRequest,
'id': 43097,
'is_scheduled': False,
'is_assigned': False,
}
issue_41956 = {
'kind': ThreadKind.Issue,
'id': 41956,
'is_scheduled': False,
'is_assigned': False,
}
issue_42548 = {
'kind': ThreadKind.Issue,
'id': 42548,
'is_scheduled': False,
'is_assigned': False,
}
issue_42318 = {
'kind': ThreadKind.Issue,
'id': 42318,
'is_scheduled': False,
'is_assigned': False,
}
responses: List[Response] = [{
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 746868593,
},
'thread': issue_41991,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 753854017,
},
'thread': issue_41991,
'days': 14,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.ChangesPushed,
'id': '19c68ca',
},
'thread': pull_request_42031,
'days': 1,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Review,
'id': 556932867,
},
'thread': pull_request_42031,
'days': 3,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42031,
'days': 8,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Close,
},
'thread': pull_request_42031,
'days': 5,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 763820430,
},
'thread': issue_41317,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 743105025,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Message,
'id': 743356094,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Message,
'id': 743358680,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Review,
'id': 551865735,
},
'thread': pull_request_41928,
'days': 2,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Discussion,
'id': 546000932,
},
'thread': pull_request_41928,
'days': 2,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 747095634,
},
'thread': pull_request_41928,
'days': 1,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 749195594,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 749685748,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 751852447,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_41928,
'days': 2,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 758120780,
},
'thread': pull_request_41928,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Review,
'id': 565672512,
},
'thread': pull_request_41928,
'days': 12,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Message,
'id': 758293970,
},
'thread': pull_request_41928,
'days': 1,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Review,
'id': 578642953,
},
'thread': pull_request_42530,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 769687104,
},
'thread': pull_request_42530,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 772135827,
},
'thread': pull_request_42530,
'days': 2,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 754768730,
},
'thread': pull_request_969,
'days': 1,
'is_external': True,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 784344870,
},
'thread': pull_request_969,
'days': 49,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42952,
'days': 7,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Close,
},
'thread': pull_request_42952_a,
'days': 1,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42382,
'days': 8,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Review,
'id': 784344870,
},
'thread': pull_request_42382,
'days': 3,
'is_external': True,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Review,
'id': 579554118,
},
'thread': pull_request_42382,
'days': 0,
'is_external': True,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42382,
'days': 23,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42602,
'days': 8,
'is_external': False,
}, {
'kind': ResponseKind.Later,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': pull_request_42602,
'days': 21,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 788963071,
},
'thread': pull_request_42835,
'days': 13,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Review,
'id': 608233848,
},
'thread': pull_request_42835_a,
'days': 5,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Scheduling,
},
'thread': issue_43096,
'days': 3,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 744720538,
},
'thread': issue_41956,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 769940334,
},
'thread': issue_42548,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 759745196,
},
'thread': issue_42318,
'days': 0,
'is_external': False,
}, {
'kind': ResponseKind.Immediate,
'action': {
'kind': ActionKind.Message,
'id': 760395837,
},
'thread': issue_42318,
'days': 0,
'is_external': False,
}]
threads: List[ParticipatingThread] = [{
'thread': issue_41991,
'count': 2,
'is_addressed': True,
}, {
'thread': pull_request_42031,
'count': 1,
'is_addressed': True,
}, {
'thread': issue_41317,
'count': 1,
'is_addressed': True,
}, {
'thread': pull_request_41928,
'count': 7,
'is_addressed': True,
}, {
'thread': pull_request_42530,
'count': 3,
'is_addressed': True,
}, {
'thread': issue_41775,
'count': 1,
'is_addressed': False,
}, {
'thread': pull_request_969,
'count': 3,
'is_addressed': False,
}, {
'thread': pull_request_42952,
'count': 1,
'is_addressed': True,
}, {
'thread': pull_request_42382,
'count': 4,
'is_addressed': False,
}, {
'thread': pull_request_42602,
'count': 1,
'is_addressed': False,
}, {
'thread': pull_request_42835,
'count': 2,
'is_addressed': True,
}, {
'thread': issue_43096,
'count': 1,
'is_addressed': False,
}, {
'thread': pull_request_43097,
'count': 1,
'is_addressed': False,
}, {
'thread': issue_41956,
'count': 1,
'is_addressed': True,
}, {
'thread': issue_42548,
'count': 1,
'is_addressed': True,
}, {
'thread': issue_42318,
'count': 2,
'is_addressed': True,
}]
def response_rate(threads: List[ParticipatingThread]) -> float:
actions = sum([thread['count'] for thread in threads])
addressed_actions = sum([thread['count'] if thread['is_addressed'] else thread['count'] - 1 for thread in threads])
return addressed_actions / actions
def absolute_response_rate(threads: List[ParticipatingThread]) -> float:
addressed_threads = sum([1 if thread['is_addressed'] else 0 for thread in threads])
return addressed_threads / len(threads)
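# Worked example (with made-up threads): given one thread with count=3 that is
# addressed and one with count=2 that is not, response_rate counts
# 3 + (2 - 1) = 4 addressed actions out of 5 (0.8), while
# absolute_response_rate counts 1 addressed thread out of 2 (0.5).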
print('response rate:', response_rate(threads), ';', len(threads))
print('absolute response rate:', absolute_response_rate(threads), ';', len(threads))
issue_threads = [thread for thread in threads if thread['thread']['kind'] == ThreadKind.Issue]
pull_request_threads = [thread for thread in threads if thread['thread']['kind'] == ThreadKind.PullRequest]
print('issue response rate:', response_rate(issue_threads), ';', len(issue_threads))
print('issue absolute response rate:', absolute_response_rate(issue_threads), ';', len(issue_threads))
print('pull request response rate:', response_rate(pull_request_threads), ';', len(pull_request_threads))
print('pull request absolute response rate:', absolute_response_rate(pull_request_threads), ';', len(pull_request_threads))
def response_time(responses: List[Response]) -> float:
response_times = sum([response['days'] for response in responses])
return response_times / len(responses)
print('response time:', response_time(responses), 'days', ';', len(responses))
immediate_responses = [response for response in responses if response['kind'] == ResponseKind.Immediate]
later_responses = [response for response in responses if response['kind'] == ResponseKind.Later]
print('immediate response time:', response_time(immediate_responses), 'days', ';', len(immediate_responses))
print('later response time:', response_time(later_responses), 'days', ';', len(later_responses))
issue_responses = [response for response in responses if response['thread']['kind'] == ThreadKind.Issue]
pull_request_responses = [response for response in responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('issue response time:', response_time(issue_responses), 'days', ';', len(issue_responses))
print('pull request response time:', response_time(pull_request_responses), 'days', ';', len(pull_request_responses))
pull_request_internal_responses = [response for response in responses if response['thread']['kind'] == ThreadKind.PullRequest and not response['is_external']]
print('pull request response time (not external):', response_time(pull_request_internal_responses), 'days', ';', len(pull_request_internal_responses))
unassigned_responses = [response for response in responses if not response['thread']['is_assigned']]
assigned_responses = [response for response in responses if response['thread']['is_assigned']]
print('unassigned response time:', response_time(unassigned_responses), 'days', ';', len(unassigned_responses))
issue_responses = [response for response in unassigned_responses if response['thread']['kind'] == ThreadKind.Issue]
pull_request_responses = [response for response in unassigned_responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('unassigned issue response time:', response_time(issue_responses), 'days', ';', len(issue_responses))
print('unassigned pull request response time:', response_time(pull_request_responses), 'days', ';', len(pull_request_responses))
immediate_responses = [response for response in unassigned_responses if response['kind'] == ResponseKind.Immediate]
later_responses = [response for response in unassigned_responses if response['kind'] == ResponseKind.Later]
print('unassigned immediate response time:', response_time(immediate_responses), 'days', ';', len(immediate_responses))
print('unassigned later response time:', response_time(later_responses), 'days', ';', len(later_responses))
print('assigned response time:', response_time(assigned_responses), 'days', ';', len(assigned_responses))
issue_responses = [response for response in assigned_responses if response['thread']['kind'] == ThreadKind.Issue]
pull_request_responses = [response for response in assigned_responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('assigned issue response time:', response_time(issue_responses), 'days', ';', len(issue_responses))
print('assigned pull request response time:', response_time(pull_request_responses), 'days', ';', len(pull_request_responses))
immediate_responses = [response for response in assigned_responses if response['kind'] == ResponseKind.Immediate]
later_responses = [response for response in assigned_responses if response['kind'] == ResponseKind.Later]
print('assigned immediate response time:', response_time(immediate_responses), 'days', ';', len(immediate_responses))
print('assigned later response time:', response_time(later_responses), 'days', ';', len(later_responses))
print('assigned response time factor:', response_time(unassigned_responses) / response_time(assigned_responses))
unscheduled_responses = [response for response in responses if not response['thread']['is_scheduled']]
scheduled_responses = [response for response in responses if response['thread']['is_scheduled']]
print('unscheduled response time:', response_time(unscheduled_responses), 'days', ';', len(unscheduled_responses))
issue_responses = [response for response in unscheduled_responses if response['thread']['kind'] == ThreadKind.Issue]
pull_request_responses = [response for response in unscheduled_responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('unscheduled issue response time:', response_time(issue_responses), 'days', ';', len(issue_responses))
print('unscheduled pull request response time:', response_time(pull_request_responses), 'days', ';', len(pull_request_responses))
immediate_responses = [response for response in unscheduled_responses if response['kind'] == ResponseKind.Immediate]
later_responses = [response for response in unscheduled_responses if response['kind'] == ResponseKind.Later]
print('unscheduled immediate response time:', response_time(immediate_responses), 'days', ';', len(immediate_responses))
print('unscheduled later response time:', response_time(later_responses), 'days', ';', len(later_responses))
print('scheduled response time:', response_time(scheduled_responses), 'days', ';', len(scheduled_responses))
issue_responses = [response for response in scheduled_responses if response['thread']['kind'] == ThreadKind.Issue]
pull_request_responses = [response for response in scheduled_responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('scheduled issue response time:', response_time(issue_responses), 'days', ';', len(issue_responses))
print('scheduled pull request response time:', response_time(pull_request_responses), 'days', ';', len(pull_request_responses))
immediate_responses = [response for response in scheduled_responses if response['kind'] == ResponseKind.Immediate]
later_responses = [response for response in scheduled_responses if response['kind'] == ResponseKind.Later]
print('scheduled immediate response time:', response_time(immediate_responses), 'days', ';', len(immediate_responses))
print('scheduled later response time:', response_time(later_responses), 'days', ';', len(later_responses))
print('scheduled response time factor:', response_time(unscheduled_responses) / response_time(scheduled_responses))
unassigned_unscheduled_responses = [response for response in responses if not response['thread']['is_assigned'] and not response['thread']['is_scheduled']]
print('unassigned & unscheduled response time:', response_time(unassigned_unscheduled_responses), 'days', ';', len(unassigned_unscheduled_responses))
unassigned_unscheduled_pr_responses = [response for response in unassigned_unscheduled_responses if response['thread']['kind'] == ThreadKind.PullRequest]
print('unassigned & unscheduled pull request response time:', response_time(unassigned_unscheduled_pr_responses), 'days', ';', len(unassigned_unscheduled_pr_responses))
``` |
{
"source": "jonhue/propose",
"score": 3
} |
#### File: propose/ast/implication.py
```python
from .binary import Binary
class Implication(Binary):
def __str__(self):
return '(' + str(self.left) + ' → ' + str(self.right) + ')'
def to_string(self, formulas, offset = 0):
left = self.left.to_string(formulas, offset + 1)
formulas.update({(offset + 2 + len(left)): self})
return '(' + left + ' → ' + self.right.to_string(formulas, offset + 4 + len(left)) + ')'
def eval(self, binding):
return not self.left.eval(binding) or self.right.eval(binding)
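# Example (Literal comes from literal.py in this package):
#   Implication(left=Literal('p'), right=Literal('q')).eval({'p': True, 'q': False})
# evaluates to False, matching the truth table for p -> q.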
```
#### File: propose/ast/literal.py
```python
from .formula import Formula
class Literal(Formula):
def __init__(self, name: str):
self.name = name
def __str__(self):
return str(self.name)
def to_string(self, formulas, offset = 0):
return str(self)
def eval(self, binding):
return binding[self.name]
```
#### File: propose/ast/negation.py
```python
from .unary import Unary
class Negation(Unary):
def __str__(self):
return '¬' + str(self.formula)
def to_string(self, formulas, offset = 0):
formulas.update({offset: self})
return '¬' + self.formula.to_string(formulas, offset + 1)
def eval(self, binding):
return not self.formula.eval(binding)
```
#### File: propose/lexer/__init__.py
```python
from .lexer import Lexer
lexer = Lexer().get_lexer()
def lex(input):
return lexer.lex(input)
```
#### File: propose/parser/parser.py
```python
import decimal
from rply import ParserGenerator
from propose.ast import *
class Parser:
def __init__(self):
self.pg = ParserGenerator(
['->', '<->', '+', '*', '!', '(', ')', 'false', 'true', 'xor', 'LITERAL'],
precedence=[
('left', ['<->']),
('left', ['->']),
('left', ['xor']),
('left', ['+']),
('left', ['*']),
('left', ['!'])
]
)
def _add_productions(self):
@self.pg.production('formula : binary')
@self.pg.production('formula : unary')
@self.pg.production('formula : grouped')
@self.pg.production('formula : literal')
def formula(state, p):
return p[0]
@self.pg.production('literal : false')
@self.pg.production('literal : true')
@self.pg.production('literal : LITERAL')
def literal(state, p):
if p[0].gettokentype() == 'false':
return F()
elif p[0].gettokentype() == 'true':
return T()
else:
if p[0].getstr() not in state.literals:
state.literals.append(p[0].getstr())
return Literal(name=p[0].getstr())
@self.pg.production('grouped : ( formula )')
def grouped(state, p):
return p[1]
@self.pg.production('unary : ! formula')
def unary(state, p):
return Negation(formula=p[1])
@self.pg.production('binary : formula <-> formula')
@self.pg.production('binary : formula -> formula')
@self.pg.production('binary : formula xor formula')
@self.pg.production('binary : formula + formula')
@self.pg.production('binary : formula * formula')
def binary(state, p):
if p[1].gettokentype() == '<->':
return Biconditional(left=p[0], right=p[2])
elif p[1].gettokentype() == '->':
return Implication(left=p[0], right=p[2])
elif p[1].gettokentype() == 'xor':
return Xor(left=p[0], right=p[2])
elif p[1].gettokentype() == '+':
return Or(left=p[0], right=p[2])
elif p[1].gettokentype() == '*':
return And(left=p[0], right=p[2])
def get_parser(self):
self._add_productions()
return self.pg.build()
``` |
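A hypothetical end-to-end sketch of driving the lexer and parser above (not from the repo); it assumes the lexer maps `->`, `!` and bare identifiers to the token names listed in the grammar, and that the rply parser is given a simple state object exposing a `literals` list, as the productions expect:

```python
from types import SimpleNamespace

from propose.lexer import lex
from propose.parser.parser import Parser

state = SimpleNamespace(literals=[])            # collects literal names during parsing
parser = Parser().get_parser()
formula = parser.parse(lex('!a -> b'), state=state)
print(formula)                                  # (¬a → b)
print(state.literals)                           # ['a', 'b']
print(formula.eval({'a': True, 'b': False}))    # True, since ¬a is false
```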
{
"source": "jonhus/CS747-GP-GAN",
"score": 2
} |
#### File: jonhus/CS747-GP-GAN/gp_gan.py
```python
import math
import os
# import chainer
# import chainer.functions as F
import numpy as np
import torch
# from chainer import cuda, Variable
from scipy.fftpack import dct, idct
from scipy.ndimage import correlate
from scipy.optimize import minimize
from skimage.filters import gaussian, sobel_h, sobel_v, scharr_h, scharr_v, roberts_pos_diag, roberts_neg_diag, \
prewitt_h, prewitt_v
from skimage.transform import resize
from skimage.io import imsave
################## Gradient Operator #########################
normal_h = lambda im: correlate(im, np.asarray([[0, -1, 1]]), mode='nearest')
normal_v = lambda im: correlate(im, np.asarray([[0, -1, 1]]).T, mode='nearest')
gradient_operator = {
'normal': (normal_h, normal_v),
'sobel': (sobel_h, sobel_v),
'scharr': (scharr_h, scharr_v),
'roberts': (roberts_pos_diag, roberts_neg_diag),
'prewitt': (prewitt_h, prewitt_v)
}
###########################################################
def preprocess(im):
im = np.transpose(im * 2 - 1, (2, 0, 1)).astype(np.float32)
return im
def ndarray_resize(im, image_size, order=3, dtype=None):
im = resize(im, image_size, preserve_range=True, order=order, mode='constant')
if dtype:
im = im.astype(dtype)
return im
# NOTE: only used when supervised=False; this path still targets the commented-out chainer API above
def z_generate(z, G, copy_paste_var, nz, gpu):
z = np.reshape(z, (nz, 1, 1)).astype(np.float32)
z_var = Variable(chainer.dataset.concat_examples([z], gpu))
loss = F.mean_squared_error(copy_paste_var, G(z_var))
# Backward
loss.backward()
# Transfer loss & diff from GPU to CPU
loss = cuda.to_cpu(loss.data)
dz = np.squeeze(cuda.to_cpu(z_var.grad))
return loss, np.asarray(dz.flatten(), dtype=np.float64)
def imfilter2d(im, filter_func):
gradients = np.zeros_like(im)
for i in range(im.shape[2]):
gradients[:, :, i] = filter_func(im[:, :, i])
return gradients
def gradient_feature(im, color_feature, gradient_kernel):
result = np.zeros((*im.shape, 5))
gradient_h, gradient_v = gradient_operator[gradient_kernel]
result[:, :, :, 0] = color_feature
result[:, :, :, 1] = imfilter2d(im, gradient_h)
result[:, :, :, 2] = imfilter2d(im, gradient_v)
result[:, :, :, 3] = np.roll(result[:, :, :, 1], 1, axis=1)
result[:, :, :, 4] = np.roll(result[:, :, :, 2], 1, axis=0)
return result.astype(im.dtype)
def fft2(K, size, dtype):
w, h = size
param = np.fft.fft2(K)
param = np.real(param[0:w, 0:h])
return param.astype(dtype)
def laplacian_param(size, dtype):
w, h = size
K = np.zeros((2 * w, 2 * h)).astype(dtype)
laplacian_k = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
kw, kh = laplacian_k.shape
K[:kw, :kh] = laplacian_k
K = np.roll(K, -(kw // 2), axis=0)
K = np.roll(K, -(kh // 2), axis=1)
return fft2(K, size, dtype)
def gaussian_param(size, dtype, sigma):
w, h = size
K = np.zeros((2 * w, 2 * h)).astype(dtype)
K[1, 1] = 1
K[:3, :3] = gaussian(K[:3, :3], sigma)
K = np.roll(K, -1, axis=0)
K = np.roll(K, -1, axis=1)
return fft2(K, size, dtype)
def dct2(x, norm='ortho'):
return dct(dct(x, norm=norm).T, norm=norm).T
def idct2(x, norm='ortho'):
return idct(idct(x, norm=norm).T, norm=norm).T
def gaussian_poisson_editing(X, param_l, param_g, color_weight=1, eps=1e-12):
Fh = (X[:, :, :, 1] + np.roll(X[:, :, :, 3], -1, axis=1)) / 2
Fv = (X[:, :, :, 2] + np.roll(X[:, :, :, 4], -1, axis=0)) / 2
L = np.roll(Fh, 1, axis=1) + np.roll(Fv, 1, axis=0) - Fh - Fv
param = param_l + color_weight * param_g
param[(param >= 0) & (param < eps)] = eps
param[(param < 0) & (param > -eps)] = -eps
Y = np.zeros(X.shape[:3])
for i in range(3):
Xdct = dct2(X[:, :, i, 0])
Ydct = (dct2(L[:, :, i]) + color_weight * Xdct) / param
Y[:, :, i] = idct2(Ydct)
return Y
def run_gp_editing(src_im, dst_im, mask_im, gan_im, color_weight, sigma, gradient_kernel='normal'):
dst_feature = gradient_feature(dst_im, gan_im, gradient_kernel)
src_feature = gradient_feature(src_im, gan_im, gradient_kernel)
feature = dst_feature * (1 - mask_im) + src_feature * mask_im
size, dtype = feature.shape[:2], feature.dtype
param_l = laplacian_param(size, dtype)
param_g = gaussian_param(size, dtype, sigma)
gan_im = gaussian_poisson_editing(feature, param_l, param_g, color_weight=color_weight)
gan_im = np.clip(gan_im, 0, 1)
return gan_im
def laplacian_pyramid(im, max_level, image_size, smooth_sigma):
im_pyramid = [im]
diff_pyramid = []
for i in range(max_level - 1, -1, -1):
smoothed = gaussian(im_pyramid[-1], smooth_sigma, multichannel=True)
diff_pyramid.append(im_pyramid[-1] - smoothed)
smoothed = ndarray_resize(smoothed, (image_size * 2 ** i, image_size * 2 ** i))
im_pyramid.append(smoothed)
im_pyramid.reverse()
diff_pyramid.reverse()
return im_pyramid, diff_pyramid
"""
GP-GAN: Towards Realistic High-Resolution Image Blending
obj: source image, size: w x h x 3, dtype: float, value: [0, 1]
bg : destination image, size: w x h x 3, dtype: float, value: [0, 1]
mask: mask image, size: w x h, dtype: float, value: {0, 1}
G: Generator
image_size: image_size for Blending GAN
gpu: gpu id
color_weight: beta in Gaussian-Poisson Equation
sigma: sigma for gaussian smooth of Gaussian-Poisson Equation
gradient_kernel: kernel type for calculating gradients
smooth_sigma: sigma for gaussian smooth of Laplacian pyramid
supervised: supervised Blending GAN ?
## If supervised = False
nz: noise vector length
n_iteration: # of iterations for optimization
"""
def gp_gan(obj, bg, mask, G, image_size, gpu, color_weight=1, sigma=0.5, gradient_kernel='normal', smooth_sigma=1,
supervised=True, nz=100, n_iteration=1000):
w_orig, h_orig, _ = obj.shape
############################ Gaussian-Poisson GAN Image Editing ###########################
# pyramid
max_level = int(math.ceil(np.log2(max(w_orig, h_orig) / image_size)))
obj_im_pyramid, _ = laplacian_pyramid(obj, max_level, image_size, smooth_sigma)
bg_im_pyramid, _ = laplacian_pyramid(bg, max_level, image_size, smooth_sigma)
# init GAN image
mask_init = ndarray_resize(mask, (image_size, image_size), order=0)[:, :, np.newaxis]
copy_paste_init = obj_im_pyramid[0] * mask_init + bg_im_pyramid[0] * (1 - mask_init)
# copy_paste_init_var = Variable(chainer.dataset.concat_examples([preprocess(copy_paste_init)], gpu))
# print("Variable: {}".format(copy_paste_init_var.shape))
copy_paste_init = torch.from_numpy(preprocess(copy_paste_init))
print("Tensor: {}".format(copy_paste_init.shape))
copy_paste_init.unsqueeze_(0)
print("Tensor: {}".format(copy_paste_init.shape))
G.eval()
if supervised:
# gan_im_var = G(copy_paste_init_var)
gan_im_var = G(copy_paste_init)
gan_im_var = gan_im_var.detach().numpy()
print("GAN_IM_VAR Type: {} and Shape: {}".format(type(gan_im_var), gan_im_var.shape))
# gan_im_var = gan_im_var.array()
else:
z_init = np.random.normal(size=(nz, 1, 1))
res = minimize(z_generate, z_init, args=(G, copy_paste_init_var, nz, gpu), method='L-BFGS-B', jac=True,
options={'maxiter': n_iteration, 'disp': False})
z = np.reshape(res.x, (nz, 1, 1)).astype(np.float32)
gan_im_var = G(Variable(chainer.dataset.concat_examples([z], gpu)))
# gan_im = np.clip(np.transpose((np.squeeze(cuda.to_cpu(gan_im_var.data)) + 1) / 2, (1, 2, 0)), 0, 1).astype(
# obj.dtype)
gan_im = np.clip(np.transpose((np.squeeze(gan_im_var.data) + 1) / 2, (1, 2, 0)), 0, 1).astype(obj.dtype)
imsave(os.path.expanduser("~/Desktop/lowres.png"), gan_im)
# Start pyramid
for level in range(max_level + 1):
size = obj_im_pyramid[level].shape[:2]
mask_im = ndarray_resize(mask, size, order=0)[:, :, np.newaxis, np.newaxis]
if level != 0:
gan_im = ndarray_resize(gan_im, size)
gan_im = run_gp_editing(obj_im_pyramid[level], bg_im_pyramid[level], mask_im, gan_im, color_weight, sigma,
gradient_kernel)
gan_im = np.clip(gan_im * 255, 0, 255).astype(np.uint8)
return gan_im
```
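As a quick sanity check of the Gaussian-Poisson editing core in isolation, here is a hypothetical smoke test of my own (it bypasses the GAN by using the naive copy-paste composite as the colour guide, and assumes numpy/scipy/scikit-image/torch are installed and the functions above are importable from `gp_gan`):

```python
import numpy as np

from gp_gan import run_gp_editing

size = 64
src = np.random.rand(size, size, 3).astype(np.float32)
dst = np.random.rand(size, size, 3).astype(np.float32)
mask = np.zeros((size, size), dtype=np.float32)
mask[16:48, 16:48] = 1.0                      # paste the centre of src onto dst

color_guide = src * mask[:, :, None] + dst * (1 - mask[:, :, None])
blended = run_gp_editing(src, dst, mask[:, :, None, None], color_guide,
                         color_weight=1.0, sigma=0.5)
print(blended.shape, blended.min(), blended.max())  # (64, 64, 3), values within [0, 1]
```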
#### File: jonhus/CS747-GP-GAN/train_blending_gan-new.py
```python
from __future__ import print_function
import cv2
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torchvision.utils import make_grid
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import shutil
from IPython.display import HTML
import matplotlib
# matplotlib.use('Agg')
# import chainer
# from chainer import training, Variable
# from chainer.training import extensions
from model_new import EncoderDecoder, Discriminator, Generator, weights_init
# from model_new import EncoderDecoder, Discriminator, init_bn, init_conv
from dataset_new import BlendingDataset
# from updater import EncoderDecoderBlendingUpdater
# from sampler import sampler
# Root directory for dataset
# dataroot = "/Users/jonhus/Downloads/celeba"
dataroot = "/Users/jonhus/Downloads/imageAlignedLD"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 128
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 64
# Number of training epochs
num_epochs = 25
# Location to save models
model_path = "saved_model.pt"
checkpoint_path = os.getcwd()
# NOTE: legacy chainer helper kept from the original implementation; unused by the torch training loop below
def make_optimizer(model, alpha, beta1):
optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
optimizer.setup(model)
return optimizer
def show_tensor_images(image_tensor, num_images=2, size=(3, 64, 64)):
'''
Function for visualizing images: Given a tensor of images, number of images, and
size per image, plots and prints the images in a uniform grid.
'''
image_tensor = (image_tensor + 1) / 2
image_unflat = image_tensor.detach().cpu()
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
def save_checkpoint(state, checkpoint_dir):
'''
Function for saving checkpoints during training that can be reloaded
later to continue training
'''
f_path = os.path.join(checkpoint_dir, 'checkpoint.pt')
torch.save(state, f_path)
# if is_best:
# best_fpath = best_model_dir / 'best_model.pt'
# shutil.copyfile(f_path, best_fpath)
def main():
parser = argparse.ArgumentParser(description='Train Blending GAN')
parser.add_argument('--nef', type=int, default=64, help='# of base filters in encoder')
parser.add_argument('--ngf', type=int, default=64, help='# of base filters in decoder')
parser.add_argument('--nc', type=int, default=3, help='# of output channels in decoder')
parser.add_argument('--nBottleneck', type=int, default=4000, help='# of output channels in encoder')
parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')
parser.add_argument('--lr_d', type=float, default=0.0002, help='Learning rate for Critic, default=0.0002')
parser.add_argument('--lr_g', type=float, default=0.002, help='Learning rate for Generator, default=0.002')
parser.add_argument('--beta1', type=float, default=0.5, help='Beta for Adam, default=0.5')
parser.add_argument('--l2_weight', type=float, default=0.999, help='Weight for l2 loss, default=0.999')
parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--n_epoch', type=int, default=25, help='# of epochs to train for')
parser.add_argument('--data_root', default = dataroot, help='Path to dataset')
parser.add_argument('--load_size', type=int, default=64, help='Scale image to load_size')
parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')
parser.add_argument('--ratio', type=float, default=0.5, help='Ratio for center square size v.s. image_size')
parser.add_argument('--val_ratio', type=float, default=0.05, help='Ratio for validation set v.s. data set')
parser.add_argument('--d_iters', type=int, default=5, help='# of D iters per each G iter')
parser.add_argument('--clamp_lower', type=float, default=-0.01, help='Lower bound for clipping')
parser.add_argument('--clamp_upper', type=float, default=0.01, help='Upper bound for clipping')
parser.add_argument('--experiment', default='encoder_decoder_blending_result',
help='Where to store samples and models')
parser.add_argument('--test_folder', default='samples', help='Where to store test results')
parser.add_argument('--workers', type=int, default=4, help='# of data loading workers')
parser.add_argument('--batch_size', type=int, default=64, help='Input batch size')
parser.add_argument('--test_size', type=int, default=64, help='Batch size for testing')
# parser.add_argument('--train_samples', type=int, default=150000, help='# of training examples')
parser.add_argument('--train_samples', type=int, default=15000, help='# of training examples')
parser.add_argument('--test_samples', type=int, default=256, help='# of testing examples')
parser.add_argument('--manual_seed', type=int, default=5, help='Manual seed')
parser.add_argument('--resume', default='', help='Resume the training from snapshot')
parser.add_argument('--snapshot_interval', type=int, default=1, help='Interval of snapshot (epochs)')
parser.add_argument('--print_interval', type=int, default=1, help='Interval of printing log to console (iteration)')
parser.add_argument('--plot_interval', type=int, default=10, help='Interval of plot (iteration)')
parser.add_argument('--ngpu', type=int, default=1, help='Number of GPUs available. Use 0 for CPU')
parser.add_argument('--resume_training_file', default=None, help='/path/to/checkpoint.pt to resume training with')
args = parser.parse_args()
random.seed(args.manual_seed)
print('Input arguments:')
for key, value in vars(args).items():
print('\t{}: {}'.format(key, value))
print('')
# Set up G & D
# print('Create & Init models ...')
# print('\tInit G network ...')
# G = EncoderDecoder(args.nef, args.ngf, args.nc, args.nBottleneck, image_size=args.image_size, conv_init=init_conv,
# bn_init=init_bn)
# print('\tInit D network ...')
# D = DCGAN_D(args.image_size, args.ndf, conv_init=init_conv, bn_init=init_bn)
# if args.gpu >= 0:
# print('\tCopy models to gpu {} ...'.format(args.gpu))
# chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
# G.to_gpu() # Copy the model to the GPU
# D.to_gpu()
# print('Init models done ...\n')
# # Setup an optimizer
# optimizer_d = make_optimizer(D, args.lr_d, args.beta1)
# optimizer_g = make_optimizer(G, args.lr_g, args.beta1)
########################################################################################################################
# Setup dataset & iterator
print('Load images from {} ...'.format(args.data_root))
folders = sorted(
[folder for folder in os.listdir(args.data_root) if os.path.isdir(os.path.join(args.data_root, folder))])
val_end = int(args.val_ratio * len(folders))
print('\t{} folders in total, {} val folders ...'.format(len(folders), val_end))
trainset = BlendingDataset(args.train_samples, folders[val_end:], args.data_root, args.ratio, args.load_size,
args.image_size)
valset = BlendingDataset(args.test_samples, folders[:val_end], args.data_root, args.ratio, args.load_size,
args.image_size)
print('\tTrainset contains {} image files'.format(len(trainset)))
print('\tValset contains {} image files'.format(len(valset)))
print('')
# train_iter = chainer.iterators.MultiprocessIterator(trainset, args.batch_size, n_processes=args.workers,
# n_prefetch=args.workers)
# print('Load images from {} ...'.format(args.data_root))
# folders = sorted(
# [folder for folder in os.listdir(args.data_root) if os.path.isdir(os.path.join(args.data_root, folder))])
# val_end = int(args.val_ratio * len(folders))
# print('\t{} folders in total, {} val folders ...'.format(len(folders), val_end))
# We can use an image folder dataset the way we have it setup.
# Create the dataset
# dataset = dset.ImageFolder(root=dataroot,
# transform=transforms.Compose([
# transforms.Resize(image_size),
# transforms.CenterCrop(image_size),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
# ]))
# Create the dataloader
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
# shuffle=True, num_workers=workers)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=workers)
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and args.ngpu > 0) else "cpu")
########################################################################################################################
# Create the generator
# netG = Generator(args.ngpu, nz=args.nBottleneck).to(device)
netG = EncoderDecoder(args.nef, args.ngf, args.nc, args.nBottleneck, image_size=args.image_size).to(device)
# G = EncoderDecoder(args.nef, args.ngf, args.nc, args.nBottleneck, image_size=args.image_size, conv_init=init_conv,
# bn_init=init_bn)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (args.ngpu > 1):
netG = nn.DataParallel(netG, list(range(args.ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.2.
netG.apply(weights_init)
# Print the model
print(netG)
# Create the Discriminator
# netD = Discriminator(args.ngpu).to(device)
netD = Discriminator(args.image_size, args.ndf).to(device)
# D = DCGAN_D(args.image_size, args.ndf, conv_init=init_conv, bn_init=init_bn)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (args.ngpu > 1):
netD = nn.DataParallel(netD, list(range(args.ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.2.
netD.apply(weights_init)
# Print the model
print(netD)
# Initialize BCELoss function
# criterion = nn.BCELoss()
criterion = nn.MSELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
# fixed_noise = torch.randn(64, args.nBottleneck, 1, 1, device=device)
fixed_noise = torch.randn(64, args.nc, args.image_size, args.image_size, device=device)
# Establish convention for real and fake labels during training
real_label = 1.
fake_label = 0.
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=args.lr_d, betas=(args.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=args.lr_g, betas=(args.beta1, 0.999))
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
start_epoch = 0
if args.resume_training_file:
checkpoint = torch.load(args.resume_training_file)
netD.load_state_dict(checkpoint['state_dict']['discriminator_state_dict'])
netG.load_state_dict(checkpoint['state_dict']['generator_state_dict'])
optimizerD.load_state_dict(checkpoint['optimizer']['disc_optimizer_state_dict'])
optimizerG.load_state_dict(checkpoint['optimizer']['gen_optimizer_state_dict'])
start_epoch = checkpoint['epoch']
print("Starting Training Loop...")
# For each epoch
for epoch in range(start_epoch, num_epochs):
# For each batch in the dataloader
# for i, data in enumerate(dataloader, 0):
for i, data in enumerate(dataloader):
# Debug code to show pair of training images
# fake_img = data[0][i]
# real_img = data[1][i]
# img_pair = torch.stack([fake_img, real_img])
# show_tensor_images(img_pair)
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
# real_cpu = data[0].to(device)
real_cpu = data[1].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
# noise = torch.randn(b_size, args.nBottleneck, 1, 1, device=device)
# noise = torch.randn(b_size, args.nc, args.image_size, args.image_size, device=device)
# Train with fake images
fake_images = data[0].to(device)
# Generate fake image batch with G
fake_images = netG(fake_images)
label.fill_(fake_label)
# Classify all fake batch with D
# output = netD(fake.detach()).view(-1)
output = netD(fake_images).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch, accumulated (summed) with previous gradients
errD_fake.backward(retain_graph=True)
D_G_z1 = output.mean().item()
# Compute error of D as sum over the fake and the real batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake_images).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# Output training stats
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
# if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
# with torch.no_grad():
# fake = netG(fixed_noise).detach().cpu()
# img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
# Save the checkpoint file after each epoch
checkpoint = {
'epoch': epoch + 1,
'state_dict': {
'generator_state_dict': netG.state_dict(),
'discriminator_state_dict': netD.state_dict()
},
'optimizer': {
'gen_optimizer_state_dict': optimizerG.state_dict(),
'disc_optimizer_state_dict': optimizerD.state_dict()
}
}
save_checkpoint(checkpoint, checkpoint_path)
# Save the model
print("Save the model...")
torch.save({'generator_state_dict': netG.state_dict(),
'discriminator_state_dict': netD.state_dict(),
}, model_path)
if __name__ == '__main__':
main()
``` |
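For reference, a minimal sketch (my own, under the assumption that `model_new.EncoderDecoder` accepts the same constructor arguments used in `main()` above) of reloading the generator that the training loop saves to `saved_model.pt`:

```python
import torch
from model_new import EncoderDecoder

# Mirror the argparse defaults used during training: nef=64, ngf=64, nc=3, nBottleneck=4000
netG = EncoderDecoder(64, 64, 3, 4000, image_size=64)
state = torch.load("saved_model.pt", map_location="cpu")
netG.load_state_dict(state["generator_state_dict"])
netG.eval()

with torch.no_grad():
    out = netG(torch.randn(1, 3, 64, 64))  # shape check only; real inputs are normalised composites
print(out.shape)
```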
{
"source": "jonhusen/ArcGIS",
"score": 2
} |
#### File: ArcGIS/ArcGISDesktop/nightly_gdb_maintenance.py
```python
import arcpy, os
# Set workspace
workspace = r'c:\Temp\test_editpermissions_sa.sde'
# Set the workspace environment
arcpy.env.workspace = workspace
# Block new connections to the database.
arcpy.AcceptConnections(workspace, False)
# Wait 15 minutes
# time.sleep(900)
# Disconnect all users from the database.
arcpy.DisconnectUser(workspace, "ALL")
# Use a list comprehension to get a list of version names where the owner
# is the current user and make sure sde.default is not selected.
verList = [ver.name for ver in arcpy.da.ListVersions() if ver.isOwner == True and ver.name.lower() != 'sde.default']
# Reconcile and Post versions to sde.DEFAULT
arcpy.ReconcileVersions_management(workspace,
"ALL_VERSIONS",
"SDE.Default",
verList,
"LOCK_ACQUIRED",
"NO_ABORT",
"BY_OBJECT",
"FAVOR_TARGET_VERSION",
"POST",
"DELETE_VERSION",
"c:\Temp\RecLog.txt")
print 'Reconciling Complete'
# Compress database
arcpy.Compress_management(workspace)
print 'Database Compression Complete'
# Rebuild indexes
arcpy.RebuildIndexes_management(workspace,
"SYSTEM",
"ALL")
print 'Rebuild Indexes complete'
# Analyze the states and states_lineages system tables
arcpy.AnalyzeDatasets_management(workspace,
"SYSTEM",
"ANALYZE_BASE",
"ANALYZE_DELTA",
"ANALYZE_ARCHIVE")
print 'Analyze Datasets Complete'
# Recreate Child versions
# Set variables
parentVersion = "sde.DEFAULT"
# Execute create Child Versions
def CreateChildVersions(workspace, verList):
parentVersion = "sde.DEFAULT"
for version in verList:
arcpy.CreateVersion_management(workspace,
parentVersion,
version,
"Public")
print 'Child Versions Recreated'
# Recreate the child versions before reopening the database
CreateChildVersions(workspace, verList)
# Allow the database to begin accepting connections again
arcpy.AcceptConnections(workspace, True)
print 'Database ready for editing.'
```
#### File: ArcGIS/ArcGISDesktop/reconcile_post_versions.py
```python
import arcpy, os, sys, string
#Populate parent and child versions in the following manner('Parent':'Child', etc). DO NOT LIST DEFAULT
vTree = {'SDE.Parent':'SDE.Child','SDE.QA':'SDE.Edit'}
#Reconcile and post child versions with parent
def RecPostNonDefault(workspace,logWorkspace,logName):
outLog = open(os.path.join(logWorkspace, logName), 'w')
for key, val in vTree.iteritems():
arcpy.ReconcileVersion_management(workspace, val, key,"BY_OBJECT", "FAVOR_TARGET_VERSION", "NO_LOCK_ACQUIRED", "NO_ABORT", "POST")
print "Reconciling and posting {0} to {1}".format(val, key)
outLog.write("Reconciling and posting {0} to {1}".format(val, key))
outLog.write("\n")
outLog.close()
del outLog, key, val
#Reconcile and post with parent
def RecPostDefault(workspace,logWorkspace,logName2,defaultVersion):
outLog = open(os.path.join(logWorkspace, logName2), 'w')
#Reconcile and post parents with DEFAULT
for key, val in vTree.iteritems():
arcpy.ReconcileVersion_management(workspace, key, defaultVersion,"BY_OBJECT", "FAVOR_TARGET_VERSION", "NO_LOCK_ACQUIRED", "NO_ABORT", "POST")
print "Reconciling and posting {0} to DEFAULT".format(key)
outLog.write("Reconciling and posting {0} to DEFAULT".format(key))
outLog.write("\n")
outLog.close()
del outLog, key, val
def DeleteChildVersions(workspace):
arcpy.ClearWorkspaceCache_management()
for key, val in vTree.iteritems():
arcpy.DeleteVersion_management(workspace, val)
print "Deleted {0}".format(val)
def DeleteParentVersions(workspace):
arcpy.ClearWorkspaceCache_management()
for key, val in vTree.iteritems():
arcpy.DeleteVersion_management(workspace, key)
print "Deleted {0}".format(key)
#Compress database
def Compress(workspace,logWorkspace,logName3):
arcpy.ClearWorkspaceCache_management()
outLog = open(os.path.join(logWorkspace, logName3), 'w')
arcpy.Compress_management(workspace)
print ("Compressed database {0}".format(workspace))
outLog.write("Compressed database {0}".format(workspace))
outLog.close()
def RecreateVersions(workspace, defaultVersion):
for key, val in vTree.iteritems():
arcpy.CreateVersion_management(workspace,defaultVersion, key[4:], "PUBLIC")
print "Created version {0}".format(key)
arcpy.CreateVersion_management(workspace, key, val[4:], "PUBLIC")
print "Created version {0}".format(val)
if __name__=="__main__":
workspace = r"Database Connections\MXD2.sde"
defaultVersion = "sde.DEFAULT"
logName = "RecPostLog.txt"
logName2 = "RecPostDefaultLog.txt"
logName3 = "CompressLog.txt"
logWorkspace = r"C:\temp"
RecPostNonDefault(workspace,logWorkspace,logName)
RecPostDefault(workspace,logWorkspace,logName2,defaultVersion)
DeleteChildVersions(workspace)
DeleteParentVersions(workspace)
Compress(workspace,logWorkspace,logName3)
RecreateVersions(workspace, defaultVersion)
``` |
{
"source": "jonhusen/learning-python",
"score": 3
} |
#### File: environments/api/api-test.py
```python
import os
import json
from getpass import getpass
import requests
def audit_umbrelladns(networks_fwrules):
"""Accepts a list of firewall rules for a client
Checks for rules to allow DNS lookups to Umbrella and
deny all other DNS lookups.
Returns a list of clients and a boolean of whether Umbrella DNS
is configured properly"""
umbrelladns_audit = []
host1 = '208.67.222.222/32'
host2 = '208.67.220.220/32'
for customer in networks_fwrules:
customer_result = {
'organizationId': customer['organizationId'],
'organizationName': customer['organizationName']
}
for network in customer['networks']:
umbrella_allow, dns_deny = 'False', 'False'
if 'l3FirewallRules' in network:
for rule in network['l3FirewallRules']:
destcidr = rule['destCidr'].split(",")
if rule['policy'] == 'allow' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'allow' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
if umbrella_allow == 'True' and dns_deny == 'True':
customer_result['umbrellaDns'] = 'True'
else:
customer_result['umbrellaDns'] = 'False'
umbrelladns_audit.append(customer_result)
return umbrelladns_audit
with open('c:\\temp\\fw_rules_test.json', 'r') as read_file:
rules = json.load(read_file)
audit = audit_umbrelladns(rules)
``` |
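To make the expected input shape concrete, a hypothetical self-check that could be appended to the script above (organization values and rule ordering are made up; field names follow the code):

```python
sample_rules = [{
    'organizationId': 'org-1',                 # made-up identifiers
    'organizationName': 'Example Co',
    'networks': [{
        'l3FirewallRules': [
            {'policy': 'allow', 'protocol': 'udp', 'destPort': '53',
             'destCidr': '208.67.222.222/32,208.67.220.220/32'},
            {'policy': 'allow', 'protocol': 'tcp', 'destPort': '53',
             'destCidr': '208.67.222.222/32,208.67.220.220/32'},
            {'policy': 'deny', 'protocol': 'udp', 'destPort': '53', 'destCidr': 'Any'},
            {'policy': 'deny', 'protocol': 'tcp', 'destPort': '53', 'destCidr': 'Any'},
        ]
    }]
}]
print(audit_umbrelladns(sample_rules))
# [{'organizationId': 'org-1', 'organizationName': 'Example Co', 'umbrellaDns': 'True'}]
```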
{
"source": "jonhyde-legl/posthog-foss",
"score": 2
} |
#### File: posthog/api/team.py
```python
import posthoganalytics
from django.contrib.auth import login, password_validation
from django.db import transaction
from rest_framework import generics, permissions, serializers
from posthog.api.user import UserSerializer
from posthog.models import Team, User
from posthog.models.user import EE_MISSING, MULTI_TENANCY_MISSING
class TeamSignupSerializer(serializers.Serializer):
first_name: serializers.Field = serializers.CharField(max_length=128)
email: serializers.Field = serializers.EmailField()
password: serializers.Field = serializers.CharField()
company_name: serializers.Field = serializers.CharField(
max_length=128, required=False, allow_blank=True,
)
email_opt_in: serializers.Field = serializers.BooleanField(default=True)
def validate_password(self, value):
password_validation.validate_password(value)
return value
def create(self, validated_data):
company_name = validated_data.pop("company_name", "")
is_first_user: bool = not User.objects.exists()
realm: str = "cloud" if not MULTI_TENANCY_MISSING else "hosted"
if self.context["request"].user.is_authenticated:
raise serializers.ValidationError("Authenticated users may not create additional teams.")
if not is_first_user and MULTI_TENANCY_MISSING:
raise serializers.ValidationError("This instance does not support multiple teams.")
with transaction.atomic():
user = User.objects.create_user(**validated_data)
self._team = Team.objects.create_with_data(users=[user], name=company_name)
login(
self.context["request"], user, backend="django.contrib.auth.backends.ModelBackend",
)
posthoganalytics.capture(
user.distinct_id, "user signed up", properties={"is_first_user": is_first_user, "is_team_first_user": True},
)
posthoganalytics.identify(
user.distinct_id, properties={"email": user.email, "realm": realm, "ee_available": not EE_MISSING},
)
return user
def to_representation(self, instance):
serializer = UserSerializer(instance=instance)
return serializer.data
class TeamSignupViewset(generics.CreateAPIView):
serializer_class = TeamSignupSerializer
permission_classes = (permissions.AllowAny,)
``` |
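A hypothetical Django-shell sketch (run via `python manage.py shell` inside the project) exercising only the serializer's field validation; `create()` additionally needs a request in the serializer context and a database:

```python
from posthog.api.team import TeamSignupSerializer

s = TeamSignupSerializer(data={
    'first_name': 'Jane',
    'email': 'jane@example.com',
    'password': 'a-sufficiently-long-password',
    'company_name': 'Example Inc',
})
print(s.is_valid(), s.errors)
```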
{
"source": "joni115/neuralFrame",
"score": 3
} |
#### File: neuralFrame/embeddings/utils.py
```python
import numpy as np
from sklearn.decomposition import TruncatedSVD
def compute_pc(X, npc=1):
"""
Compute the principal components. DO NOT MAKE THE DATA ZERO MEAN!
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc=1):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point for a sentences
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==1:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def save_word_embeddings(embeddings, file_to_dump):
"""
dump the embeddings into a .npy file.
:embeddings: the embeddings has to be an numpy type.
"""
np.save(file_to_dump, embeddings, allow_pickle=False)
def load_word_embedding(file_to_load):
"""
load word embeddings from a .npy file.
:file_to_load: a path to a file with the embeddings (.npy)
return the embeddings with a numpy type
"""
return np.load(file_to_load, allow_pickle=False)
```
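`compute_pc`/`remove_pc` implement the principal-component removal step popularised by SIF-style sentence embeddings; a minimal usage sketch on random data (mine, not from the repo):

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 300))       # 100 sentence embeddings of dimension 300
X_clean = remove_pc(X, npc=1)         # project out the first principal component
print(X_clean.shape)                  # (100, 300)
```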
#### File: neuralFrame/processing/truecase.py
```python
from processing.utils import which_encoding
from collections import defaultdict
from sacremoses import MosesTruecaser
class TrueCase:
"""
This class allow you to create a truecase model. the simplest one.
https://en.wikipedia.org/wiki/Truecasing.
"""
def __init__(self, modelfile, sentences=[], infile=''):
"""
:modelfile: the file model.
:infile: sentences to train the model.
If it's not given, the class will train a new model.
:sentences: list of sentences to train the model.
"""
self.modelfile = modelfile
self.infile = infile
self.sentences = sentences
if self.infile or self.sentences:
self.__truecaser = self.__train_truecase()
else:
self.__truecaser = self.__load_truecaser()
def __train_truecase(self):
"""
:infile: path to the train data.
return a model in modelfile.
"""
sentences = self.sentences
if self.infile:
with open(self.infile, 'r', encoding=which_encoding(self.infile)) as train_file:
sentences = train_file.readlines()
assert(len(sentences) != 0)
sentences = [sentence.strip().split() for sentence in sentences]
mtr = MosesTruecaser()
mtr.train(sentences, save_to=self.modelfile, processes=20, progress_bar=True)
return mtr
def __load_truecaser(self):
"""
Load the model file to do truecasing.
Returns a MosesTruecaser instance built from the model file.
"""
return MosesTruecaser(self.modelfile)
# NOTE: is_upper/is_upper_sentence still assume the older frequency-dictionary model
# (self.distribution_words), which the MosesTruecaser-backed loader above no longer provides
def is_upper(self, word):
"""
This method will return if the word must be in upper
:word: string
return true if is an upper word.
"""
return max(self.distribution_words.get(word.lower(), {0: 1, 1:0}).items(), key=lambda p: p[1])[0]
def get_first_word(self, sentence):
"""
get the first word from a sentence
:sentence: string.
return the first word of the sentence and the remaining part as a string
"""
sentence_splited = sentence.split(' ')
first_word = sentence_splited[0]
try:
remaining = sentence_splited[1:]
remaining = ' '.join(remaining)
except IndexError:
remaining = ''
return first_word, remaining
def is_upper_sentence(self, sentence):
"""
This method will return if the first word of the sentences
should be in upper
:sentence: string
return true if the first word of the sentences is upper
"""
first_word, _ = self.get_first_word(sentence)
return self.is_upper(first_word)
def upper_first_word(self, sentence):
"""
This method will upper the first word of the sentence
:sentence: string
return the sentences with the first word uppered.
"""
return sentence[0].upper() + sentence[1:]
def lower_first_word(self, sentence):
"""
This method will lower the first word of the sentence
:sentence: string
return the sentences with the first word lowered.
"""
first_word, sentence = self.get_first_word(sentence)
return first_word.lower() + ' ' + sentence
def true_case_sentence(self, sentence):
"""
True case a single sentence with the distribution_words model.
:sentence: a sequence of strings
return a truecased sentence.
"""
return self.__truecaser.truecase(sentence, return_str=True)
def true_case_sentences(self, sentences):
"""
Truecase a list of sentences
"""
return [self.true_case_sentence(sent) for sent in sentences]
def recaser_sentence(self, source_s, target_s):
"""
The recaser will be depend on the source sentences.
:source_s: source sentence.
:target_s: target sentence.
return a recase of the target sentence.
"""
first_word, _ = self.get_first_word(source_s)
if first_word.istitle():
target_s = self.upper_first_word(target_s)
else:
target_s = self.lower_first_word(target_s)
return target_s
def recaser_sentences(self, source_sents, target_sents):
"""
Recase all the target sentences depend on source sentences.
:source_sents: list of source sentences
:target_sents: list of target sentences
return a list of recases sentences
"""
target_recase = []
for source, target in zip(source_sents, target_sents):
target_recase.append(self.recaser_sentence(source, target))
return target_recase
```
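A hypothetical usage sketch for the class above (the model path and toy corpus are made up; output casing depends on the statistics MosesTruecaser learns):

```python
from processing.truecase import TrueCase

tc = TrueCase('truecase.model', sentences=[
    'Paris is the capital of France .',
    'She visited Paris last summer .',
])
print(tc.true_case_sentence('paris is lovely in spring .'))
# e.g. 'Paris is lovely in spring .'
print(tc.recaser_sentence('Bonjour le monde .', 'hello world .'))
# 'Hello world .' because the source sentence starts with a title-cased word
```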
#### File: neuralFrame/translatorMT/neural.py
```python
import yaml
import tempfile
from opennmt.runner import Runner
from opennmt.models.model import Model
from opennmt.config import load_config, load_model
class Neural:
"""
This class will be wrapped class from openNMT-tf.
https://arxiv.org/abs/1701.02810
"""
def __init__(self, config):
"""
Configuration for the model
:config: the configuration for the model.
-- :config_path: a list of path to configure the model
-- :model_type: a model type
-- :check_point_path: a check_point for the path
"""
self.__config = {}
for config_path in config['config_path']:
with open(config_path, 'r') as f:
self.__config.update(yaml.load(f.read()))
self.__config['model_type'] = config['model_type']
self.__config['checkpoint_path'] = config['checkpoint_path']
model = load_model(self.__config['model_dir'],
model_name=self.__config['model_type'])
self.model = Runner(model, self.__config, auto_config=config['auto_config'])
def infer(self, sentences):
"""
This method is to infer.
:sentences: a list of preprocessed sentences.
return a sentence translated.
"""
# we are using opennmt-tf so we should open a file to write sentences to translated.
file_to_translate = tempfile.NamedTemporaryFile('w', delete=False)
file_to_translate.writelines(sentences)
file_to_translate.close()
file_translated = tempfile.NamedTemporaryFile('w', delete=False)
file_translated.close()
self.model.infer(features_file=file_to_translate.name,
predictions_file=file_translated.name,
checkpoint_path=self.__config['checkpoint_path'])
with open(file_translated.name, 'r') as f:
sentences_translated = f.readlines()
return sentences_translated
``` |
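A hypothetical wiring sketch for the wrapper above (import path, file paths and the model type are placeholders; the YAML must at least define `model_dir` plus the usual OpenNMT-tf data/infer sections, and a trained checkpoint must exist):

```python
from translatorMT.neural import Neural  # import path assumed from the file layout

config = {
    'config_path': ['config/opennmt_base.yml'],  # assumed YAML with model_dir, data and infer settings
    'model_type': 'Transformer',
    'checkpoint_path': None,                     # None -> use the latest checkpoint in model_dir
    'auto_config': True,
}
translator = Neural(config)
print(translator.infer(['hello world\n']))       # sentences must be pre-tokenised and newline-terminated
```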
{
"source": "jonico/pacbot",
"score": 3
} |
#### File: aws/boto3/aws_lambda.py
```python
from core.providers.aws.boto3 import prepare_aws_client_with_given_cred
import boto3
def get_lambda_client(aws_auth_cred):
"""
Returns the client object for AWS Lambda
Args:
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
obj: Lambda Client Obj
"""
return prepare_aws_client_with_given_cred("lambda", aws_auth_cred)
def check_function_exists(function_name, aws_auth_cred):
"""
Check whether the passed Lambda function exists
Args:
function_name (str): AWS Lambda function name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
boolean: True if Lambda exists else False
"""
client = get_lambda_client(aws_auth_cred)
try:
response = client.get_function(FunctionName=function_name)
return True if response['Configuration'] else False
except:
return False
```
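A hypothetical call sketch; the `aws_auth_cred` dict mirrors what the provider's `SystemInput` class (shown later in this dump) collects, and the function name is made up:

```python
aws_auth_cred = {
    'aws_auth_option': 1,                 # 1 = plain access/secret keys
    'aws_access_key': 'AKIA...',          # placeholder
    'aws_secret_key': '...',              # placeholder
    'aws_region': 'us-east-1',
}
print(check_function_exists('pacbot-sample-function', aws_auth_cred))  # True / False
```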
#### File: providers/aws/destroy.py
```python
from core.config import Settings
from core.providers.aws import BaseAction
from core.terraform import PyTerraform
from core import constants as K
from time import sleep
from threading import Thread
from datetime import datetime
import importlib
import sys
class Destroy(BaseAction):
"""
AWS provider for destroy command
Attributes:
executed_with_error (boolean): this is set to True if any error occurs
destroy_start_time (time): Starting time when the execution started
destroy_statuses (dict): Available destroy statuses
exception (Exception obj): exception object if one occurred
terraform_thread (thread): Python thread running the destroy
"""
executed_with_error = False
destroy_start_time = datetime.now()
destroy_statuses = {
"tf_destroy_start": 1,
"execution_finished": 3
}
exception = None
terraform_thread = None
def __init__(self, args, input_obj):
self.args = args
super().__init__(input_obj)
def execute(self, resources, terraform_with_targets, dry_run):
"""
This is the starting method where destroy begins; it is the method invoked by the main destroy command class
Args:
resources (list): Resources to be destroyed
terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied)
dry_run (boolean): Decides whether original destroy should be done
"""
error_response = self.validate_arguments(resources, terraform_with_targets)
if not error_response:
self._create_terraform_provider_file()
self.execute_terraform_destroy(resources, terraform_with_targets, dry_run)
self._delete_terraform_provider_file()
else:
self.exit_with_validation_errors(error_response)
def execute_terraform_destroy(self, resources, terraform_with_targets, dry_run):
"""
Initialises the destroy execution, prints the heading message and calls the thread-creation method
Args:
resources (list): Resources to be destroyed
terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied)
dry_run (boolean): Decides whether original destroy should be done
"""
self.show_step_heading(K.TERRAFORM_DESTROY_STARTED, write_log=False)
if not dry_run:
self.destroy_start_time = datetime.now()
self.current_destroy_status = self.destroy_statuses.get('tf_destroy_start')
self.destroy_resources_and_show_progress(resources, terraform_with_targets)
self._cleanup_destroy()
if self.executed_with_error:
raise Exception(self.exception)
else:
self.show_step_finish(K.TERRAFORM_DESTROY_DRY_RUN)
def _cleanup_destroy(self):
self._delete_terraform_provider_file()
def run_pre_destoy(self, resources):
"""
Call each resource's pre destroy hook in case any pre destroy activity needs to be done
Args:
resources (list): Resources to be destroyed
"""
for resource in resources:
resource.pre_terraform_destroy()
def run_post_destoy(self, resources):
"""
Call each resource's post destroy hook in case any post destroy activity needs to be done
Args:
resources (list): Resources to be destroyed
"""
for resource in resources:
resource.post_terraform_destroy()
resource.remove_terraform()
def destroy_resources_and_show_progress(self, resources, terraform_with_targets):
"""
Creates 2 threads:
1. One for the actual destroy
2. One for displaying the status of the destruction
The terraform destroy call blocks, so it runs in one thread while the other reports progress
Args:
resources (list): Resources to be destroyed
terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied)
"""
self.terraform_thread = Thread(target=self.destroy_resources, args=(list(resources), terraform_with_targets))
progressbar_thread = Thread(target=self.show_progress_status, args=(list(resources), terraform_with_targets))
self.terraform_thread.start()
progressbar_thread.start()
self.terraform_thread.join()
progressbar_thread.join()
def destroy_resources(self, resources, terraform_with_targets):
"""
Start destroying the resources by calling the PyTerraform destroy
Args:
resources (list): Resources to be destroyed
terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied)
"""
destroy_resources = resources if terraform_with_targets else None
self.run_pre_destoy(resources)
# A timeout may cause the first destroy to fail, so attempt as many times as configured in DESTROY_NUM_ATTEMPTS
for attempt in range(Settings.DESTROY_NUM_ATTEMPTS):
self.executed_with_error = False
self.exception = None
try:
PyTerraform().terraform_destroy(destroy_resources)
self.run_post_destoy(resources)
break
except Exception as e:
self.executed_with_error = True
self.exception = e
PyTerraform.save_terraform_output()
self.current_destroy_status = self.destroy_statuses.get('execution_finished')
def show_progress_status(self, resources, terraform_with_targets):
"""
Show status of the destruction to user by printing messages
Args:
resources (list): Resources to be destroyed
terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied)
"""
sleep(1) # To sleep initially for pre-destroy to process
while self.destroy_statuses.get('execution_finished') != self.current_destroy_status and self.terraform_thread.isAlive():
duration = self.CYAN_ANSI + self.get_duration(datetime.now() - self.destroy_start_time) + self.END_ANSI
message = "Time elapsed: %s" % duration
self.show_progress_message(message, 1.5)
self.erase_printed_line()
if self.destroy_statuses.get('execution_finished') == self.current_destroy_status:
if self.executed_with_error:
self.show_step_finish(K.TERRAFORM_DESTROY_ERROR, write_log=False, color=self.ERROR_ANSI)
else:
self.show_step_finish(K.TERRAFORM_DESTROY_COMPLETED, write_log=False, color=self.GREEN_ANSI)
end_time = datetime.now()
self.display_process_duration(self.destroy_start_time, end_time)
```
#### File: providers/aws/input.py
```python
from abc import ABCMeta
from core.config import Settings
from core.mixins import MsgMixin
from core import constants as K
from core.providers.aws.boto3.sts import get_aws_caller_identity
from core.providers.aws.boto3.sts import generate_temp_credentials
import uuid
class SystemInput(MsgMixin, metaclass=ABCMeta):
"""Base input class for installation/destruction/status commands. This class reads required input from user for the process to start"""
AWS_AUTH_CRED = {}
def read_input(self):
"""Read required inputs from user for the process to start"""
self.show_step_heading(K.INPUT_READING_STARTED)
self.AWS_AUTH_CRED['aws_auth_option'] = self.read_aws_auth_mechanism()
if self.AWS_AUTH_CRED['aws_auth_option'] == 1:
self.AWS_AUTH_CRED['aws_access_key'] = self.read_aws_access_key()
self.AWS_AUTH_CRED['aws_secret_key'] = self.read_aws_secret_key()
elif self.AWS_AUTH_CRED['aws_auth_option'] == 2:
self.AWS_AUTH_CRED['assume_role_arn'] = self.read_aws_assume_role_arn()
self.AWS_AUTH_CRED['tmp_credentials'] = generate_temp_credentials(self.AWS_AUTH_CRED['assume_role_arn'])
self.AWS_AUTH_CRED['aws_region'] = self.read_aws_region()
Settings.set('AWS_AUTH_CRED', self.AWS_AUTH_CRED)
self.load_aws_account_details()
self.show_step_finish(K.INPUT_READING_COMPLETED)
def read_aws_auth_mechanism(self):
while True:
self.show_inner_inline_message("\n\t%s" % K.AWS_AUTH_MECHANISM)
self.show_inner_inline_message("\n\t%s" % K.AWS_WITH_KEYS)
self.show_inner_inline_message("\n\t%s" % K.AWS_WITH_ASSUME_ROLE)
self.show_inner_inline_message("\n\t%s" % K.AWS_WITH_EC2_ROLE)
auth_mechanism = int(input("\n\t%s" % K.AWS_CHOOSE_AUTH_OPTION))
if auth_mechanism in [1, 2, 3]:
break
self.show_step_inner_warning(K.AWS_INCORRECT_MECHANISM)
return auth_mechanism
def read_aws_access_key(self):
"""Read AWS access key from user if it is not already set in settings"""
settings_access_key = getattr(Settings, 'AWS_ACCESS_KEY', None)
if settings_access_key is None or settings_access_key == '':
aws_access_key = input("\n\t%s" % K.AWS_ACCESS_KEY_INPUT)
if len(aws_access_key) < 20:
self.show_step_inner_error("\n\t" + K.INVALID_KEY)
raise Exception(K.INVALID_KEY)
else:
aws_access_key = settings_access_key
return aws_access_key
def read_aws_secret_key(self):
"""Read AWS secret key from user if it is not already set in settings"""
settings_secret_key = getattr(Settings, 'AWS_SECRET_KEY', None)
if settings_secret_key is None or settings_secret_key == '':
aws_secret_key = input("\n\t%s" % K.AWS_SECRET_KEY_INPUT)
if len(aws_secret_key) < 25:
self.show_step_inner_error("\n\t" + K.INVALID_KEY)
raise Exception(K.INVALID_KEY)
else:
aws_secret_key = settings_secret_key
return aws_secret_key
def read_aws_assume_role_arn(self):
"""Read AWS secret key from user if it is not already set in settings"""
settings_assume_role_arn = getattr(Settings, 'AWS_ASSUME_ROLE_ARN', None)
if settings_assume_role_arn is None or settings_assume_role_arn == '':
assume_role_arn = input("\n\t%s" % K.AWS_ASSUME_ROLE_INPUT)
else:
assume_role_arn = settings_assume_role_arn
return assume_role_arn
def read_aws_region(self):
"""Read AWS region from user if it is not already set in settings"""
settings_region = getattr(Settings, 'AWS_REGION', None)
if settings_region is None or settings_region == '':
aws_region = input("\n\t%s" % K.AWS_REGION_INPUT)
else:
aws_region = settings_region
Settings.set('AWS_REGION', aws_region)
return aws_region
def load_aws_account_details(self):
"""Find AWS Account ID from the credentials given"""
caller_identity = get_aws_caller_identity(self.AWS_AUTH_CRED)
Settings.set('AWS_ACCOUNT_ID', caller_identity.get('Account'))
Settings.set('CALLER_ARN', caller_identity.get('Arn'))
self.AWS_ACCOUNT_ID = caller_identity.get('Account')
self.CALLER_ARN = caller_identity.get('Arn')
class SystemInstallInput(SystemInput):
"""Input class for installation. This class reads required input from user for the process to start"""
def read_input(self):
super().read_input()
class SystemDestroyInput(SystemInput):
"""Input class for destruction. This class reads required input from user for the process to start"""
def read_input(self):
super().read_input()
# for item in Settings.get('INSTALL_INPUTS_REQUIRED', []):
# key_val = input("\n\t%s" % item['input_msg'])
# if item['required']:
# if key_val.strip() == "":
# raise Exception("Value required for %s" % item['input_key'])
# Settings.set(item['input_key'], key_val)
# setattr(self, item['input_key'], key_val)
class SystemStatusInput(SystemInput):
"""Input class for Status command. This class reads required input from user for the process to start"""
def read_input(self):
Settings.set('AWS_ACCESS_KEY', "TempAccessKey")
Settings.set('AWS_SECRET_KEY', "TempSecretKey")
Settings.set('AWS_REGION', "TempRegion")
Settings.set('AWS_ACCOUNT_ID', "TempAccountId")
```
#### File: resources/aws/ecs.py
```python
from core.terraform.resources import TerraformResource
from core.config import Settings
from core.providers.aws.boto3 import ecs
class ECSClusterResource(TerraformResource):
"""
Base resource class for Terraform AWS ECS cluster resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_ecs_cluster"
setup_time = 600
available_args = {
'name': {'required': True, 'prefix': True, 'sep': "-"},
'tags': {'required': False}
}
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = ecs.check_ecs_cluster_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class ECSTaskDefinitionResource(TerraformResource):
"""
Base resource class for Terraform AWS ECS task definition resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_ecs_task_definition"
setup_time = 600
available_args = {
'family': {'required': True, 'prefix': True, 'sep': "-"},
'container_definitions': {'required': True},
'requires_compatibilities': {'required': True},
'network_mode': {'required': True},
'cpu': {'required': True},
'memory': {'required': True},
'execution_role_arn': {'required': True},
'task_role_arn': {'required': True},
'tags': {'required': False}
}
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('family')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = ecs.check_ecs_task_definition_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class ECSServiceResource(TerraformResource):
"""
Base resource class for Terraform AWS ECS service resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_ecs_service"
setup_time = 600
available_args = {
'name': {'required': True, 'prefix': True, 'sep': "-"},
'task_definition': {'required': True},
'desired_count': {'required': True},
'launch_type': {'required': True},
'cluster': {'required': True},
'network_configuration': {
'required': True,
'inline_args': {
'network_configuration_security_groups': {'required': True, 'tf_arg_key': "security_groups"},
'network_configuration_subnets': {'required': True, 'tf_arg_key': "subnets"},
'network_configuration_assign_public_ip': {'required': True, 'tf_arg_key': "assign_public_ip"},
}
},
'load_balancer': {
'required': True,
'inline_args': {
'load_balancer_target_group_arn': {'required': True, 'tf_arg_key': "target_group_arn"},
'load_balancer_container_name': {'required': True, 'tf_arg_key': "container_name"},
'load_balancer_container_port': {'required': True, 'tf_arg_key': "container_port"},
}
},
'tags': {'required': False},
'propagate_tags': {'required': False}
}
```
#### File: resources/aws/rds.py
```python
from core.terraform.resources import TerraformResource
from core.config import Settings
from core.providers.aws.boto3 import rds
class RDSResource(TerraformResource):
"""
Base resource class for Terraform AWS RDS resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_db_instance"
OUTPUT_LIST = ['endpoint']
setup_time = 600
available_args = {
'identifier': {'required': True, 'prefix': True, 'sep': '-'},
'allocated_storage': {'required': True},
'storage_type': {'required': True},
'engine': {'required': True},
'engine_version': {'required': True, },
'instance_class': {'required': True, },
'name': {'required': True},
'username': {'required': True},
'password': {'required': True},
'db_subnet_group_name': {'required': False},
'option_group_name': {'required': False},
'skip_final_snapshot': {'required': True},
'parameter_group_name': {'required': False},
'vpc_security_group_ids': {'required': False},
'final_snapshot_identifier': {'required': False},
'tags': {'required': False},
'apply_immediately': {'required': False}
}
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "identifier", 'value': self.get_input_attr('identifier')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = rds.check_rds_instance_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class RDSOptionGroupResource(TerraformResource):
"""
Base resource class for Terraform AWS RDS option group resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_db_option_group"
setup_time = 60
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'engine_name': {'required': True},
'major_engine_version': {'required': True},
'option_group_description': {'required': False},
'tags': {'required': False}
}
option_group_description = Settings.RESOURCE_DESCRIPTION
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = rds.check_rds_option_group_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class RDSParameterGroupResource(TerraformResource):
"""
Base resource class for Terraform AWS parameter group resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_db_parameter_group"
setup_time = 60
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'family': {'required': True},
'description': {'required': False},
'tags': {'required': False}
}
description = Settings.RESOURCE_DESCRIPTION
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = rds.check_rds_parameter_group_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class RDSSubnetGroupResource(TerraformResource):
"""
Base resource class for Terraform AWS RDS Subnet group resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_db_subnet_group"
setup_time = 60
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'subnet_ids': {'required': True},
'description': {'required': False},
'tags': {'required': False}
}
description = Settings.RESOURCE_DESCRIPTION
def check_exists_before(self, input, tf_outputs):
"""
        Check whether the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = rds.check_rds_subnet_group_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
```
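The `check_exists_before` implementations above all return the same `(exists, checked_details)` pair. Below is a minimal, hedged sketch of how a caller might consume that contract; the resource values shown are invented placeholders, not names from the original installer.

```python
# Hedged sketch: illustrates only the (exists, checked_details) return contract
# of the check_exists_before() methods above. The example values are invented.
def describe_existence_check(exists, checked_details):
    if exists:
        return "Resource with %s=%s already exists in AWS" % (
            checked_details['attr'], checked_details['value'])
    return "Resource not found in AWS; it can be created"


print(describe_existence_check(True, {'attr': 'name', 'value': 'example-option-group'}))
print(describe_existence_check(False, {'attr': 'identifier', 'value': 'example-rds-instance'}))
```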
#### File: files/scripts/create_docker_image_and_push_to_ecr.py
```python
from utils import get_provider_details, get_docker_push_aws_auth_config, build_docker_image, write_to_log_file
from utils import write_to_debug_log
from docker import Client
import os
def build_and_push_docker_image(provider_json_file, ecr_repo, docker_file, docker_file_dir, log_file):
"""
    Build a docker image and push it to the ECR repo.
Args:
provider_json_file (path): Path of the terraform provider file to get aws credentials
docker_file (str): Docker file name
ecr_repo (str): AWS ECR repo url
docker_file_dir (path): Abs Path of folder where docker file is present
log_file (path): Log file path
Raises:
If failed to push image to ECR
"""
write_to_debug_log(log_file, "Docker image creation and push to ecr repo: %s is started" % str(ecr_repo))
aws_details = get_provider_details("aws", provider_json_file)
auth_config_payload = get_docker_push_aws_auth_config(aws_details, log_file)
docker_client = build_docker_image(docker_file_dir, docker_file, ecr_repo, log_file)
latest_tag = "latest"
pushed = docker_client.push(ecr_repo, tag=latest_tag, auth_config=auth_config_payload)
if pushed:
write_to_debug_log(log_file, "Pushed docker image to repo: %s" % ecr_repo)
else:
write_to_log_file(log_file, " ERROR: failed to push. %s" % ecr_repo)
raise Exception("Failed to push image: %s" % str(pushed))
    delete_docker_images_from_local(os.path.join(docker_file_dir, docker_file), ecr_repo, log_file)
def delete_docker_images_from_local(docker_file_abs_path, ecr_repo, log_file):
    """
    Delete the locally built docker images from the installer machine
    Args:
        docker_file_abs_path (path): Abs path of docker file
        ecr_repo (str): AWS ECR repo url the image was tagged with
        log_file (path): Log file path
    """
docker_client = Client(base_url='unix://var/run/docker.sock')
# Delete original image
docker_client.remove_image(ecr_repo, force=True)
write_to_debug_log(log_file, "Deleted image %s from local !!!" % ecr_repo)
# Delete Base Image
with open(docker_file_abs_path, "r") as f:
lines = f.readlines()
for line in lines:
if "FROM" in line:
image_line = line.strip().strip("\n")
break
try:
local_image_name = image_line.split("FROM ")[1].strip()
docker_client.remove_image(local_image_name, force=True)
write_to_debug_log(log_file, "Deleted image %s from local !!!" % local_image_name)
except:
pass
if __name__ == "__main__":
"""
This script is executed from the provisioner of terraform resource to create docker image and push it
"""
provider_json_file = os.getenv('PROVIDER_FILE')
ecr_repo = os.getenv('ECR_REPOSITORY')
docker_file = os.getenv('DOCKER_FILE')
docker_file_dir = os.getenv('DOCKER_FILE_DIR')
log_file = os.getenv('LOG_FILE', 'debug.log')
build_and_push_docker_image(provider_json_file, ecr_repo, docker_file, docker_file_dir, log_file)
``` |
{
"source": "jonico/st2contrib",
"score": 2
} |
#### File: docker/actions/pull_image.py
```python
from lib.base import DockerBasePythonAction
__all__ = [
'DockerPullImageAction'
]
class DockerPullImageAction(DockerBasePythonAction):
def run(self, repo, tag=None, insecure_registry=False,
auth_username_override=None, auth_password_override=None):
auth_override = (auth_username_override and auth_password_override)
if auth_override:
auth_config = {}
auth_config['username'] = auth_username_override
            auth_config['password'] = auth_password_override
return self.wrapper.pull(repo=repo, tag=tag, insecure_registry=insecure_registry,
auth_config=auth_config)
else:
return self.wrapper.pull(repo=repo, tag=tag, insecure_registry=insecure_registry)
```
#### File: actions/lib/items_selector.py
```python
from curator.api.utils import * # noqa
from curator.api.filter import * # noqa
from items_filter import ItemsFilter
from easydict import EasyDict
from utils import xstr
import sys
import logging
logger = logging.getLogger(__name__)
class ItemsSelector(object):
def __init__(self, client, **opts):
self.opts = EasyDict(opts)
self.client = client
self.ifilter = ItemsFilter().build(**opts)
def _apply_filters(self, source_items, act_on):
"""Applies filters to a list of indices or snapshots.
:param source_items: List of indices or snapshots.
:param act_on: Specifies whether we act on indices or snapshots.
"""
opts = self.opts
all_items_selected = opts.get('all_{0}'.format(act_on), None)
# Choose explicitly chosen indices or snapshots
#
if act_on == 'indices':
explicit_items = opts.index or []
else:
explicit_items = opts.snapshot or []
# I don't care about using only timestring if it's a `dry_run` of show
if not any((xstr(opts.newer_than), xstr(opts.older_than), opts.dry_run)) and \
opts.timestring:
logger.warn('Used only timestring parameter.')
logger.warn('Actions can be performed on all %s matching %s', act_on, opts.timestring)
logger.debug("Full list of %s: %s", act_on, source_items)
if not source_items:
print 'ERROR. No {0} found in Elasticsearch.'.format(act_on)
sys.exit(1)
else:
working_list = source_items
            # No filters have been added and not all items are selected,
            # which means the index or snapshot parameter is used alone.
if not all_items_selected and not self.ifilter.filter_list:
working_list = []
else:
# Otherwise safely apply filtering
working_list = self.ifilter.apply(working_list, act_on=act_on)
            # Include explicit items in the resulting working list.
if explicit_items:
working_list.extend((i for i in explicit_items if i in source_items))
if not working_list:
logger.error('No %s matched provided args: %s', act_on, opts)
print "ERROR. No {} found in Elasticsearch.".format(act_on)
sys.exit(99)
# Make a sorted, unique list of indices/snapshots
return sorted(list(set(working_list)))
def snapshots(self, on_nofilters_showall=False):
"""
Get a list of snapshots to act on from the provided arguments.
"""
if not any((self.opts.all_snapshots, self.opts.snapshot, self.ifilter.filter_list)):
if on_nofilters_showall:
self.opts.all_snapshots = True
else:
print 'Error: At least one snapshot filter parameter must be provided!'
sys.exit(1)
if not self.opts.repository:
print 'Missing required parameter: repository.'
sys.exit(1)
# Get a master-list of snapshots
snapshots = get_snapshots(self.client, repository=self.opts.repository)
return self._apply_filters(snapshots, act_on='snapshots')
def indices(self, on_nofilters_showall=False):
"""
Get a list of indices to act on from the provided arguments.
"""
# Check if we have selection to operate
if not any((self.opts.all_indices, self.opts.index, self.ifilter.filter_list)):
if on_nofilters_showall:
self.opts.all_indices = True
else:
print 'Error: At least one index filter parameter must be provided!'
sys.exit(1)
# Get a master-list of indices
indices = get_indices(self.client)
return self._apply_filters(indices, act_on='indices')
def fetch(self, act_on, on_nofilters_showall=False):
if act_on not in ['indices', 'snapshots']:
raise ValueError('invalid argument: %s', act_on)
if act_on == 'indices':
return self.indices(on_nofilters_showall=on_nofilters_showall) # noqa
else:
return self.snapshots(on_nofilters_showall=on_nofilters_showall) # noqa
```
#### File: fireeye/actions/submit_malware.py
```python
from lib.actions import BaseAction
import json
import os
class SubmitMalwareAction(BaseAction):
def run(self, **kwargs):
payload = json.dumps(kwargs)
file_basename = os.path.basename(kwargs['file_name'])
files = [
('file', (file_basename, open(kwargs['file_name'], 'rb'),
kwargs['file_type'])),
('file', ('analysis.json', payload, 'application/json'))
]
response = self._api_post('submission', files=files)
return response
```
#### File: github/actions/add_status.py
```python
from github import GithubObject
from lib.base import BaseGithubAction
__all__ = [
'AddCommitStatusAction'
]
class AddCommitStatusAction(BaseGithubAction):
def run(self, user, repo, sha, state, target_url=None, description=None):
target_url = target_url or GithubObject.NotSet
description = description or GithubObject.NotSet
user = self._client.get_user(user)
repo = user.get_repo(repo)
commit = repo.get_commit(sha)
commit.create_status(state=state, target_url=target_url,
description=description)
return True
```
#### File: hue/actions/alert.py
```python
from lib import action
class ToggleAction(action.BaseAction):
def run(self, light_id, long_alert=False):
light = self.hue.lights.get(light_id)
if long_alert:
light.alert('lselect')
else:
light.alert()
```
#### File: actions/lib/action.py
```python
from hue import Hue
from st2actions.runners.pythonrunner import Action
class BaseAction(Action):
def __init__(self, config):
super(BaseAction, self).__init__(config)
self.hue = self._get_client()
def _get_client(self):
hue = Hue()
hue.station_ip = self.config['station_ip']
hue.get_state()
return hue
```
#### File: hue/actions/list_bulbs.py
```python
from lib import action
class ListBulbsAction(action.BaseAction):
def run(self):
bulbs = {}
lights = self.hue.state['lights']
for light_id, light in lights.iteritems():
bulbs["l%s" % light_id] = light['name']
return bulbs
```
#### File: hue/actions/rgb.py
```python
from lib import action
class RGBAction(action.BaseAction):
def run(self, light_id, red, green, blue, transition_time):
light = self.hue.lights.get(light_id)
light.rgb(red, green, blue, transition_time)
```
#### File: irc/sensors/irc_sensor.py
```python
import time
import random
import eventlet
from irc.bot import SingleServerIRCBot
from st2reactor.sensor.base import Sensor
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
class StackStormSensorBot(SingleServerIRCBot):
def __init__(self, server_host, server_port, nickname, channels, handlers, logger):
server_list = [(server_host, server_port)]
super(StackStormSensorBot, self).__init__(server_list=server_list, nickname=nickname,
realname=nickname)
self._channels = channels
self._handlers = handlers
self._logger = logger
def on_welcome(self, connection, event):
self._logger.debug('Connected to the server')
for channel in self._channels:
self._logger.debug('Joining #%s...' % (channel))
connection.join(channel)
def on_nicknameinuse(self, connection, event):
new_nickname = '%s-%s' % (connection.get_nickname(), random.randint(1, 1000))
connection.nick(new_nickname)
def on_pubmsg(self, connection, event):
event.timestamp = int(time.time())
handler = self._handlers.get('pubmsg', lambda connection, event: connection)
handler(connection=connection, event=event)
def on_privmsg(self, connection, event):
event.timestamp = int(time.time())
handler = self._handlers.get('privmsg', lambda connection, event: connection)
handler(connection=connection, event=event)
def on_join(self, connection, event):
event.timestamp = int(time.time())
handler = self._handlers.get('join', lambda connection, event: connection)
handler(connection=connection, event=event)
def on_part(self, connection, event):
event.timestamp = int(time.time())
handler = self._handlers.get('part', lambda connection, event: connection)
handler(connection=connection, event=event)
class IRCSensor(Sensor):
def __init__(self, sensor_service, config=None):
super(IRCSensor, self).__init__(sensor_service=sensor_service,
config=config)
self._logger = self._sensor_service.get_logger(__name__)
split = self._config['server'].split(':')
self._server_host = split[0]
self._server_port = int(split[1])
self._nickname = self._config['nickname']
self._channels = self._config['channels']
def setup(self):
handlers = {
'pubmsg': self._handle_pubmsg,
'privmsg': self._handle_privmsg,
'join': self._handle_join,
'part': self._handle_part
}
self._bot = StackStormSensorBot(server_host=self._server_host,
server_port=self._server_port,
nickname=self._nickname, channels=self._channels,
handlers=handlers,
logger=self._logger)
def run(self):
self._bot.start() # pylint: disable=no-member
def cleanup(self):
self._bot.disconnect(msg='Disconnecting') # pylint: disable=no-member
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _handle_pubmsg(self, connection, event):
trigger = 'irc.pubmsg'
payload = {
'source': {
'nick': event.source.nick,
'host': event.source.host
},
'channel': event.target,
'timestamp': event.timestamp,
'message': event.arguments[0]
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _handle_privmsg(self, connection, event):
trigger = 'irc.privmsg'
payload = {
'source': {
'nick': event.source.nick,
'host': event.source.host
},
'timestamp': event.timestamp,
'message': event.arguments[0]
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _handle_join(self, connection, event):
trigger = 'irc.join'
payload = {
'source': {
'nick': event.source.nick,
'host': event.source.host
},
'timestamp': event.timestamp,
'channel': event.target
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _handle_part(self, connection, event):
trigger = 'irc.part'
payload = {
'source': {
'nick': event.source.nick,
'host': event.source.host
},
'timestamp': event.timestamp,
'channel': event.target
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
```
#### File: actions/lib/base.py
```python
from jira import JIRA
# from st2actions.runners.pythonrunner import Action
__all__ = [
'BaseJiraAction'
]
class Action(object):
def __init__(self, config):
self.config = config
class BaseJiraAction(Action):
def __init__(self, config):
super(BaseJiraAction, self).__init__(config=config)
self._client = self._get_client()
def _get_client(self):
config = self.config
options = {'server': config['url']}
rsa_cert_file = config['rsa_cert_file']
rsa_key_content = self._get_file_content(file_path=rsa_cert_file)
oauth_creds = {
'access_token': config['oauth_token'],
'access_token_secret': config['oauth_secret'],
'consumer_key': config['consumer_key'],
'key_cert': rsa_key_content
}
client = JIRA(options=options, oauth=oauth_creds)
return client
def _get_file_content(self, file_path):
with open(file_path, 'r') as fp:
content = fp.read()
return content
```
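The `_get_client` method above reads a handful of keys from the pack configuration. Here is a hedged sketch of that config shape; every value is a placeholder, and in the real pack these come from its YAML config rather than an inline dict.

```python
# Hedged sketch of the pack config keys that BaseJiraAction._get_client() reads.
# Every value below is a placeholder; the real pack loads these from its config.
example_config = {
    'url': 'https://jira.example.com',       # JIRA server base URL
    'rsa_cert_file': '/path/to/jira.pem',    # RSA private key used for OAuth
    'oauth_token': 'placeholder-token',
    'oauth_secret': 'placeholder-secret',
    'consumer_key': 'placeholder-consumer-key',
}

# BaseJiraAction(example_config) would build a jira.JIRA client from these keys
# and read the key file via _get_file_content().
print(sorted(example_config))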
#### File: jmx/actions/invoke_method.py
```python
import os
import re
from st2common.util.shell import run_command
from st2actions.runners.pythonrunner import Action
__all__ = [
'JAR_PATH'
]
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_PATH = os.path.join(CURRENT_DIR, '../extern/cmdline-jmxclient/cmdline-jmxclient-0.10.3.jar')
JAR_PATH = os.path.abspath(JAR_PATH)
class InvokeMBeanMethodAction(Action):
def run(self, hostname, port, bean_name, command, arguments=None,
            username=None, password=None):
args = self._get_args(hostname=hostname, port=port,
bean_name=bean_name, command=command,
arguments=arguments, username=username,
password=password)
command = ' '.join(args)
self.logger.debug('Running command: "%s"' % (command))
exit_code, stdout, stderr = run_command(cmd=args)
if exit_code != 0:
msg = 'Failed to invoke command: %s' % (stderr)
raise Exception(msg)
if re.match('.*Operation .*? not found.*', stderr):
msg = 'Failed to invoke command: %s' % (stderr)
raise Exception(msg)
if 'Passed param count does not match signature count' in stderr:
msg = 'Failed to invoke command: %s' % (stderr)
raise Exception(msg)
self.logger.debug('Command successfully finished. Output: %s' % (stdout))
return True
def _get_args(self, hostname, port, bean_name, command, arguments=None,
                  username=None, password=None):
credentials = []
if username:
credentials.append(username)
if password:
credentials.append(password)
if credentials:
credentials = ':'.join(credentials)
else:
credentials = '-'
url = '%s:%s' % (hostname, port)
if arguments:
arguments = ','.join(arguments)
command = '%s=%s' % (command, arguments)
else:
command = command
args = [
'java',
'-jar',
JAR_PATH,
credentials,
url,
bean_name,
command
]
return args
```
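To make the command-line construction above easier to follow, here is a hedged, standalone re-statement of the `_get_args` logic with example inputs. It is kept separate from the action class so it runs without StackStorm installed; the jar path is a placeholder for `JAR_PATH`.

```python
# Hedged, standalone re-statement of InvokeMBeanMethodAction._get_args(), kept
# separate from the action class so it runs without StackStorm installed.
# The jar path below is a placeholder for JAR_PATH.
def build_jmx_args(hostname, port, bean_name, command, arguments=None,
                   username=None, password=None,
                   jar_path='cmdline-jmxclient-0.10.3.jar'):
    credentials = ':'.join([c for c in (username, password) if c]) or '-'
    url = '%s:%s' % (hostname, port)
    if arguments:
        command = '%s=%s' % (command, ','.join(arguments))
    return ['java', '-jar', jar_path, credentials, url, bean_name, command]


# Anonymous access, no operation arguments:
print(build_jmx_args('localhost', 1099, 'java.lang:type=Memory', 'gc'))
# With credentials and operation arguments:
print(build_jmx_args('localhost', 1099, 'com.example:type=Cache', 'evict',
                     arguments=['users', 'sessions'], username='admin', password='secret'))
```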
#### File: libcloud/actions/list_vms.py
```python
from lib.actions import BaseAction
__all__ = [
'ListVMsAction'
]
NODE_ATTRIBUTES = [
'id',
'name',
'state',
'public_ips',
'private_ips'
]
class ListVMsAction(BaseAction):
api_type = 'compute'
def run(self, credentials):
driver = self._get_driver_for_credentials(credentials=credentials)
vms = driver.list_nodes()
result = []
for vm in vms:
values = vm.__dict__
item = dict([(k, v) for k, v in values.items()
if k in NODE_ATTRIBUTES])
result.append(item)
return result
```
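The dictionary comprehension in `run` keeps only a whitelist of node attributes. A hedged, standalone illustration of that filtering step follows; the node data is fabricated.

```python
# Hedged illustration of the NODE_ATTRIBUTES filtering performed in
# ListVMsAction.run(); the node data below is fabricated.
NODE_ATTRIBUTES = ['id', 'name', 'state', 'public_ips', 'private_ips']

fake_node_dict = {
    'id': 'vm-123', 'name': 'web-1', 'state': 'running',
    'public_ips': ['203.0.113.10'], 'private_ips': ['10.0.0.5'],
    'driver': '<NodeDriver placeholder>',   # dropped by the filter
    'extra': {'flavor': 'm1.small'},        # dropped by the filter
}

item = dict([(k, v) for k, v in fake_node_dict.items() if k in NODE_ATTRIBUTES])
print(sorted(item))   # only the five whitelisted keys survive
```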
#### File: mmonit/actions/action_host.py
```python
from lib.mmonit import MmonitBaseAction
class MmonitActionHost(MmonitBaseAction):
def run(self, host_id, action, service):
self.login()
data = {"service": service, "id": host_id, "action": action}
self.session.post("{}/admin/hosts/action".format(self.url), data=data)
self.logout()
return True
```
#### File: mmonit/actions/get_uptime_host.py
```python
from lib.mmonit import MmonitBaseAction
class MmonitGetUptimeHost(MmonitBaseAction):
def run(self, host_id, uptime_range=0, datefrom=0, dateto=0):
self.login()
if datefrom != 0 and uptime_range != 12:
raise Exception("If datefrom is set, range should be 12")
data = {"id": host_id, "range": uptime_range, "datefrom": datefrom, "dateto": dateto}
req = self.session.post("{}/reports/uptime/get".format(self.url), data=data)
try:
return req.json()
except Exception:
raise
finally:
self.logout()
```
#### File: mmonit/actions/test_connection_to_host.py
```python
from lib.mmonit import MmonitBaseAction
class MmonitTestConnectionHost(MmonitBaseAction):
def run(self, ipaddr, ssl, port, monituser, monitpassword):
self.login()
data = {"ipaddr": ipaddr, "port": port, "ssl": ssl, "monituser": monituser,
"monitpassword": <PASSWORD>}
req = self.session.post("{}/admin/hosts/test".format(self.url), data=data)
try:
return req.json()
except Exception:
raise
finally:
self.logout()
```
#### File: actions/lib/actions.py
```python
try:
import requests
except ImportError:
message = ('Missing "requests", please install it using pip:\n'
'pip install requests')
raise ImportError(message)
try:
import json
except ImportError:
message = ('Missing "json", please install it using pip:\n'
'pip install requests')
raise ImportError(message)
from st2actions.runners.pythonrunner import Action
__all__ = [
'OctopusDeployAction',
]
class OctopusDeployAction(Action):
def __init__(self, config):
super(OctopusDeployAction, self).__init__(config)
self.client = self._init_client()
def _init_client(self):
api_key = self.config['api_key']
host = self.config['host']
port = self.config['port']
return OctopusDeployClient(api_key=api_key, host=host, port=port)
def _build_uri(self):
# big assumption but it'll cover 99% case,
# as octopus runs https by default
        start = "http://" if self.client.port == 80 else "https://"
return start + self.client.host + ":" + str(self.client.port) + "/api/"
def make_post_request(self, action, payload):
response = requests.post(self._build_uri() + action,
data=json.dumps(payload), verify=False,
headers=self.client.headers)
return response.json()
def make_get_request(self, action):
response = requests.get(self._build_uri() + action,
verify=False,
headers=self.client.headers)
return response.json()
class OctopusDeployClient(object):
def __init__(self, api_key, host, port):
self.api_key = api_key
self.host = host
self.port = port
self.headers = {'X-Octopus-ApiKey': self.api_key,
'Content-type': 'application/json',
'Accept': 'text/plain'}
```
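A hedged, standalone re-statement of the URI-building rule used by `_build_uri` above makes the port-based scheme selection explicit; the host and port values are placeholders.

```python
# Hedged, standalone re-statement of OctopusDeployAction._build_uri();
# host and port values are placeholders.
def build_octopus_api_uri(host, port):
    scheme = "http://" if port == 80 else "https://"
    return scheme + host + ":" + str(port) + "/api/"


print(build_octopus_api_uri("octopus.example.com", 80))   # http://octopus.example.com:80/api/
print(build_octopus_api_uri("octopus.example.com", 443))  # https://octopus.example.com:443/api/
```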
#### File: openhab/actions/set_state.py
```python
from lib.action import BaseAction
class SetStateAction(BaseAction):
def run(self, item, state):
self._put(item, state)
return {'status': 'ok'}
```
#### File: actions/lib/openstack.py
```python
import os
import re
import yaml
import types
from keystoneclient.auth.identity import v2
from keystoneclient import session
from keystoneclient.v2_0.client import Client as keyclient
class OpenStack(object):
def __init__(self, conf):
config_file = os.path.join(os.path.dirname(__file__), conf)
try:
fh = open(config_file)
config = yaml.load(fh)
fh.close()
except Exception as e:
print("Error reading config file %s: %s" % (conf, e))
self.username = config['username']
self.password = config['password']
self.tenant = config['tenant']
self.auth_url = config['auth_url']
self.endpoints = config['endpoints']
self.act_name = ""
def getSession(self):
self.auth = v2.Password(auth_url=self.auth_url,
username=self.username,
password=<PASSWORD>,
tenant_name=self.tenant)
return session.Session(auth=self.auth)
def getToken(self):
session = self.getSession()
return session.get_token()
def getEndpoint(self, service):
token = self.getToken()
client = keyclient(auth_url=self.auth_url, token=token)
print(client.services.list())
def parseAction(self, instance, parts):
args, parts = self.parseArgs(parts)
foo = None
try:
self.act_name = parts[0]
foo = getattr(instance, self.act_name)
except AttributeError:
print("That look like an incorrect tiddly bit.")
else:
parts.pop(0)
for p in parts:
try:
foo = getattr(foo, p)
except AttributeError:
print("That tiddly bit be wrong")
return foo, args
def parseOutput(self, output):
results = {self.act_name: []}
if hasattr(
output,
'__getitem__') or isinstance(
output,
types.GeneratorType):
for result in output:
if hasattr(result, 'to_dict'):
result = result.to_dict()
results[self.act_name].append(result)
else:
if hasattr(output, 'to_dict'):
results[self.act_name] = output.to_dict()
else:
results[self.act_name] = output
return results
def run(self, instance, parts):
action, args = self.parseAction(instance, parts)
return self.parseOutput(action(**args))
def parseArgs(self, arg):
arg.pop(0)
args = {}
parts = []
for a in arg:
if re.search('=', a):
argsplit = a.split('=')
args[argsplit[0]] = argsplit[1]
else:
parts.append(a)
return args, parts
```
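The argument parsing in `parseArgs` above splits `key=value` tokens from positional parts after discarding the first element. Here is a hedged, standalone re-statement of that behaviour; the token list is an invented example, independent of the OpenStack clients.

```python
# Hedged, standalone re-statement of OpenStack.parseArgs(); the token list is
# an invented example of how an action's input might be split.
def parse_args(tokens):
    tokens = list(tokens)
    tokens.pop(0)              # the first token is always discarded, as in parseArgs()
    kwargs, parts = {}, []
    for token in tokens:
        if '=' in token:
            argsplit = token.split('=')
            kwargs[argsplit[0]] = argsplit[1]
        else:
            parts.append(token)
    return kwargs, parts


print(parse_args(['nova', 'servers', 'list', 'detailed=True']))
# -> ({'detailed': 'True'}, ['servers', 'list'])
```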
#### File: packer/actions/validate.py
```python
from lib.actions import BaseAction
class ValidateAction(BaseAction):
def run(self, packerfile, cwd=None, exclude=None, only=None, variables=None,
variables_file=None):
if cwd:
self.set_dir(cwd)
p = self.packer(packerfile)
return p.validate(syntax_only=False)
```
#### File: puppet/actions/cert_clean.py
```python
from lib.python_actions import PuppetBasePythonAction
__all__ = [
'PuppetCertCleanAction'
]
class PuppetCertCleanAction(PuppetBasePythonAction):
def run(self, environment, host):
success = self.client.cert_clean(environment=environment, host=host)
return success
```
#### File: puppet/actions/cert_revoke.py
```python
from lib.python_actions import PuppetBasePythonAction
__all__ = [
'PuppetCertRevokeAction'
]
class PuppetCertRevokeAction(PuppetBasePythonAction):
def run(self, environment, host):
success = self.client.cert_revoke(environment=environment, host=host)
return success
```
#### File: actions/lib/python_actions.py
```python
from st2actions.runners.pythonrunner import Action
from lib.puppet_client import PuppetHTTPAPIClient
class PuppetBasePythonAction(Action):
def __init__(self, config):
super(PuppetBasePythonAction, self).__init__(config=config)
self.client = self._get_client()
def _get_client(self):
master_config = self.config['master']
auth_config = self.config['auth']
client = PuppetHTTPAPIClient(master_hostname=master_config['hostname'],
master_port=master_config['port'],
client_cert_path=auth_config['client_cert_path'],
client_cert_key_path=auth_config['client_cert_key_path'],
ca_cert_path=auth_config['ca_cert_path'])
return client
```
#### File: servicenow/actions/insert.py
```python
from lib.actions import BaseAction
class InsertAction(BaseAction):
def run(self, table, payload):
self.client.table = table # pylint: disable=no-member
response = self.client.insert(payload) # pylint: disable=no-member
return response
```
#### File: st2/actions/rules_list.py
```python
from lib.action import St2BaseAction
from lib.formatters import format_client_list_result
__all__ = [
'St2RulesListAction'
]
EXCLUDE_ATTRIBUTES = [
'trigger',
'criteria',
'action'
]
def format_result(result):
return format_client_list_result(result=result, exclude_attributes=EXCLUDE_ATTRIBUTES)
class St2RulesListAction(St2BaseAction):
def run(self, pack=None, limit=10):
kwargs = {}
kwargs['limit'] = limit
if pack:
kwargs['pack'] = pack
result = self._run_client_method(method=self.client.rules.get_all,
method_kwargs=kwargs,
format_func=format_result)
return result
```
#### File: trello/actions/find_card_by_name.py
```python
from lib import action
class FindCardByNameAction(action.BaseAction):
def run(self, name, board_id, list_id, api_key=None, token=None):
if api_key:
self._set_creds(api_key=api_key, token=token)
cards = []
board = self._client().get_board(board_id)
lst = board.get_list(list_id)
for card in lst.list_cards():
if card.name == name and not card.closed:
cards.append(card.id)
if len(cards) == 0:
return False
else:
return cards
```
#### File: trello/actions/move_card.py
```python
from lib import action
class MoveCardAction(action.BaseAction):
def run(self, card_id, target_list_id, api_key=None, token=None):
if api_key:
self._set_creds(api_key=api_key, token=token)
card = self._client().get_card(card_id)
card.change_list(list_id=target_list_id)
return card
``` |
{
"source": "jonidakolgjini/password_manager",
"score": 4
} |
#### File: jonidakolgjini/password_manager/main.py
```python
from tkinter import *
from tkinter import messagebox
from random import randint, choice, shuffle
import pyperclip
import json
# ---------------------------- SEARCH JSON FILE ------------------------------- #
def find_password():
website = website_entry.get()
try:
with open("password_data.json", "r") as data:
# Read old data
json_data = json.load(data)
except FileNotFoundError:
messagebox.showinfo(title="Error", message="No Data File Found")
else:
if website in json_data:
email = json_data[website]["email"]
password = json_data[website]["password"]
messagebox.showinfo(title=website, message=f"Email: {email}\nPassword: {password}")
else:
messagebox.showinfo(title="Error", message="No data for this file exists")
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
def generate_rand_password():
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
password_list = []
[password_list.append(choice(letters)) for letter in range(randint(8, 10))]
[password_list.append(choice(symbols)) for symbol in range(randint(2, 4))]
[password_list.append(choice(numbers)) for num in range(randint(2, 4))]
shuffle(password_list)
password = "".join(password_list)
password_entry.insert(0, password)
pyperclip.copy(password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save():
website = website_entry.get()
email = emailuser_entry.get()
password = password_entry.get()
new_data = {
website: {
"email": email,
"password": password
}
}
if len(website) == 0 or len(password) == 0:
messagebox.showinfo(title="Oops", message="Please don't leave any fields empty!")
else:
try:
with open("password_data.json", "r") as data:
# Read old data
json_data = json.load(data)
except FileNotFoundError:
with open("password_data.json", "w") as data:
json.dump(new_data, data, indent=4)
else:
# Updating old data with new data
json_data.update(new_data)
with open("password_data.json", "w") as data:
# saving updated data
json.dump(json_data, data, indent=4)
finally:
website_entry.delete(0, END)
password_entry.delete(0, END)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Password Manager")
window.config(padx=50, pady=50)
canvas = Canvas(width=200, height=200, highlightthickness=0)
logo_img = PhotoImage(file="logo.png")
canvas.create_image(100, 100, image=logo_img)
canvas.grid(column=1, row=0)
# Labels
website_label = Label(text="Website:")
website_label.grid(column=0, row=1)
emailuser_label = Label(text="Email/ Username:")
emailuser_label.grid(column=0, row=2)
password_label = Label(text="Password:")
password_label.grid(column=0, row=3)
# Entries
website_entry = Entry(width=21)
website_entry.focus()
website_entry.grid(column=1, row=1)
emailuser_entry = Entry(width=38)
emailuser_entry.insert(0, "<EMAIL>")
emailuser_entry.grid(column=1, row=2, columnspan=2)
password_entry = Entry(width=21)
password_entry.grid(column=1, row=3)
# Buttons
generate_password = Button(text="Generate Password", command=generate_rand_password)
generate_password.grid(column=2, row=3)
add_button = Button(text="Add", width=36, command=save)
add_button.grid(column=1, row=4, columnspan=2)
search_button = Button(text="Search", command=find_password, padx=38)
search_button.grid(column=2, row=1, columnspan=1)
window.mainloop()
``` |
{
"source": "joniemi/pytest-plt",
"score": 3
} |
#### File: pytest_plt/tests/test_plt.py
```python
import numpy as np
def test_rectification(plt):
"""The test shown in the documentation.
Included here to be extra sure that the example works when copy-pasted
into a user's tests, and to easily generate the plot that we display
in documentation.
"""
values = list(range(-10, 11))
rectified = [v if v > 0 else 0 for v in values]
assert all(v >= 0 for v in rectified)
plt.plot(values, label="Original")
plt.plot(rectified, label="Rectified")
plt.legend()
# Use png to render easier in docs
plt.saveas = "%s.png" % (plt.saveas[:-4],)
def test_mock_iter(plt):
fig = plt.figure()
for _ in enumerate(fig.axes):
assert False, "Mock object iterating forever"
plt.saveas = None
def test_simple_plot(plt):
plt.plot(np.linspace(0, 1, 20), np.linspace(0, 2, 20))
def test_bbox_extra_artists(plt):
plt.plot(np.linspace(0, 1, 20), np.linspace(0, 2, 20), label="line")
legend = plt.legend(loc="upper left", bbox_to_anchor=(1.0, 1.0))
plt.bbox_extra_artists = (legend,)
def test_saveas(plt):
assert plt.saveas.endswith("saveas.pdf")
plt.saveas = None
def test_saveas_pickle(plt):
plt.subplots(2, 3) # The pickled figure will contain six axes.
plt.saveas = "%s.pickle" % (plt.saveas[:-4],)
``` |
{
"source": "jonieson/pythonDemo",
"score": 4
} |
#### File: pythonDemo/BaseLogic/functionDemo.py
```python
def printParama(str):
print str
return ;
#Call the function
printParama('i love code')
#Types belong to objects; variables themselves have no type
a = [1,2,3,4] #[1,2,3,4] is of type list
a = 'hello jonieson' #'hello jonieson' is of type string
#a is a variable and has no type of its own
print a
#Passing an immutable argument
def changeInt(num):
num = 10
numTwo = 2
changeInt(numTwo)
print numTwo
#Passing a mutable argument
def changeArr(arr):
arr.append(5)
    print 'Inside the function: %s' % arr
return
list = [1,2,3,4]
changeArr(list)
print 'Outside the function: %s' % list
``` |
{
"source": "jonigirl/Badb",
"score": 3
} |
#### File: Badb/cogs/dataIO.py
```python
import json
import logging
from discord.ext import commands
from discord.ext import vbu
class DataIO(vbu.Cog):
def __init__(self, bot):
self.bot = bot
def fileIO(filename, IO, data=None):
if IO == "save" and data is None:
with open(filename, encoding="utf-8", mode="w") as f:
f.write(
json.dumps(data, indent=4, sort_keys=True, separators=(",", " : "))
)
elif IO == "load" and data is None:
with open(filename, encoding="utf-8", mode="r") as f:
return json.loads(f.read())
elif IO == "check" and data is None:
try:
with open(filename, encoding="utf-8", mode="r") as f:
return True
        except Exception:
            logging.exception("Could not open %s", filename)
return False
else:
raise ("Invalid fileIO call")
def get_value(filename, key):
with open(filename, encoding="utf-8", mode="r") as f:
data = json.loads(f.read())
return data[key]
def set_value(filename, key, value):
data = fileIO(filename, "load")
data[key] = value
fileIO(filename, "save", data)
return True
def setup(bot: vbu.Bot):
x = DataIO(bot)
bot.add_cog(x)
```
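A short, hedged usage sketch for the JSON helpers above: it mirrors the save/load branches of `fileIO` as plain file operations so it can run on its own. The `demo_settings.json` path and the data are throw-away placeholders, not files shipped with the bot.

```python
# Hedged usage sketch for the fileIO-style helpers above; "demo_settings.json"
# is a throw-away path, not a file shipped with the bot.
import json

def demo_round_trip(path="demo_settings.json"):
    data = {"prefix": "!", "welcome_channel": 1234567890}
    # mirrors fileIO(path, "save", data)
    with open(path, encoding="utf-8", mode="w") as f:
        f.write(json.dumps(data, indent=4, sort_keys=True, separators=(",", " : ")))
    # mirrors fileIO(path, "load")
    with open(path, encoding="utf-8", mode="r") as f:
        loaded = json.loads(f.read())
    assert loaded["prefix"] == "!"
    return loaded


if __name__ == "__main__":
    print(demo_round_trip())
```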
#### File: Badb/cogs/mod.py
```python
import asyncio
import discord
import json
import time
from discord.ext import commands
from discord.ext import vbu
class Moderation(vbu.Cog):
def __init__(self, bot):
self.bot = bot
@vbu.command(usage="[#channel/id]", name="lock", description="Locks a channel")
@commands.has_permissions(manage_messages=True)
async def lock(self, ctx, channel: discord.TextChannel = None):
"""Locks a text channel.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param channel: The channel to lock; defaults to the current channel.
"""
channel = channel or ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
overwrite.send_messages = False
await channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
await ctx.send(f"{channel.mention} locked!")
@vbu.command(usage="[#channel/id]", name="unlock", description="Unlocks a channel")
@commands.has_permissions(manage_messages=True)
async def unlock(self, ctx, channel: discord.TextChannel = None):
"""Unlocks a text channel.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param channel: The channel to unlock; defaults to the current channel.
"""
channel = channel or ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
overwrite.send_messages = True
await channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
await ctx.send(f"{channel.mention} unlocked!")
@vbu.command(
name="kick",
usage="<member> [reason]",
description="Kicks a user from the server",
)
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason="No reason provided"):
"""Kicks a user from the server.
.. Note::
Must have kick members permission
:param ctx: The invocation context.
        :param member: The member to kick.
        :param reason: The reason for the kick.
"""
if member.id == ctx.author.id:
await ctx.send("You cannot kick yourself!")
return
await member.kick(reason=reason)
await ctx.message.delete()
kick = discord.Embed(
description=f"**A member has been kicked.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {member.mention}",
colour=discord.Colour.blue(),
)
kick.add_field(name="Reason", value=reason, inline=False)
await ctx.send(embed=kick)
@vbu.command(
usage="<member> [reason]", name="ban", description="Bans a user from the server"
)
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason="No reason provided"):
"""Bans a member from the server.
.. Note::
Must have ban members permission
:param ctx: The invocation context.
        :param member: The member to ban.
        :param reason: The reason for the ban.
"""
if member.id == ctx.author.id:
await ctx.send("You cannot ban yourself!")
return
await member.ban(reason=reason, delete_message_days=0)
ban = discord.Embed(
description=f"**A member has been banned.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {member.mention}",
colour=discord.Colour.blue(),
)
ban.add_field(name="Reason", value=reason, inline=False)
await ctx.send(embed=ban)
@vbu.command(
usage="<member> [reason]",
name="softban",
description="Bans a user from the server and deletes all of his messages of the last 7 days",
)
@commands.has_permissions(ban_members=True)
async def softban(
self, ctx, member: discord.Member, *, reason="No reason provided"
):
"""Bans a member from the server and deletes
all of his messages of the last 7 days.
.. Note::
Must have ban members permission
:param ctx: The invocation context.
        :param member: The member to ban.
        :param reason: The reason for the ban.
"""
if member.id == ctx.author.id:
await ctx.send("You cannot ban yourself!")
return
await member.ban(reason=reason, delete_message_days=7)
softban = discord.Embed(
description=f"**A member has been banned.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {member.mention}",
colour=discord.Colour.blue(),
)
softban.add_field(name="Reason", value=reason, inline=False)
await ctx.send(embed=softban)
@vbu.command(
usage="<id>", name="unban", description="Unbans a user from the server"
)
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, *, user_id: int):
"""Unbans a member from the server.
.. Note::
Must have ban members permission
:param ctx: The invocation context.
        :param user_id: The ID of the user to unban.
"""
        user = await self.bot.fetch_user(user_id)
await ctx.guild.unban(user)
unban = discord.Embed(
description=f"**A user has been unbanned.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {user.mention}",
colour=discord.Colour.blue(),
)
await ctx.send(embed=unban)
@vbu.command(
usage="amount", name="clear", description="Deletes a certain number of messages"
)
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount=0):
"""Clears channel history of x messages.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param amount: The number of messages to delete.
"""
await ctx.channel.purge(limit=amount + 1)
await ctx.send(f"I have cleared **{amount}** messages.", delete_after=3)
@vbu.command(
usage="<member> [reason]", name="mute", description="Mutes a user on the server"
)
@commands.has_permissions(manage_messages=True)
async def mute(self, ctx, member: discord.Member, *, reason="No reason provided"):
"""Mutes a member and gived the the Muted role.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param member: The member to mute.
        :param reason: The reason for the mute.
"""
guild = ctx.guild
mutedRole = discord.utils.get(guild.roles, name="Muted")
if not mutedRole:
mutedRole = await guild.create_role(name="Muted")
for channel in guild.channels:
await channel.set_permissions(
mutedRole,
speak=False,
send_messages=False,
read_message_history=True,
read_messages=False,
)
mute = discord.Embed(
description=f"**A member has been muted.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {member.mention}",
colour=discord.Colour.blue(),
)
mute.add_field(name="Reason", value=reason)
await member.add_roles(mutedRole, reason=reason)
await ctx.send(embed=mute)
@vbu.command(usage="<member>", name="Unmutes a user on the server")
@commands.has_permissions(manage_messages=True)
async def unmute(self, ctx, member: discord.Member):
"""Unmutes a member that has been prevously muted.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param member: The member to unmute.
"""
mutedRole = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(mutedRole)
unmute = discord.Embed(
description=f"**A member has been unmuted.**\n\n"
f"Moderator: {ctx.author.mention}\n"
f"Member: {member.mention}",
colour=discord.Colour.blue(),
)
await ctx.send(embed=unmute)
@vbu.command(
name="nuke", description="Clones a text channel and then deletes the old one"
)
@commands.has_permissions(administrator=True)
@commands.cooldown(1, 60, commands.BucketType.guild)
async def nuke(self, ctx):
"""Clones a test channel the nukes the old one.
.. Note::
Must be administrator
:param ctx: The invocation context.
"""
channelthings = [ctx.channel.category, ctx.channel.position]
await ctx.channel.clone()
await ctx.channel.delete()
nukedchannel = channelthings[0].text_channels[-1]
await nukedchannel.edit(position=channelthings[1])
await nukedchannel.send(f"Channel was nuked by {ctx.author.mention}")
@vbu.command(
usage="<add/remove> <member> <role>",
name="role",
description="Adds or removes a role from a user",
)
@commands.has_permissions(manage_roles=True)
async def role(self, ctx, addORremove, member: discord.Member, role: discord.Role):
"""Adds or Removes roles to members.
.. Note::
Must have manage roles permission
:param ctx: The invocation context.
        :param addORremove: Either "add" or "remove".
        :param member: The target member.
        :param role: The role to add or remove.
"""
addORremove = addORremove.lower()
if addORremove == "add":
if role == ctx.author.top_role:
return await ctx.send(
"That role has the same position as your top role!"
)
if role in member.roles:
return await ctx.send("The member already has this role assigned!")
if role.position >= ctx.guild.me.top_role.position:
return await ctx.send(
"This role is higher than my role, move it to the top!"
)
await member.add_roles(role)
await ctx.send(f"I have added {member.mention} the role {role.mention}")
if addORremove == "remove":
if role == ctx.author.top_role:
return await ctx.send(
"That role has the same position as your top role!"
)
if role not in member.roles:
return await ctx.send("The member does not have this role!")
if role.position >= ctx.guild.me.top_role.position:
return await ctx.send(
"This role is higher than my role, move it to the top!"
)
await member.remove_roles(role)
await ctx.send(f"I have removed {member.mention} the role {role.mention}")
@vbu.command(usage="<seconds>")
@commands.has_permissions(manage_messages=True)
async def slowmode(self, ctx, seconds: int):
"""Invokes slowmode for the channel for the stated seconds.
.. Note::
Must have manage messages permission
:param ctx: The invocation context.
        :param seconds: The slowmode delay in seconds.
"""
await ctx.channel.edit(slowmode_delay=seconds)
await ctx.send(
f"Slowmode is now enabled in this channel with a chat delay of {seconds} seconds."
)
@vbu.command(name="say", hidden=True)
@commands.is_owner()
async def repeat_message(self, ctx, *, msg: str):
"""Repeats the message as the bot. The invoking message is deleted.
.. Note::
Only the bot owner can use this.
:param ctx: The invocation context.
:param msg: The message the bot will repeat.
"""
await ctx.message.delete()
await ctx.send(msg)
@vbu.command(name="spam", hidden=True)
@commands.has_permissions(manage_messages=True)
async def delete_spam_messages(self, ctx):
"""Deletes duplicate messages in the channel.
.. Note::
Messages are checked per author.
The original message will remain.
:param ctx: The invocation context.
"""
msgs = []
spam = []
async for msg in ctx.channel.history(limit=50):
c = str(msg.author) + msg.content
if c in msgs:
spam.append(msg)
else:
msgs.append(c)
spam.append(ctx.message)
await ctx.channel.delete_messages(spam)
if len(spam) > 1:
embed = quickembed.info(
"```Deleted {} spam messages```".format(len(spam)),
DiscordUser(ctx.author),
)
self.bot.log(embed=embed)
def setup(bot: vbu.Bot):
x = Moderation(bot)
bot.add_cog(x)
```
#### File: Badb/cogs/scambanner.py
```python
import re
import discord
from discord.ext import vbu
SCAM_REGEX = re.compile(
r"""
(gift|nitro|airdrop|@everyone|:\))
.+?
(
(https?://)(\S*?)
(
((?:d|cl)s?[li](?:sc|cs|zc|cz|s|c|sck)r?oc?r?c?(?:d|cl)?s?)
(\S*?)\.
(com|pw|org|app|info|net|gift|codes|click|club)
)
)
""",
re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE,
)
class ScamBanner(vbu.Cog):
@vbu.Cog.listener()
async def on_message(self, message: discord.Message):
# Ignore DMs
if message.guild is None:
return
if message.guild.id != 800996069525028944:
return
# Ignore people with roles
try:
assert isinstance(message.author, discord.Member)
except AssertionError:
return # They were already banned
if len(message.author.roles) > 1:
return
# See if we match
match = SCAM_REGEX.search(message.content)
if not match:
return
matched_domain = match.group(5).lower()
# Leave the legit links alone
valid_links = [
"discord.gift",
"discordapp.com",
"discord.com",
"discord.gg",
]
if matched_domain in valid_links:
return
# Ban the user
try:
await message.author.ban(reason=f"Suspected scam link ({matched_domain})")
except discord.HTTPException:
pass
def setup(bot: vbu.Bot):
x = ScamBanner(bot)
bot.add_cog(x)
``` |
{
"source": "joniknsk/Office365-REST-Python-Client",
"score": 2
} |
#### File: examples/sharepoint/file_operations.py
```python
import json
import os
from settings import settings
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.caml_query import CamlQuery
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.file import File
from office365.sharepoint.file_creation_information import FileCreationInformation
from office365.sharepoint.list_data_service import ListDataService
def read_folder_and_files_alt(context, list_title):
"""Read a folder example"""
list_obj = context.web.lists.get_by_title(list_title)
qry = CamlQuery.create_all_items_query()
items = list_obj.get_items(qry)
context.load(items)
context.execute_query()
for cur_item in items:
print("File name: {0}".format(cur_item.properties["Title"]))
def read_folder_and_files(context, list_title):
"""Read a folder example"""
list_obj = context.web.lists.get_by_title(list_title)
folder = list_obj.root_folder
context.load(folder)
context.execute_query()
print("List url: {0}".format(folder.properties["ServerRelativeUrl"]))
files = folder.files
context.load(files)
context.execute_query()
for cur_file in files:
print("File name: {0}".format(cur_file.properties["Name"]))
folders = context.web.folders
context.load(folders)
context.execute_query()
for folder in folders:
print("Folder name: {0}".format(folder.properties["Name"]))
def upload_file_alt(target_folder, name, content):
context = target_folder.context
info = FileCreationInformation()
info.content = content
info.url = name
info.overwrite = True
target_file = target_folder.files.add(info)
context.execute_query()
return target_file
def upload_file(context):
upload_into_library = True
path = "../tests/data/SharePoint User Guide.docx"
with open(path, 'rb') as content_file:
file_content = content_file.read()
if upload_into_library:
list_title = "Documents"
target_folder = context.web.lists.get_by_title(list_title).root_folder
file = upload_file_alt(target_folder, os.path.basename(path), file_content)
print("File url: {0}".format(file.properties["ServerRelativeUrl"]))
else:
target_url = "/Shared Documents/{0}".format(os.path.basename(path))
File.save_binary(context, target_url, file_content)
def download_file(context):
response = File.open_binary(context, "/Shared Documents/SharePoint User Guide.docx")
with open("./data/SharePoint User Guide.docx", "wb") as local_file:
local_file.write(response.content)
if __name__ == '__main__':
site_url = 'https://mediadev8.sharepoint.com/'
ctx_auth = AuthenticationContext(url=site_url)
if ctx_auth.acquire_token_for_user(username=settings['user_credentials']['username'],
                                       password=settings['user_credentials']['password']):
# if ctx_auth.acquire_token_for_app(client_id=settings['client_credentials']['client_id'],
# client_secret=settings['client_credentials']['client_secret']):
ctx = ClientContext(site_url, ctx_auth)
# read_folder_and_files(ctx, "Documents")
# read_folder_and_files_alt(ctx, "Documents")
# upload_file(ctx)
# download_file(ctx)
file = ctx.web.get_file_by_server_relative_url("/Shared Documents/SharePoint User Guide.docx")
ctx.load(file)
ctx.execute_query()
path = "../data/SharePoint User Guide.docx"
# with open(path, 'rb') as content_file:
# file_content = content_file.read()
# list_title = "Documents"
# target_list = ctx.web.lists.get_by_title(list_title)
# file = upload_file_alt(target_list.root_folder, os.path.basename(path), file_content)
# find out user id
user = ctx.web.site_users.get_by_email("<EMAIL>")
ctx.load(user)
ctx.execute_query()
user_id = user.properties['Id']
user_field_value = json.dumps([{'Key': user.properties['LoginName']}])
# set file metadata
list_item = file.listitem_allfields # get associated listItem
field_editor = list_item.parent_list.fields.get_by_internal_name_or_title("Modified By")
ctx.load(field_editor)
ctx.execute_query()
if field_editor.properties['ReadOnlyField']:
field_editor.set_property('ReadOnlyField', False)
field_editor.update()
ctx.execute_query()
list_item.set_property("EditorId", user_id) # update ModifiedBy field value
# list_item.set_property("ModifiedById", user_id) # update ModifiedBy field value
# list_item.set_property("Comment", 'some comment goes here212aaa..')
# field_values = [
# {"FieldName": 'Editor', "FieldValue": user_field_value},
# ]
# list_item.system_update(field_values, True)
list_svc = ListDataService(site_url, ctx_auth)
# field_values = {"Comment": "Some comment goes here"}
field_values = {"ModifiedById": 11}
# list_item = list_svc.get_list_item("Documents", 4)
# list_svc.load(list_item)
list_svc.update_list_item("Documents", 4, field_values)
list_svc.execute_query()
print("Ok")
else:
print(ctx_auth.get_last_error())
```
#### File: examples/sharepoint/listitems_operations_alt.py
```python
import json
from settings import settings
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.runtime.client_request import ClientRequest
from office365.runtime.utilities.http_method import HttpMethod
from office365.runtime.utilities.request_options import RequestOptions
from office365.sharepoint.client_context import ClientContext
def read_list_items(context, list_title, url):
"""Read list items example"""
request = ClientRequest(context)
options = RequestOptions("{0}/web/lists/getbyTitle('{1}')/items".format(url, list_title))
options.set_header('Accept', 'application/json; odata=nometadata')
print("Retrieving list items from List {0}".format(list_title))
response = request.execute_request_direct(options)
data = json.loads(response.content)
for item in data['value']:
print("Item title: {0}".format(item["Title"]))
def create_list_item(context, list_title, url):
"""Create list item example"""
request = ClientRequest(context)
options = RequestOptions("{0}/web/lists/getbyTitle('{1}')/items".format(url, list_title))
options.set_header('Accept', 'application/json; odata=nometadata') # JSON Light nometadata mode!
options.data = {'Title': 'New Task'}
options.method = HttpMethod.Post
print("Creating list item...")
response = request.execute_request_direct(options)
item = json.loads(response.content)
print("Task {0} has been successfully [created]".format(item['Title']))
return item
def update_list_item(context, list_title, item_id, url):
"""Update list item example"""
request = ClientRequest(context)
options = RequestOptions(
"{0}/web/lists/getbyTitle('{1}')/items({2})".format(url, list_title, item_id))
options.set_header('Accept', 'application/json; odata=nometadata') # JSON Light nometadata mode!
options.set_header('IF-MATCH', '*')
options.set_header('X-HTTP-Method', 'MERGE')
options.data = {'Title': 'New Task (updated)'}
options.method = HttpMethod.Post
print("Updating list item...")
request.execute_request_direct(options)
print("Task has been successfully [updated]")
def delete_list_item(context, list_title, item_id, url):
"""Delete list item example"""
request = ClientRequest(context)
options = RequestOptions(
"{0}/web/lists/getbyTitle('{1}')/items({2})".format(url, list_title, item_id))
options.set_header('Accept', 'application/json; odata=nometadata') # JSON Light nometadata mode!
options.set_header('IF-MATCH', '*')
options.set_header('X-HTTP-Method', 'DELETE')
options.data = {'Title': 'New Task (updated)'}
options.method = HttpMethod.Post
print("Deleting list item...")
request.execute_request_direct(options)
print("Task has been successfully [deleted]")
if __name__ == '__main__':
ctx_auth = AuthenticationContext(url=settings['url'])
if ctx_auth.acquire_token_for_user(username=settings['user_credentials']['username'],
password=settings['user_credentials']['password']):
target_list_title = "Tasks"
ctx = ClientContext(settings['url'], ctx_auth) # Initialize client context
read_list_items(ctx, target_list_title, settings['url'])
task_item = create_list_item(ctx, target_list_title, settings['url'])
update_list_item(ctx, target_list_title, task_item['Id'], settings['url'])
delete_list_item(ctx, target_list_title, task_item['Id'], settings['url'])
else:
print(ctx_auth.get_last_error())
```
#### File: runtime/odata/odata_encoder.py
```python
from json import JSONEncoder
from office365.runtime.client_object import ClientObject
from office365.runtime.client_value_object import ClientValueObject
from office365.runtime.odata.odata_metadata_level import ODataMetadataLevel
class ODataEncoder(JSONEncoder):
"""OData request payload serializer"""
def __init__(self, json_format, **kwargs):
super(ODataEncoder, self).__init__(**kwargs)
self._json_format = json_format
def default(self, payload):
if isinstance(payload, ClientObject):
return self.normalize_entity(payload)
elif isinstance(payload, ClientValueObject):
return self.normalize_property(payload)
else:
return payload
def normalize_property(self, value):
payload = dict((k, v) for k, v in value.__dict__.items() if v is not None)
if self._json_format.metadata == ODataMetadataLevel.Verbose:
payload["__metadata"] = {'type': value.type_name}
if value.tag_name:
payload = {value.tag_name: payload}
return payload
def normalize_entity(self, value):
"""Generates resource payload for OData endpoint"""
payload = dict((k, v) for k, v in value.properties.items()
if k in value.properties_metadata and value.properties_metadata[k]['readonly'] is False)
        if self._json_format.metadata == ODataMetadataLevel.Verbose and "__metadata" not in payload:
payload["__metadata"] = {'type': value.entity_type_name}
else:
payload = dict((k, v) for k, v in payload.items() if k != "__metadata")
return payload
```
#### File: office365/sharepoint/field.py
```python
from office365.runtime.client_object import ClientObject
from office365.runtime.client_query import ClientQuery
class Field(ClientObject):
"""Represents a field in a SharePoint Web site"""
def update(self):
"""Update the field."""
qry = ClientQuery.update_entry_query(self)
self.context.add_query(qry)
def delete_object(self):
"""Deletes the field."""
qry = ClientQuery.delete_entry_query(self)
self.context.add_query(qry)
self.remove_from_parent_collection()
```
#### File: office365/sharepoint/list_data_service.py
```python
from office365.runtime.action_type import ActionType
from office365.runtime.client_query import ClientQuery
from office365.runtime.client_runtime_context import ClientRuntimeContext
from office365.runtime.odata.json_light_format import JsonLightFormat
from office365.runtime.odata.odata_metadata_level import ODataMetadataLevel
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.listitem import ListItem
class ListDataService(ClientRuntimeContext):
"""SharePoint 2010 list data service"""
def __init__(self, base_url, auth_context):
if base_url.endswith("/"):
base_url = base_url[:len(base_url) - 1]
super(ListDataService, self).__init__(base_url + "/_vti_bin/listdata.svc/", auth_context)
self.json_format = JsonLightFormat(ODataMetadataLevel.Verbose)
def get_list_item(self, list_name, item_id):
return ListItem(self,
ResourcePathServiceOperation(self, None, list_name, [item_id]))
def delete_list_item(self, list_name, item_id):
resource_url = self.service_root_url + list_name + "(" + str(item_id) + ")"
qry = ClientQuery(resource_url, ActionType.DeleteEntity)
self.add_query(qry)
def update_list_item(self, list_name, item_id, field_values):
resource_url = self.service_root_url + list_name + "(" + str(item_id) + ")"
qry = ClientQuery(resource_url, ActionType.UpdateEntity, field_values)
self.add_query(qry)
```
#### File: Office365-REST-Python-Client/tests/test_onedrive_drive.py
```python
import json
import os
from unittest import TestCase
from settings import settings
from office365.graph_client import GraphClient
def get_token(auth_ctx):
client_id, client_secret = os.environ['Office365_Python_Sdk_ClientCredentials'].split(';')
token = auth_ctx.acquire_token_with_client_credentials(
"https://graph.microsoft.com",
client_id,
client_secret)
return token
class TestDrive(TestCase):
"""OneDrive specific test case base class"""
@classmethod
def setUpClass(cls):
ci_tenant_name = settings['tenant']
cls.client = GraphClient(ci_tenant_name, get_token)
def test1_get_drives(self):
drives = self.client.drives.top(2)
self.client.load(drives)
self.client.execute_query()
self.assertLessEqual(len(drives), 2)
for drive in drives:
self.assertIsNotNone(drive.web_url)
def test2_get_drives_alt(self):
resp = self.client.execute_request("/drives?$top=2")
drives = json.loads(resp.content.decode('utf-8'))['value']
self.assertLessEqual(len(drives), 2)
for drive in drives:
self.assertIsNotNone(drive['webUrl'])
def test3_get_first_drive(self):
drives = self.client.drives.top(1)
self.client.load(drives)
self.client.execute_query()
self.assertLessEqual(len(drives), 1)
target_drive_id = drives[0].id
target_drive = self.client.drives.get_by_id(target_drive_id)
self.client.load(target_drive)
self.client.execute_query()
self.assertEqual(target_drive.id, target_drive_id)
```
#### File: Office365-REST-Python-Client/tests/test_sharepoint_listItem.py
```python
from tests import random_seed
from tests.sharepoint_case import SPTestCase
from tests.test_utilities import ListExtensions
from office365.sharepoint.list_creation_information import ListCreationInformation
from office365.sharepoint.list_template_type import ListTemplateType
class TestListItem(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestListItem, cls).setUpClass()
cls.target_list = ListExtensions.ensure_list(cls.context.web,
ListCreationInformation("Tasks",
None,
ListTemplateType.Tasks)
)
cls.target_item_properties = {
"Title": "Task %s" % random_seed,
"Id": None
}
@classmethod
def tearDownClass(cls):
cls.target_list.delete_object()
cls.context.execute_query()
def test_1_create_list_item(self):
item_properties = {'Title': self.target_item_properties["Title"], '__metadata': {'type': 'SP.Data.TasksListItem'}}
item = self.target_list.add_item(item_properties)
self.context.execute_query()
self.assertIsNotNone(item.properties["Title"])
self.target_item_properties["Id"] = item.properties["Id"]
def test_2_delete_list_item(self):
item = self.target_list.get_item_by_id(self.target_item_properties["Id"])
item.delete_object()
self.context.execute_query()
result = self.target_list.get_items().filter("Id eq {0}".format(self.target_item_properties["Id"]))
self.context.load(result)
self.context.execute_query()
self.assertEqual(0, len(result))
``` |
{
"source": "jonikula/pyosmo",
"score": 3
} |
#### File: pyosmo/examples/4_randomized_timing.py
```python
from osmo import Osmo
import random
import time
class PositiveCalculator:
@staticmethod
def guard_something():
return True
@staticmethod
def step_something():
print("1. inside step")
# Random wait can be added inside test step
wait_ms = random.randint(200, 1000)
print("{} sleep inside step".format(wait_ms))
time.sleep(wait_ms / 1000)
print("2. inside step")
@staticmethod
def after():
# Random wait can be added also between test steps
wait_ms = random.randint(200, 3000)
print('Waiting for: {}ms between steps'.format(wait_ms))
time.sleep(wait_ms / 1000)
print('')
osmo = Osmo(PositiveCalculator())
osmo.generate()
```
#### File: pyosmo/algorithm/random.py
```python
from pyosmo.algorithm.base import osmoAlgorithm
class RandomAlgorithm(osmoAlgorithm):
def choose(self, history, choices):
return self.random.choice(choices)
```
#### File: pyosmo/algorithm/weighted.py
```python
from pyosmo.algorithm.base import osmoAlgorithm
class Choice(object):
def __init__(self, ending, count_in_history, weight):
self.ending = ending
self.count_in_history = count_in_history
self.weight = weight
@property
def compare_value(self):
return self.count_in_history * (1 / self.weight)
class WeightedAlgorithm(osmoAlgorithm):
def choose(self, history, choices):
choice_list = list()
for choice in choices:
choice_list.append(Choice(
choice,
history.get_count_in_current_test_case(choice),
self.model.get_step_weight(choice)
))
compare_values = [x.compare_value for x in choice_list]
lowest = min(compare_values)
temp = filter(lambda x: x.compare_value == lowest, choice_list)
temp = self.random.choice(list(temp)).ending
return temp
```
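The weighted algorithm above prefers steps whose weight is high relative to how many times they have already run in the current test case. A standalone illustration of that ranking rule, with made-up step names, weights and history counts:
```python
# Illustration of the rule compare_value = count_in_history * (1 / weight);
# the step names, weights and counts here are invented for the example.
history_counts = {"step_add": 4, "step_remove": 1, "step_clear": 1}
weights = {"step_add": 1, "step_remove": 2, "step_clear": 1}

compare = {name: history_counts[name] * (1 / weights[name]) for name in weights}
# compare == {'step_add': 4.0, 'step_remove': 0.5, 'step_clear': 1.0}
lowest = min(compare.values())
candidates = [name for name, value in compare.items() if value == lowest]
print(candidates)  # ['step_remove'] -- heavily weighted, rarely executed steps win
```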
#### File: pyosmo/pyosmo/osmo.py
```python
import random
import time
from pyosmo.model import Model
from pyosmo.history import OsmoHistory
from pyosmo.algorithm.random import RandomAlgorithm
class Osmo(object):
""" Osmo tester core """
def __init__(self, model, seed=None):
""" Osmo need at least one model to work """
self.model = Model()
self.model.add_model(model)
self.history = OsmoHistory()
self.tests_in_a_suite = 10
self.steps_in_a_test = 10
self.debug = False
# Use random as default algorithm
self.algorithm = RandomAlgorithm()
if seed is None:
self.seed = random.randint(0, 10000)
else:
self.seed = seed
self.random = random.Random(self.seed)
print("Using seed: {}".format(self.seed))
def p(self, text):
""" Print debugging texts if debug is enabled """
if self.debug:
print(text)
def set_debug(self, debug):
self.debug = debug
def add_model(self, model):
""" Add model for osmo """
self.model.add_model(model)
def _execute_step(self, ending):
"""
Execute step and save it to the history
:param ending: letter after step_
:return:
"""
step_name = 'step_{}'.format(ending)
start_time = time.time()
self.model.execute(step_name)
self.history.add_step(step_name, time.time() - start_time)
def generate(self):
""" Generate / run tests """
# Initialize algorithm
self.algorithm.inititalize(self.random, self.model)
self.model.execute_optional('before_suite')
if not self.tests_in_a_suite:
raise Exception("Empty model!")
for _ in range(self.tests_in_a_suite):
self.history.start_new_test()
self.model.execute_optional('before_test')
for _ in range(self.steps_in_a_test):
# Use algorithm to select the step
ending = self.algorithm.choose(self.history, self.model.get_list_of_available_steps())
self.model.execute_optional('pre_{}'.format(ending))
self._execute_step(ending)
self.model.execute_optional('post_{}'.format(ending))
# General after step which is run after each step
self.model.execute_optional('after')
self.model.execute_optional('after_test')
self.model.execute_optional('after_suite')
self.history.stop()
``` |
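A short usage sketch modelled on the randomized-timing example earlier in this repository: any object exposing `guard_*`/`step_*` methods can be passed to `Osmo`, and the suite/test lengths and seed shown in `__init__` above can be set before calling `generate()`. The model class below is hypothetical.
```python
# Hypothetical model; mirrors the guard_/step_ convention used by the examples above.
class CounterModel:
    def __init__(self):
        self.count = 0

    def guard_increment(self):
        return True

    def step_increment(self):
        self.count += 1
        print("count is now", self.count)

osmo = Osmo(CounterModel(), seed=1234)  # fixed seed for a reproducible run
osmo.tests_in_a_suite = 2               # two test cases in the suite
osmo.steps_in_a_test = 5                # five steps per test case
osmo.generate()
```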
{
"source": "joniliu/cf-ui5-app",
"score": 3
} |
#### File: joniliu/cf-ui5-app/app.py
```python
import os
from flask import Flask, render_template, make_response, send_from_directory
app = Flask(__name__)
# Render index.html initially
@app.route('/')
def render_index():
return render_template('index.html')
# Render stylings
@app.route('/css/style.css', methods=["GET"])
def render_style():
try:
response = make_response(render_template('css/style.css'))
response.headers['Content-type'] = "text/css; charset=utf-8"
return response
except Exception as e:
print("\033[93m" + str(e) + "\033[0m")
return 'OK'
# Render SAPUI5 web app files from templates folder
@app.route('/<path:path>')
def render_path(path):
if "img" in path or ".js" in path or "i18n" in path or "favicon" in path or ".json" in path or ".css" in path:
return send_from_directory('templates', path)
else:
return render_template(path)
port = int(os.getenv("PORT", 0))
if __name__ == '__main__':
if port != 0:
app.run(host='0.0.0.0', port=port)
else:
app.run()
``` |
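A hedged smoke-test sketch using Flask's built-in test client, not part of the repository; it assumes the repository's `templates` folder, with `index.html` and `css/style.css`, is present.
```python
# Hypothetical smoke test -- relies on the repo's templates/ content being available.
with app.test_client() as client:
    assert client.get('/').status_code == 200               # renders index.html
    assert client.get('/css/style.css').mimetype == 'text/css'
```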
{
"source": "jonimoas/rest_api_logger",
"score": 3
} |
#### File: jonimoas/rest_api_logger/main.py
```python
from waitress import serve
import os
import requests
from flask import Flask
from flask import request
from flask import Response
from flask import jsonify
from flask import after_this_request
import urllib.parse
import json
import datetime
from tinydb import TinyDB, Query
currentDate = datetime.datetime.today()
dateString = str(currentDate.year)+"-" + \
str(currentDate.month) + "-" + str(currentDate.day)
db = TinyDB("db"+dateString+".json")
app = Flask(__name__)
@app.route('/<path:path>', methods=['GET', 'POST', 'DELETE', 'PUT', 'PATCH'])
def main(path):
global currentDate
global db
if currentDate.day != datetime.datetime.now().day:
currentDate = datetime.datetime.now()
dateString = str(currentDate.year)+"-" + \
str(currentDate.month) + "-" + str(currentDate.day)
db = TinyDB("db" + dateString + ".json")
dateString = dateString
method = request.method
args = request.args.to_dict(flat=False)
headers = dict(request.headers)
body = request.get_json()
response = requests.request(method, os.environ["API_TO_WATCH"]+path, params=args,
headers=headers, allow_redirects=False, data=json.dumps(body))
response_body = None
response_headers = dict(response.headers)
try:
response_body = response.json()
except:
response_body = None
db.insert(dict({'method': request.method,
'path': str(path),
'args': args,
'request_headers': headers,
'request_body': body,
'response': response_body,
'response_headers': response_headers,
'timestamp': str(datetime.datetime.now()),
'status': response.status_code
}))
result = None
if response_body != None:
result = jsonify(response_body)
else:
result = Response()
result.status_code = response.status_code
if "Transfer-Encoding" in response_headers:
del response_headers["Transfer-Encoding"]
if "Content-Encoding" in response_headers:
del response_headers["Content-Encoding"]
result.headers = response_headers
return result
serve(app, host="0.0.0.0", port="8000")
``` |
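A hedged client-side sketch, not part of the repository, of how the proxy might be exercised: start it with the `API_TO_WATCH` environment variable pointing at the upstream API (with a trailing slash, since the path is concatenated directly), then send requests to port 8000 and read back the per-day TinyDB log. The upstream URL and request path are placeholders.
```python
# Run the proxy first, e.g.:  API_TO_WATCH=https://example.org/ python main.py
import datetime
import requests
from tinydb import TinyDB

resp = requests.get("http://localhost:8000/some/path", params={"q": "test"})
print(resp.status_code)

# Each proxied call is appended to a per-day TinyDB file named db<year>-<month>-<day>.json.
today = datetime.datetime.today()
db = TinyDB("db{}-{}-{}.json".format(today.year, today.month, today.day))
for entry in db.all():
    print(entry["timestamp"], entry["method"], entry["path"], entry["status"])
```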
{
"source": "jon-infante/CS-1.3-Core-Data-Structures",
"score": 4
} |
#### File: CS-1.3-Core-Data-Structures/Code/set_test.py
```python
from set import HashSet
import unittest
class SetTest(unittest.TestCase):
def test_init(self):
elements = ['A', 'B', 'C']
set = HashSet(elements)
assert set.size is 3
def test_size(self):
elements = ['A', 'B', 'C', 'D', 'E']
set = HashSet(elements)
assert set.size is 5
def test_contains(self):
elements = ['P', 'C', 'X', 'U']
set = HashSet(elements)
assert set.contains('P') is True
assert set.contains('C') is True
assert set.contains('U') is True
assert set.contains('D') is False
assert set.contains('J') is False
def test_add(self):
elements = ['J', 'K']
set = HashSet(elements)
set.add('P')
set.add('E')
with self.assertRaises(KeyError):
set.add('K') # Element already exists
with self.assertRaises(KeyError):
set.add('E') # Element already exists
assert set.size is 4
assert set.contains('P') is True
assert set.contains('E') is True
def test_remove(self):
elements = ['U', '8', 'Q', 'D']
set = HashSet(elements)
with self.assertRaises(KeyError):
set.remove('K') # Element doesn't exist
with self.assertRaises(KeyError):
set.remove('0') # Element doesn't exist
set.remove('U')
set.remove('Q')
assert set.contains('U') is False
assert set.contains('Q') is False
with self.assertRaises(KeyError):
set.remove('Q') # Element doesn't exist anymore
def test_union(self):
elements = ['A', 'C', 'D', 'F']
elements2 = ['A', 'B', 'D', 'F', 'G', 'H']
elements3 = ['C', 'Y', 'T', 'A']
set = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set.union(set2).hash.values(), ['A', 'B', 'C', 'D', 'F', 'G', 'H']) # Ignore item order
self.assertCountEqual(set.union(set3).hash.values(), ['A', 'C', 'D', 'F', 'T', 'Y']) # Ignore item order
def test_intersection(self):
elements = ['0', 'B', 'C', 'K']
elements2 = ['0', 'D', 'E', 'C', 'Y', 'K']
elements3 = ['B', 'D', 'P', 'K', 'G', '9']
set = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set.intersection(set2).hash.values(), ['0', 'C', 'K']) # Ignore item order
self.assertCountEqual(set.intersection(set3).hash.values(), ['B', 'K']) # Ignore item order
def test_difference(self):
elements = ['4', '7', '8', '9', '0']
elements2 = ['4', '5', '6', '10', '8', '9']
elements3 = ['1', '3', '5', '7', '0']
set = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set.difference(set2).hash.values(), ['7', '0']) # Ignore item order
self.assertCountEqual(set.difference(set3).hash.values(), ['4', '8', '9']) # Ignore item order
def test_is_subset(self):
elements = ['Y', 'C', 'D']
elements2 = ['C', 'G', 'U', 'D', 'T', 'Y']
elements3 = ['P', 'H', 'Y', 'D', 'E', 'F']
set = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
assert set.is_subset(set2) is True
assert set.is_subset(set3) is False
assert set2.is_subset(set3) is False
if __name__ == '__main__':
unittest.main()
``` |
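The repository's `set.py` is not included in this excerpt. For context, here is a minimal dict-backed sketch consistent with the interface the tests above exercise (`size`, `contains`, `add`, `remove`, the set operations, and the `.hash.values()` checks); it is an illustration, not the project's own implementation.
```python
# Illustration only -- not the repository's set.py. A dict-backed sketch matching
# the interface exercised by the tests above.
class HashSet:

    def __init__(self, elements=None):
        self.hash = {}  # element -> element, so .hash.values() lists the members
        if elements is not None:
            for element in elements:
                self.add(element)

    @property
    def size(self):
        return len(self.hash)

    def contains(self, element):
        return element in self.hash

    def add(self, element):
        if self.contains(element):
            raise KeyError('Element already exists: {}'.format(element))
        self.hash[element] = element

    def remove(self, element):
        if not self.contains(element):
            raise KeyError('Element not found: {}'.format(element))
        del self.hash[element]

    def union(self, other_set):
        result = HashSet(list(self.hash.values()))
        for element in other_set.hash.values():
            if not result.contains(element):
                result.add(element)
        return result

    def intersection(self, other_set):
        return HashSet([e for e in self.hash.values() if other_set.contains(e)])

    def difference(self, other_set):
        return HashSet([e for e in self.hash.values() if not other_set.contains(e)])

    def is_subset(self, other_set):
        return all(other_set.contains(e) for e in self.hash.values())
```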
{
"source": "joniok/hybra-medialoader",
"score": 3
} |
#### File: hybra-medialoader/old/linkki_iltamakasiini.py
```python
import requests
import datetime
from bs4 import BeautifulSoup
def nouda( out ):
lapi = raw_input("Monta uutissivua lapi(20 uutista per sivu): ")
lapi_num = int(lapi)
sivu = 1
r = requests.get( 'http://www.iltamakasiini.fi/haku?hakusanat=vaalit')
for x in range(0,lapi_num):
sivu_str = str(sivu)
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text )
for teksti in soup.find_all( class_='group-main' ):
a = teksti.find('a').get('href')
out.write('http://www.iltamakasiini.fi' + a + "\n")
sivu = sivu + 1
r = requests.get('http://www.iltamakasiini.fi/haku?hakusanat=vaalit&page=' + sivu_str)
if __name__ == '__main__':
nouda("http://www.iltamakasiini.fi/haku?hakusanat=vaalit", file('linkki_iltamakasiini.txt', 'w'))
```
#### File: hybra-medialoader/sites/aamuset.py
```python
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( 'article' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
processor.decompose_all( article.find_all( class_ = 'views-field-field-aamuset-related-images' ) )
categories_element = soup.find( class_ = 'tsv3-c-as-articletags' )
categories = processor.collect_categories( categories_element.find_all( 'li' ) )
datetime_list = processor.collect_datetime( article.find( 'time' ) )
author = processor.collect_text( article.find( class_ = 'kirjoittaja' ) )
processor.decompose( article.find( class_ = 'kirjoittaja' ) )
title = processor.collect_text( article.find( class_ = 'otsikko' ) )
text = processor.collect_text( article.find( class_ = 'tsv3-c-as-article__textitem--teksti' ) )
images = processor.collect_images( article.find_all( 'img' ), 'src', 'http://www.aamuset.fi' )
captions = processor.collect_image_captions( article.find_all( class_ = 'tsv3-c-as-article__attachment__caption' ) )
return processor.create_dictionary('Aamuset', url, r.status_code, categories, datetime_list, author, title, u'', text, images, captions)
if __name__ == '__main__':
parse("http://www.aamuset.fi/naista-puhutaan/politiikka/yrttiaho-kanteli-oikeuskanslerille-nato-sopimuksesta", file('aamuset.txt', 'w'))
```
#### File: hybra-medialoader/test/common.py
```python
import os
import errno
import difflib
import filecmp
import datetime
def initialise_file(out, content_dictionary):
if ( os.path.isfile(out) ):
os.remove(out)
write_file( file(out, 'w'), content_dictionary )
def write_file( out, content ):
file_content = content['domain'] + "\n"
file_content += content['url'] + "\n"
file_content += str( content['http'] ) + "\n"
for category in content['categories']:
file_content += category.encode('utf-8') + "\n"
for datetime_object in content['datetime_list']:
file_content += str( datetime_object ) + "\n"
file_content += content['author'].encode('utf-8') + "\n"
file_content += content['title'].encode('utf-8') + "\n"
file_content += content['ingress'].encode('utf-8') + "\n"
file_content += content['text'].encode('utf-8') + "\n"
for img in content['images']:
file_content += img.encode('utf-8') + "\n"
for caption in content['captions']:
file_content += caption.encode('utf-8') + "\n"
out.write( file_content.strip() )
def write_difference_log(domain, out, test_content_path):
filename = 'test/difference_logs/' + domain + '_diff.txt'
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(filename, "w") as diff_log:
test_content = open(test_content_path, 'r')
content = open(out, 'r')
test_content_text = test_content.read().replace(' ', ' \n')
content_text = content.read().replace(' ', ' \n')
test_content_lines = test_content_text.splitlines()
content_lines = content_text.splitlines()
d = difflib.Differ()
diff = d.compare(test_content_lines, content_lines)
diff_log.write( '\n'.join(diff) )
test_content.close()
content.close()
def file_exists(out):
assert os.path.isfile(out)
def file_not_empty(out):
assert os.path.getsize(out) > 0
def file_contents_match(domain, out):
test_content_path = 'test/test_contents/' + domain + '.txt'
write_difference_log( domain, out, test_content_path )
assert filecmp.cmp( test_content_path, out )
def dictionary_created(d):
assert bool( d )
def dictionary_contains_right_keys(d):
keys = ['domain', 'url', 'http', 'categories', 'datetime_list', 'author', 'title', 'ingress', 'text', 'images', 'captions']
for key in keys:
assert key in d
def dictionary_values_correct_type(d):
assert type( d['domain'] ) is str
assert type( d['url'] ) is str
assert type( d['http'] ) is int
assert type( d['categories'] ) is list
for category in d['categories']:
type( category ) is unicode
assert type( d['datetime_list'] ) is list
assert len( d['datetime_list'] ) > 0
for datetime_object in d['datetime_list']:
assert type( datetime_object ) is datetime.datetime or datetime.date
assert type( d['author'] ) is unicode
assert type( d['title'] ) is unicode
assert type( d['ingress'] ) is unicode
assert type( d['text'] ) is unicode
assert type( d['images'] ) is list
for img in d['images']:
assert type( img ) is unicode
assert type( d['captions'] ) is list
for caption in d['captions']:
assert type( caption ) is unicode
```
#### File: hybra-medialoader/test/test_downloader.py
```python
import sys
import os
path = os.path.abspath('.')
sys.path.append(path)
import downloader as d
good_urls = [
'http://yle.fi/uutiset/tv-uutisten_politiikantoimittaja_timo_kuparinen_kuollut/9167149',
'http://www.hs.fi/kotimaa/art-2000005007566.html',
'http://www.iltasanomat.fi/musiikki/art-2000005008209.html'
]
bad_urls = ['http://example.com', 'http://www.example.org']
class TestParser:
@classmethod
def setup_class(self):
self.raw_path = 'test-data-raw/'
self.data_path = 'test-data/'
self.errors = 'test-errors.log'
errors = open( self.errors, 'w')
for f in [ self.raw_path, self.data_path ]:
if not os.path.exists( f ):
os.makedirs( f )
for url in good_urls:
d.download( url , self.raw_path, errors )
for url in bad_urls:
d.download( url , self.raw_path, errors )
self.collected = d.resort_pickles( self.raw_path )
def teardown_class(self):
import shutil
for f in [ self.raw_path, self.data_path ]:
shutil.rmtree(f)
os.remove( self.errors )
def test_downloaded_files_exists(self):
assert len( os.listdir( self.raw_path ) ) == len( good_urls )
def test_errors_logged(self):
assert len( open( self.errors ).readlines() ) == len( bad_urls )
def test_file_contents_ok(self):
import pickle
keys = ['url', 'http', 'categories', 'datetime_list', 'author', 'title', 'ingress', 'text', 'images', 'captions']
for d in os.listdir( self.raw_path ):
d = pickle.load( open( self.raw_path + d ) )
for key in keys:
assert key in d
def test_pickles_sorted_correctly(self):
count = sum( map( lambda x: len( x ), self.collected.values() ) )
assert count == len( good_urls )
def test_pickles_sorted_keys(self):
## check file names
assert 'yle' in ''.join( self.collected.keys() )
assert 'hs' in ''.join( self.collected.keys() )
assert 'iltasanomat' in ''.join( self.collected.keys() )
## years
assert '2016' in ''.join( self.collected.keys() )
## month
assert '9' in ''.join( self.collected.keys() )
assert '12' in ''.join( self.collected.keys() )
``` |
{
"source": "Jonirulah/Nyaa-Torrent-Downloader",
"score": 3
} |
#### File: Jonirulah/Nyaa-Torrent-Downloader/Nyaa_InterfazGUI.pyw
```python
from tkinter import ttk
from tkinter import *
import os
import sys
import requests
import shutil
from NyaaPy.nyaa import Nyaa
from pathlib import Path
# Window configuration
master = Tk()
master.title('Nyaa-Torrent-Downloader v1.2')
master.resizable(width=FALSE, height=FALSE)
master.geometry("980x860+30+30")
master.iconbitmap("icon.ico")
home = str(Path.home())
Directorio = os.path.dirname(home + '\Documents' + '\\' + 'Nyaa-Downloader' + '\\')
# Functions
# Function run when the language is changed
def IdiomaEsp():
global Lan1, Lan2, Lan3, Lan4, Lan5, Lan6, Lan7, Lan8, Lan9, Lan10, Lan11, ImgDescarga, ImgBuscar, seedersactivos, leechersactivos
Lan1 = "Buscando: "
Lan2 = "Se han encontrado "
Lan3 = " Resultados"
Lan4 = "Descargando: "
Lan5 = "Descarga Finalizada: "
Lan6 = "Búsqueda:"
Lan7 = "Categoría:"
Lan8 = "Resultados de la Búsqueda:"
Lan9 = ('Anime - AMVs', 'Anime - Traducido al Inglés', 'Anime - No-Traducido al Inglés', 'Anime - RAW', 'Audio - Sin Pérdida', 'Audio - Con Pérdida', 'Literatura - Traducido al Inglés', 'Literatura - No-Traducido al Inglés', 'Literatura - RAW', 'Live Action - Traducido al Inglés', 'Live Action - Idol/Vídeo Promocional', 'Live Action - No-Traducido al Inglés', 'Live Action - RAW', 'Imágenes - Gráficos', 'Imágenes - Fotos', 'Software - Aplicaciones', 'Software - Juegos')
Lan10 = "Progreso:"
Lan11 = "No hay nínguna tarea en proceso."
seedersactivos = "0"
leechersactivos = "0"
ImgDescarga = PhotoImage(file="assets/descargar.png")
ImgBuscar = PhotoImage(file="assets/busca.png")
def IdiomaEng():
global Lan1, Lan2, Lan3, Lan4, Lan5, Lan6, Lan7, Lan8, Lan9, Lan10, Lan11, ImgDescarga, ImgBuscar, seedersactivos, leechersactivos
Lan1 = "Searching: "
Lan2 = "Search found "
Lan3 = " Results"
Lan4 = "Downloading: "
Lan5 = "Download Ended: "
Lan6 = "Search:"
Lan7 = "Category:"
Lan8 = "Search Results:"
Lan9 = ('Anime - AMVs', 'Anime - English-Translated', 'Anime - Non-English-Translated', 'Anime - RAW', 'Audio - Lossless', 'Audio - Lossy', 'Literature - English-Translated', 'Literature - Non-English-Translated', 'Literature - RAW', 'Live Action - English-Translated', 'Live Action - Idol/Promotional Video', 'Live Action - Non-English-Translated', 'Live Action - RAW', 'Pictures - Graphics', 'Pictures - Photos', 'Software - Applications', 'Software - Games')
Lan10 = "Progress:"
Lan11 = "There's no task in process."
seedersactivos = "0"
leechersactivos = "0"
ImgDescarga = PhotoImage(file="assets/download.png")
ImgBuscar = PhotoImage(file="assets/search.png")
# Function executed when the Search button is pressed
def BuscarBoton():
Listbox.delete(0, END)
Valordelista = 1
global Almacen, Almacen2
Almacen = []
Almacen2 = []
Palabra = textoaintroducir.get()
Valoractual = country.current()
textoacambiar.set(Lan1 + Palabra)
Tk.update(master)
if Valoractual == 0:
Numero = 1
Decimal = 1
elif Valoractual == 1:
Numero = 1
Decimal = 2
elif Valoractual == 2:
Numero = 1
Decimal = 3
elif Valoractual == 3:
Numero = 1
Decimal = 4
elif Valoractual == 4:
Numero = 2
Decimal = 1
elif Valoractual == 5:
Numero = 2
Decimal = 2
elif Valoractual == 6:
Numero = 3
Decimal = 1
elif Valoractual == 7:
Numero = 3
Decimal = 2
elif Valoractual == 8:
Numero = 3
Decimal = 3
elif Valoractual == 9:
Numero = 4
Decimal = 1
elif Valoractual == 10:
Numero = 4
Decimal = 2
elif Valoractual == 11:
Numero = 4
Decimal = 3
elif Valoractual == 12:
Numero = 4
Decimal = 4
elif Valoractual == 13:
Numero = 5
Decimal = 1
elif Valoractual == 14:
Numero = 5
Decimal = 2
elif Valoractual == 15:
Numero = 6
Decimal = 1
elif Valoractual == 16:
Numero = 6
Decimal = 2
Diccionario = Nyaa.search(category=Numero, keyword=Palabra, subcategory=Decimal)
for i in range(len(Diccionario)):
Lista = Diccionario[(i)]
Almacen.append(Lista["name"])
Almacen.append(Lista["download_url"])
Almacen2.append(Lista["seeders"])
Almacen2.append(Lista["leechers"])
Listbox.insert(i, str(Valordelista) + " - " + Lista["name"])
Valordelista = Valordelista + 1
Listbox.curselection()
textoacambiar.set(Lan2 + str(i) + Lan3)
Tk.update(master)
master.after(675, seedleech)
# Function bound to the Enter key
def BuscarEnter(s):
Listbox.delete(0, END)
Valordelista = 1
global Almacen, Almacen2
Almacen = []
Almacen2 = []
Palabra = textoaintroducir.get()
Valoractual = country.current()
textoacambiar.set(Lan1 + Palabra)
Tk.update(master)
if Valoractual == 0:
Numero = 1
Decimal = 1
elif Valoractual == 1:
Numero = 1
Decimal = 2
elif Valoractual == 2:
Numero = 1
Decimal = 3
elif Valoractual == 3:
Numero = 1
Decimal = 4
elif Valoractual == 4:
Numero = 2
Decimal = 1
elif Valoractual == 5:
Numero = 2
Decimal = 2
elif Valoractual == 6:
Numero = 3
Decimal = 1
elif Valoractual == 7:
Numero = 3
Decimal = 2
elif Valoractual == 8:
Numero = 3
Decimal = 3
elif Valoractual == 9:
Numero = 4
Decimal = 1
elif Valoractual == 10:
Numero = 4
Decimal = 2
elif Valoractual == 11:
Numero = 4
Decimal = 3
elif Valoractual == 12:
Numero = 4
Decimal = 4
elif Valoractual == 13:
Numero = 5
Decimal = 1
elif Valoractual == 14:
Numero = 5
Decimal = 2
elif Valoractual == 15:
Numero = 6
Decimal = 1
elif Valoractual == 16:
Numero = 6
Decimal = 2
Diccionario = Nyaa.search(category=Numero, keyword=Palabra, subcategory=Decimal)
for i in range(len(Diccionario)):
Lista = Diccionario[(i)]
Almacen.append(Lista["name"])
Almacen.append(Lista["download_url"])
Almacen2.append(Lista["seeders"])
Almacen2.append(Lista["leechers"])
Listbox.insert(i, str(Valordelista) + " - " + Lista["name"])
Valordelista = Valordelista + 1
textoacambiar.set(Lan2 + str(i) + Lan3)
Tk.update(master)
master.after(900, seedleech)
# Download the selected torrent file(s)
def Descarga():
Result = Listbox.curselection()
print(Result)
Valordescarga = 0
for i in range(len(Result)):
pb.step(100)
Tk.update(master)
Enlace = Result[Valordescarga] * 2 + 1
Nombre = Result[Valordescarga] * 2
Valordescarga = Valordescarga + 1
Porcentaje = 1 / int(len(Result)) * 100
textoacambiar.set(Lan4 + Almacen[(Nombre)] + " " + str(Valordescarga) + "/" + str(len(Result)))
URL = Almacen[(Enlace)]
Barras = URL.split('/')
Descargar = requests.get(URL)
home = str(Path.home())
pb.step(Porcentaje)
Tk.update(master)
with open(home + '\Downloads' + '\\' + Barras[4], 'wb') as f:
f.write(Descargar.content)
os.startfile(home + '\Downloads' + '\\' + Barras[4], 'open')
textoacambiar.set(Lan5 + str(Valordescarga) + "/" + str(len(Result)))
# Restart the app
def Reiniciar():
python = sys.executable
os.execl(python, python, * sys.argv)
# Change the contents of Language.cfg to Spanish
def FicheroaEspanol():
with open((Directorio + '\\' + "Language.cfg"), "r") as file:
lineasfichero = file.readlines()
lineasfichero[0] = ("Language = Spanish")
file.close()
with open((Directorio + '\\' + "Language.cfg"), "w") as file:
file.writelines(lineasfichero)
file.close()
# Change the contents of Language.cfg to English
def FicheroaIngles():
with open((Directorio + '\\' + "Language.cfg"), "r") as file:
lineasfichero = file.readlines()
lineasfichero[0] = ("Language = English")
file.close()
with open((Directorio + '\\' + "Language.cfg"), "w") as file:
file.writelines(lineasfichero)
file.close()
# Language button handlers
def Espanol():
FicheroaEspanol()
Reiniciar()
def English():
FicheroaIngles()
Reiniciar()
# If the directory does not exist (first startup)
if not os.path.exists(Directorio):
os.makedirs(Directorio)
CualIdioma = open((Directorio + '\\' + "Language.cfg"), "w")
CualIdioma.write("Language = English")
CualIdioma.close()
QueIdioma = open((Directorio + '\\' + "Language.cfg"), "r")
IdiomaSeleccionado = QueIdioma.read()
if IdiomaSeleccionado == "Language = English":
IdiomaEng()
elif IdiomaSeleccionado == "Language = Spanish":
IdiomaEsp()
else:
QueIdioma.close()
shutil.rmtree(Directorio, ignore_errors=False)
raise SystemExit()
# Auto-update seeders/leechers every 675 ms after a search
def seedleech():
CalcSemillas = 0
CalcLeechers = 0
global Almacen2
    Result = Listbox.curselection()
    master.after(675, seedleech)
    if not Result:
        # Nothing is selected yet; keep polling without updating the labels
        return
    CalcSemillas = Result[0] * 2
CalcLeechers = Result[0] * 2 + 1
Semillas = Almacen2[(CalcSemillas)]
Leechers = Almacen2[(CalcLeechers)]
seeders.set(Semillas)
leechers.set(Leechers)
# Configure the entire interface
ImgEsp = PhotoImage(file="assets/esp.png")
ImgEng = PhotoImage(file="assets/eng.png")
photo = PhotoImage(file="assets/fondo app.png")
wallpaper = Label(master,image=photo)
wallpaper.place(x=0, y=0, relwidth=1, relheight=1)
wallpaper.image=photo
textoinv = Frame(master)
textoinv.pack(anchor="nw", padx=30, pady=10)
texto1 = Label(master, text=Lan6, bg="black", fg="white", font=("Verdana", 14, "bold"))
texto1.pack(anchor="nw", padx=30, pady=5)
texto2 = Label(master, text=Lan7, bg="black", fg="white", font=("Verdana", 14, "bold"))
texto2.pack(anchor="nw", padx=30, pady=5)
texto2.place(x=500, y=30)
texto3 = Label(master, text=Lan8, bg="black", fg="white", font=("Verdana", 14, "bold"))
texto3.pack(anchor="nw", pady=5)
texto3.place(x=30, y=105)
textoaintroducir = Entry(master, width=30, bd=2, font=("Verdana", 13, "bold"))
textoaintroducir.pack(anchor="nw", padx=30)
countryvar = StringVar()
country = ttk.Combobox(textvariable=countryvar, width=32, state="readonly")
country['values'] = Lan9
country.bind('<<ComboboxSelected>>')
country.pack(anchor="n")
country.current(1)
country.place(x=500, y=63)
boton = Button(master, bd=0, command=BuscarBoton)
boton.pack(anchor="ne", pady=20)
boton.place(x=785, y=32)
botonesp = Button(master, image=ImgEsp, bd=0, command=Espanol)
botonesp.pack(anchor="ne", pady=4)
botonesp.place(x=928, y=2)
botoneng = Button(master, image=ImgEng, bd=0, command=English)
botoneng.pack(anchor="ne", pady=4)
botoneng.place(x=952, y=2)
boton.config(image=ImgBuscar, width=140, height=60)
Listbox = Listbox(master, height=30, width=100, bd=2, activestyle="dotbox", font=("Arial", 12, "bold"), selectmode=EXTENDED)
Listbox.pack(anchor="w", padx=30, pady=49)
boton2 = Button(master, bd=0, command=Descarga)
boton2.pack(anchor="ne", pady=20)
boton2.place(x=785, y=750)
boton2.config(image=ImgDescarga, width=140, height=60)
pb = ttk.Progressbar(master, orient='horizontal', mode='determinate')
pb.pack(expand=True,fill=NONE)
pb.place(x=140, y=760, width=475, height=30)
texto4 = Label(master, text=Lan10, bg="black", fg="white", font=("Verdana", 14, "bold"))
texto4.pack(anchor="nw")
texto4.place(x=22, y=760)
textoacambiar = StringVar(value=Lan11)
seeders = StringVar(value=seedersactivos)
leechers = StringVar(value=leechersactivos)
texto5 = Label(master, textvariable=textoacambiar, bg="black", fg="white", font=("Verdana", 9))
texto5.pack(anchor="nw")
texto5.place(x=144, y=798)
textoseed = Label(master, textvariable=seeders, bg="black", fg="green", font=("Verdana", 9, "bold"))
textoseed.pack(anchor="nw")
textoseed.place(x=740, y=755)
textoleech = Label(master, textvariable=leechers, bg="black", fg="red", font=("Verdana", 9, "bold"))
textoleech.pack(anchor="nw")
textoleech.place(x=740, y=785)
texto8 = Label(master, text="Seeders:", bg="black", fg="white", font=("Verdana", 9, "bold"))
texto8.pack(anchor="nw")
texto8.place(x=660, y=755)
texto9 = Label(master, text="Leechers:", bg="black", fg="white", font=("Verdana", 9, "bold"))
texto9.pack(anchor="nw")
texto9.place(x=660, y=785)
master.bind('<Return>', BuscarEnter)
mainloop()
``` |
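`BuscarBoton` and `BuscarEnter` above repeat a 17-branch if/elif chain just to turn the combobox index into Nyaa's (category, subcategory) pair. A hedged refactor sketch, not part of the repository, expressing the same mapping as a lookup table in the order of the `Lan9` tuple:
```python
# Same index -> (category, subcategory) mapping as the if/elif chains above,
# expressed as a lookup table; indices follow the order of the Lan9 tuple.
CATEGORY_MAP = [
    (1, 1), (1, 2), (1, 3), (1, 4),   # Anime
    (2, 1), (2, 2),                   # Audio
    (3, 1), (3, 2), (3, 3),           # Literature
    (4, 1), (4, 2), (4, 3), (4, 4),   # Live Action
    (5, 1), (5, 2),                   # Pictures
    (6, 1), (6, 2),                   # Software
]

def category_for(combobox_index):
    numero, decimal = CATEGORY_MAP[combobox_index]
    return numero, decimal
```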