ext (string, 9 classes) | sha (string, length 40) | content (string, 3 chars – 1.04M chars)
---|---|---|
py | 1a4af7f110daed6d72d5c17f1efcce538aa035bc | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 22:53:59 2018
@authors: a.pakbin, T.J. Ashby
"""
from sklearn.model_selection import StratifiedKFold
from auxiliary import grid_search,ICD9_categorizer, save_fold_data, convert_numbers_to_names, min_max_mean_auc_for_labels, train_test_one_hot_encoder, possible_values_finder,train_test_normalizer, train_test_imputer, feature_importance_saver, feature_importance_updator, save_roc_curve, data_reader, vectors_to_csv, create_subfolder_if_not_existing, feature_rankings_among_all_labels_saver
import numpy as np
import pandas as pd
from fmeasure import roc, maximize_roc
from xgboost.sklearn import XGBClassifier
import random as rnd
from sklearn.metrics import roc_auc_score
import pickle
import gc
import sys
import logging as lg
#
# NB: the original code base contains code that will trigger
# "pandas.core.common.SettingWithCopyError: A value is trying to be set on a
# copy of a slice from a DataFrame" errors if the code is run with
# pd.set_option('mode.chained_assignment', 'raise'). Hence I'm not using it.
#
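#
# Illustrative sketch (not part of the original pipeline): to surface those
# chained-assignment cases as errors while debugging, one could temporarily set
#   pd.set_option('mode.chained_assignment', 'raise')
# and rewrite the offending assignments through .loc, e.g.
#   data.loc[mask, 'col'] = value    # instead of data[mask]['col'] = value
# where 'mask' and 'col' are hypothetical names.
#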
def main(file_name,
data_address,
writing_address):
lg.basicConfig(stream=sys.stderr, level=lg.DEBUG)
mpl_logger = lg.getLogger('matplotlib')
mpl_logger.setLevel(lg.WARNING)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', 20)
data_address = str(data_address)
writing_address = str(writing_address)
#the address where MIMIC III tables are in .csv.gz format. The tables are: D_ICD_PROCEDURES.csv.gz, D_ITEMS.csv.gz and D_LABITEMS.csv.gz
#conversion_tables_address='../data'
conversion_tables_address = data_address
#outcome labels can contain: '24hrs' ,'48hrs','72hrs', '24hrs~72hrs','7days','30days', 'Bounceback'
outcome_labels=['24hrs' ,'48hrs','72hrs', '24hrs~72hrs','7days','30days', 'Bounceback']
normalize_data=False
save_folds_data=True
values_for_grid_search=[np.linspace(start=1, stop=6, num=6),[50,100,200,1000,1500],[0.1]]
num_of_folds=5
#################################
categorical_column_names=['ADMISSION_TYPE', 'INSURANCE', 'LANGUAGE', 'RELIGION', 'MARITAL_STATUS', 'ETHNICITY','FIRST_CAREUNIT', 'GENDER']
# Read the CSV file
# - The form of the CSV file is:
# -
data=data_reader(data_address, file_name)
# Returns a dictionary where each column name is a key, and the result is the
# set of values that can appear (with NaN etc removed)
possible_values=possible_values_finder(data, categorical_column_names)
# Fill in the target data column
data['IsReadmitted_24hrs~72hrs']=[1 if x>0 else 0 for x in (data['IsReadmitted_72hrs']-data['IsReadmitted_24hrs'])]
# List of non-feature column names
non_attribute_column_names=['HADM_ID', 'ICUSTAY_ID', 'INTIME', 'OUTTIME', 'SUBJECT_ID', 'IsReadmitted_24hrs','IsReadmitted_Bounceback','IsReadmitted_24hrs~72hrs' ,'IsReadmitted_48hrs','IsReadmitted_72hrs','IsReadmitted_7days','IsReadmitted_30days', 'Time_To_readmission', 'hospital_expire_flag']
if 'Subset' in data.columns:
#
# NB: If doing subsetting, you should NOT add the test fold from subset A to
# the real test data from subset B, otherwise you'll get better results than
# you should (as the model is trained on subset A and so will do well on the
# slice of subset A included in the test set).
#
testOnSubsetA = False
else:
#
# However, if there is no subsetting (everything is subset A), then you need
# to use the test data from subset A, otherwise there is no test data. Hence
# the flag.
#
lg.info("No subsetting in input data")
data.loc[:, 'Subset'] = 'A'
testOnSubsetA = True
non_attribute_column_names.append('Subset')
#TODO: to exclude insurance, language, religion, marital status and ethnicity from the data, uncomment the following line
#non_attribute_column_names += ['INSURANCE', 'LANGUAGE', 'RELIGION', 'MARITAL_STATUS', 'ETHNICITY']
#
# The function ICD9_categorizer() coarsens the ICD codes to a higher level
# by dropping the last code digit - but, it looks like there may be some
# issues with the original code as it treats the ICD codes as numbers rather
# than strings and so doesn't take into account the semantically meaningful
# leading and trailing zeros.
#
data=ICD9_categorizer(data)
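#
# Hypothetical sketch (not the behaviour of ICD9_categorizer above): a
# string-based coarsening that keeps the meaningful leading/trailing zeros
# might look roughly like this, applied to each ICD9 code column:
#
#   def coarsen_icd9(code):
#       code = str(code).strip()
#       if code.startswith('E'):   # external-cause codes use a 4-character category
#           return code[:4]
#       return code[:3]            # diagnosis and V codes use a 3-character category
#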
model_type='XGB'
PREDICTIONS=list()
current_folder=writing_address
#
# Loop over target labels to predict
#
for idx, label_column_name in enumerate(['IsReadmitted_'+outcome_label for outcome_label in outcome_labels]):
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# icu_stays=data['ICUSTAY_ID'].values
# y=data[label_column_name].values
# X=data.drop(non_attribute_column_names, axis=1)
#
#
# Subsetting
#
# Labels to predict (sklearn format)
y=data.loc[data['Subset'] == "A", label_column_name].values
y_testB = data.loc[data['Subset'] == "B", label_column_name].values
# Input features
X = data.loc[data['Subset'] == "A", :].drop(non_attribute_column_names, axis=1)
X_testB = data.loc[data['Subset'] == "B", :].drop(non_attribute_column_names, axis=1)
# Output folder
current_subfolder=current_folder+'/'+outcome_labels[idx]
create_subfolder_if_not_existing(current_subfolder)
auc_list=list()
ICUstayID=list()
Prediction=list()
accumulative_feature_importance=None
print ('\n',model_type, ' '*5,'LABEL: ', outcome_labels[idx])
skf=StratifiedKFold(n_splits=num_of_folds, shuffle=True, random_state=rnd.randint(1,1e6))
#
# Loop over folds
# - Each fold is a train/test split, with the test being used for the final score
#
fold_number=0
for train_index, test_index in skf.split(X, y):
fold_number+=1
print ('\n fold',fold_number)
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# X_train, X_test = X.iloc[train_index], X.iloc[test_index]
# y_train, y_test = y[train_index], y[test_index]
# icustay_id_train, icustay_id_test=icu_stays[train_index],icu_stays[test_index]
#
X_train = X.iloc[train_index]
y_train = y[train_index]
if testOnSubsetA == True:
X_test = pd.concat([X_testB, X.iloc[test_index]])
y_test = np.concatenate((y_testB, y[test_index]))
else:
X_test = X_testB
y_test = y_testB
lg.debug("len X_test: {}, len y_test: {}".format(len(X_test), len(y_test)))
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# icustay_id_train, icustay_id_test=icu_stays[train_index],icu_stays[test_index]
#
icustay_id_train = (data.loc[data['Subset'] == "A", 'ICUSTAY_ID'].values)[train_index]
testB = data.loc[data['Subset'] == "B", 'ICUSTAY_ID'].values
if testOnSubsetA == True:
testA = (data.loc[data['Subset'] == "A", 'ICUSTAY_ID'].values)[test_index]
icustay_id_test = np.concatenate((testB, testA))
else:
icustay_id_test = testB
lg.debug("len icustay_id_test: {}".format(len(icustay_id_test)))
# Fill in missing values in train and test sets
[X_TRAIN_IMPUTED, X_TEST_IMPUTED]=train_test_imputer(X_train, X_test, categorical_column_names)
if normalize_data:
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=train_test_normalizer(X_TRAIN_IMPUTED, X_TEST_IMPUTED, categorical_column_names)
else:
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=[X_TRAIN_IMPUTED, X_TEST_IMPUTED]
# Do one-hot encoding for categorical variables
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=train_test_one_hot_encoder(X_TRAIN_NORMALIZED, X_TEST_NORMALIZED, categorical_column_names, possible_values)
if save_folds_data:
# Save the train and test inputs for this fold
save_fold_data(current_subfolder, fold_number, icustay_id_train, X_TRAIN_NORMALIZED, y_train, icustay_id_test, X_TEST_NORMALIZED, y_test, convert_names=True, conversion_tables_address=conversion_tables_address)
[max_depths, n_estimators, learning_rates]=values_for_grid_search
#
# Grid search to find best hyperparams
# - Hyper params picked per fold (?)
# - Hyper params picked using nested k-fold with 2 folds (?)
#
best_settings=grid_search(X=X_TRAIN_NORMALIZED, y=y_train, num_of_folds=2, verbose=True, return_auc_values=False, first_dim=max_depths, second_dim=n_estimators, third_dim=learning_rates)
print ('{:<4s}{:<16s}: max_depth: {:<1s}, n_estimators: {:<2s}, learning_rate: {:<2s}'.format('','best hyperparameters', str(best_settings[0]), str(best_settings[1]), str(best_settings[2])))
model=XGBClassifier(max_depth=int(best_settings[0]), n_estimators=int(best_settings[1]), learning_rate=best_settings[2])
#
# Do the actual training (with the best hyperparams)
#
model.fit(X_TRAIN_NORMALIZED, y_train)
feature_importance=model.feature_importances_
accumulative_feature_importance=feature_importance_updator(accumulative_feature_importance, feature_importance)
# Dump the feature importances to file
pd.DataFrame(data={'FEATURE_NAME': convert_numbers_to_names(X_TRAIN_NORMALIZED.columns, conversion_tables_address), 'IMPORTANCE': feature_importance}).sort_values(by='IMPORTANCE', ascending=False).reset_index(drop=True).to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_ranked_feature_importances.csv')
#
# Make the predictions on the test set
#
predictions=model.predict_proba(X_TEST_NORMALIZED)[:,1]
# Append results to an array (?)
# These variables seem to be only assigned to, never used
ICUstayID=np.append(ICUstayID,icustay_id_test)
Prediction=np.append(Prediction,predictions)
# Write stuff out...
lg.debug("Vector lengths: 1 icustay_id_test: {}, 2 predictions: {}, 3 y_test: {}".format(len(icustay_id_test), len(predictions), len(y_test)))
vectors_to_csv(current_subfolder, file_name='fold_'+str(fold_number), vector_one=icustay_id_test, label_one='ICUSTAY_ID', vector_two=predictions, label_two='PREDICTION', vector_three=y_test, label_three='LABEL')
auc=roc_auc_score(y_true=y_test, y_score=predictions)
auc_list.append(auc)
ROC=roc(predicted=predictions, labels=y_test)
ROC.to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_roc.csv')
maximum=maximize_roc(ROC, maximization_criteria='fscore')
maximum.to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_optimum_point.csv')
TPR, FPR = ROC['recall'].values, 1-ROC['specificity']
# Minor change here to allow different figure formats
figtype = 'png'
save_roc_curve(current_subfolder+'/'+'fold_'+str(fold_number)+'_roc_curve.'+figtype, TPR, FPR, auc)
pickle.dump(model, open(current_subfolder+'/'+'fold_'+str(fold_number)+'.model','wb'))
print (' '+'-'*30)
feature_importance_saver(address=current_subfolder, col_names=convert_numbers_to_names(X_TRAIN_NORMALIZED.columns, conversion_tables_address), accumulative_feature_importance=accumulative_feature_importance, num_of_folds=num_of_folds)
# Minor change here to avoid complications with python generator functions
vectors_to_csv(current_subfolder, file_name='folds_AUC', vector_one=auc_list, label_one='AUC', vector_two=list(range(1,num_of_folds+1)), label_two='FOLD_NUMBER')
gc.collect()
current_folder=writing_address
min_max_mean_auc_for_labels(current_folder, outcome_labels)
feature_rankings_among_all_labels_saver(current_folder,outcome_labels, conversion_tables_address)
if __name__=='__main__':
file_name = sys.argv[1]
data_address = sys.argv[2]
writing_address = sys.argv[3]
main(file_name, data_address, writing_address)
|
py | 1a4af9893054a47d662c85341d5f4a57cc99ed17 | from acmacs_py import *
from .. import utils
from .log import Log
import acmacs
# ----------------------------------------------------------------------
class MapMaker:
def __init__(self, chain_setup, minimum_column_basis, log :Log):
self.chain_setup = chain_setup
self.minimum_column_basis = minimum_column_basis
self.log = log
def individual_map_directory_name(self):
return f"i-{self.minimum_column_basis}"
def command(self, source :Path, target :Path):
"""returns command (list) or None if making is not necessary (already made)"""
target.parent.mkdir(parents=True, exist_ok=True)
if utils.older_than(target, source):
if self.process(source):
return [self.command_name(), *self.command_args(), "--grid-json", target.with_suffix(".grid.json"), self.preprocess(source, target.parent), target]
else:
self.log.info(f"{target} ignored")
return None
else:
# self.log.info(f"{target} up to date")
return None
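# Illustrative shape of the list returned by command() above; every value is
# hypothetical and depends on the chain_setup in use:
#   ["chart-relax-grid", "-n", 100, "-d", 2, "-m", "none",
#    "--keep-projections", 10, "--grid-json", Path("map.grid.json"),
#    Path("table.ace"), Path("map.ace")]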
def command_name(self):
return "chart-relax-grid"
def command_args(self):
return [
"-n", self.chain_setup.number_of_optimizations(),
"-d", self.chain_setup.number_of_dimensions(),
"-m", self.minimum_column_basis,
*self.args_keep_projections(),
*self.args_reorient(),
*self.args_disconnect()
]
def args_keep_projections(self):
return ["--keep-projections", self.chain_setup.projections_to_keep()]
def args_reorient(self):
reorient_to = self.chain_setup.reorient_to()
if reorient_to:
return ["--reorient", reorient_to]
else:
return []
def args_disconnect(self):
if not self.chain_setup.disconnect_having_few_titers():
return ["--no-disconnect-having-few-titers"]
else:
return []
def process(self, source):
return True
def preprocess(self, source :Path, output_directory :Path):
return source
@classmethod
def add_threads_to_commands(cls, threads :int, commands :list):
"""Modifies commands to make it limit threads number. Returns modified command"""
return [command + ["--threads", threads] for command in commands]
# ----------------------------------------------------------------------
class MapMakerInSteps (MapMaker):
"""
1. multiple chart-relax (without grid) to run on multiple machines (nodes)
2. combine results
3. multiple chart-grid-test for the best result of 2, for different sets of antigens and sera to run on multiple nodes
4. combine results, move trapped points, relax, then repeat 3
"""
# ----------------------------------------------------------------------
class IndividualMapMaker (MapMaker):
def __init__(self, *args, ignore_tables_with_too_few_sera, **kwargs):
super().__init__(*args, **kwargs)
self.ignore_tables_with_too_few_sera = ignore_tables_with_too_few_sera
def process(self, source):
return not self.ignore(source)
def preprocess(self, source :Path, output_directory :Path):
return self.chain_setup.individual_table_preprocess(source, output_directory=output_directory)
def ignore(self, source):
if self.ignore_tables_with_too_few_sera:
if isinstance(source, acmacs.Chart):
chart = source
chart_name = chart.make_name()
else:
chart = acmacs.Chart(source)
chart_name = source
if chart.number_of_antigens() < 3 or chart.number_of_sera() < 3:
self.log.info(f"chart has too few antigens ({chart.number_of_antigens()}) or sera ({chart.number_of_sera()}), ignored ({chart_name})")
return True
return False
# ----------------------------------------------------------------------
class IndividualMapWithMergeColumnBasesMaker (IndividualMapMaker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.output_dir_name = output_dir_name
self.source = None # nothing to do
self.target = None # nothing to do
def prepare(self, source :Path, merge_column_bases :dict, merge_path :Path, output_dir :Path, output_prefix :str):
self.log.info(f"Individual table map ({source.name}) with column bases from the merge ({merge_path.name})")
chart = acmacs.Chart(self.preprocess(source, output_directory=output_dir))
mcb_source = output_dir.joinpath(f"{output_prefix}{chart.date()}.mcb-table{source.suffix}")
mcb_target = output_dir.joinpath(f"{output_prefix}{chart.date()}.mcb{source.suffix}")
if utils.older_than(mcb_target, source):
if not self.ignore(chart):
cb = chart.column_bases(self.minimum_column_basis)
orig_cb = str(cb)
updated = False
for sr_no, serum in chart.select_all_sera():
mcb = merge_column_bases.get(serum.name_full())
if mcb is None:
message = f"No column basis for {serum.name_full()} in the merge column bases (source: {source.name}:\n{pprint.pformat(merge_column_bases, width=200)}"
self.log.info(f"ERROR {message}")
raise RuntimeError(message)
if mcb != cb[sr_no]:
if mcb < cb[sr_no]:
self.log.info(f"Column basis for {serum.name_full()} in the merge ({mcb}) is less than in the individual table ({cb[sr_no]})")
cb[sr_no] = mcb
updated = True
if updated:
chart.column_bases(cb)
self.log.info(f"{mcb_source} <-- {source}: column basis updated from merge:\n orig: {orig_cb}\n new: {cb}")
self.source = mcb_source
self.target = mcb_target
chart.export(self.source, program_name=sys.argv[0])
else:
self.log.info("column basis in the merge are the same as in the original individual table")
# else:
# self.log.info(f"{mcb_source} up to date")
self.log.separator(newlines_before=1)
# ----------------------------------------------------------------------
class IncrementalMapMaker (MapMaker):
def command_name(self):
return "chart-relax-incremental"
def command_args(self):
return [
"-n", self.chain_setup.number_of_optimizations(),
"--grid-test",
"--remove-source-projection",
*self.args_keep_projections(),
# *self.args_reorient(),
*self.args_disconnect()
]
# ----------------------------------------------------------------------
def extract_column_bases(chart):
return {serum.name_full(): chart.column_basis(sr_no) for sr_no, serum in chart.select_all_sera()}
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
|
py | 1a4afa39ed689f62142ac81ee23f44cc9d39ce2a | import json
import os
from flask import Flask, render_template, redirect, request
import tv
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
BUTTONS = {}
@app.route('/')
def index():
return render_template('index.html',
tv_state=tv.get_state(),
buttons=BUTTONS.values())
@app.route('/off')
def hello_world():
tv.off()
return redirect("/", code=302)
@app.route('/button/<btn>')
def button(btn):
b = BUTTONS.get(btn)
if b:
tv.do_script(b['script'])
return redirect("/", code=302)
@app.route('/shutdown')
def shutdown():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
def load_buttons():
dir, file = os.path.split(os.path.abspath(__file__))
with open(os.path.join(dir, 'buttons.json')) as json_data:
btns = json.load(json_data)
for btn in btns:
BUTTONS[btn["id"]] = btn
if __name__ == "__main__":
load_buttons()
try:
app.run(host='0.0.0.0', port=5000)
finally:
tv.cleanup()
|
py | 1a4afa5ace8551380689ac00050663854528ee5b | # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import sys
from env import gidgetConfigVars
import miscClin
import miscTCGA
import path
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getFeatList(fName):
fList = []
fh = file(fName)
for aLine in fh:
aLine = aLine.strip()
# print aLine
if aLine not in fList:
fList += [aLine]
fList.sort()
return (fList)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def findKey ( allClinDict, keyStr ):
for aKey in allClinDict.keys():
if ( aKey == keyStr ): return ( aKey )
tmpStr = ":" + keyStr + ":"
if ( aKey.find(tmpStr) >= 0 ): return ( aKey )
print " NOT found ? ", keyStr
keyStr = keyStr.lower()
for aKey in allClinDict.keys():
bKey = aKey.lower()
if ( bKey.find(keyStr) >= 0 ):
print " possible match: ", aKey
print " WARNING !!! failed to findKey in reParseClin_CESC "
sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def assignBMIcategory ( bmi ):
if ( bmi < 18.5 ): return ( "underweight" )
if ( bmi < 25 ): return ( "normal" )
if ( bmi < 30 ): return ( "overweight" )
return ( "obese" )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def addBMI ( allClinDict ):
bmiVec = []
catVec = []
weightKey = findKey ( allClinDict, "weight" )
heightKey = findKey ( allClinDict, "height" )
for ii in range(len(allClinDict[weightKey])):
w = allClinDict[weightKey][ii]
h = allClinDict[heightKey][ii]
try:
bmi = float(w) / ( float(h/100.) * float(h/100.) )
bmiCat = assignBMIcategory ( bmi )
## print w, h, bmi, bmiCat
catVec += [ bmiCat ]
bmiVec += [ bmi ]
except:
## print w, h, "NA"
## if ( w != "NA" ): print " weight is not NA ??? "
## if ( h != "NA" ): print " height is not NA ??? "
bmiVec += [ "NA" ]
catVec += [ "NA" ]
allClinDict["N:CLIN:BMI:::::"] = bmiVec
allClinDict["C:CLIN:BMIcat:::::"] = catVec
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict["N:CLIN:BMI:::::"] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict["C:CLIN:BMIcat:::::"] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkMenopause ( allClinDict ):
print " in checkMenopause ... "
print " "
newVec = []
menopauseKey = findKey ( allClinDict, "menopause_status" )
ageKey = findKey ( allClinDict, "age_at_initial_pathologic_diagnosis" )
for ii in range(len(allClinDict[ageKey])):
m = allClinDict[menopauseKey][ii]
a = allClinDict[ageKey][ii]
if ( m.startswith("Pre_") ):
newVec += [ "Pre" ]
elif ( m.startswith("Post_") ):
newVec += [ "Post" ]
elif ( a >= 50 ):
newVec += [ "Post" ]
else:
newVec += [ "Pre" ]
allClinDict["C:CLIN:menopause50:::::"] = newVec
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict["C:CLIN:menopause50:::::"] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def addAgeSplits ( allClinDict ):
print " in addAgeSplits ... "
print " "
ageKey = findKey ( allClinDict, "age_at_initial_pathologic_diagnosis" )
numP = len(allClinDict[ageKey])
youngMax = [ 30, 35, 40, 45, 50, 40, 40, 35 ]
oldMin = [ 30, 35, 40, 45, 50, 45, 50, 55 ]
numC = len(youngMax)
newVecs = [0] * numC
for iC in range(numC):
newVecs[iC] = ["NA"] * numP
for ii in range(numP):
a = allClinDict[ageKey][ii]
if ( a != "NA" ):
for iC in range(numC):
if ( a <= youngMax[iC] ):
newVecs[iC][ii] = "young"
elif ( a > oldMin[iC] ):
newVecs[iC][ii] = "old"
for iC in range(numC):
if ( youngMax[iC] == oldMin[iC] ):
keyString = "B:CLIN:ageSplit_%d:::::" % ( youngMax[iC] )
else:
keyString = "B:CLIN:ageSplit_%d_%d:::::" % ( youngMax[iC], oldMin[iC] )
print keyString
allClinDict[keyString] = newVecs[iC]
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkCancerStatus ( allClinDict ):
print " in checkCancerStatus ... "
print " "
newSite = []
nteKey = findKey ( allClinDict, "new_tumor_event_after_initial_treatment" )
siteKey = findKey ( allClinDict, "new_neoplasm_event_occurrence_anatomic_site" )
typeKey = findKey ( allClinDict, "new_neoplasm_event_type" )
textKey = findKey ( allClinDict, "new_neoplasm_occurrence_anatomic_site_text" )
days2nteKey = findKey ( allClinDict, "days_to_new_tumor_event_after_initial_treatment" )
numP = len(allClinDict[nteKey])
for ii in range(numP):
if ( allClinDict[nteKey][ii] == "YES" ):
siteStr = allClinDict[siteKey][ii]
if ( 1 ):
if ( siteStr == "Other_specify" ):
siteStr = allClinDict[textKey][ii]
elif ( siteStr == "NA" ):
siteStr = allClinDict[textKey][ii]
if ( siteStr != "NA" ): siteStr = siteStr.lower()
newSite += [ siteStr ]
if ( 0 ):
print " "
print ii
## print " site : ", allClinDict[siteKey][ii]
print " type : ", allClinDict[typeKey][ii]
## print " text : ", allClinDict[textKey][ii]
print " siteStr : ", siteStr
print " days : ", allClinDict[days2nteKey][ii]
else:
newSite += [ "NA" ]
## the types of things I'm seeing are:
## type: Distant_Metastasis
## --> then 'site' sometimes gives the location, or else says "Other_specify"
## in which case the 'text' might give the location
## also note that the "days_to_nte" ranges from 62 to 2893 (the lowest numbers are 62, 77, 93, 94, 153, 178...)
keyString = "C:CLIN:nte_site:::::"
allClinDict[keyString] = newSite
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkTumorStatus ( allClinDict ):
print " in checkTumorStatus ... "
print " "
newStatus1 = []
newStatus2 = []
statusKey = findKey ( allClinDict, "person_neoplasm_cancer_status" )
days2fupKey = findKey ( allClinDict, "days_to_last_followup" )
vitalKey = findKey ( allClinDict, "vital_status" )
days2deathKey = findKey ( allClinDict, "days_to_death" )
numP = len(allClinDict[statusKey])
for ii in range(numP):
days2last = -1
if ( allClinDict[days2fupKey][ii] != "NA" ):
days2last = allClinDict[days2fupKey][ii]
if ( allClinDict[days2deathKey][ii] != "NA" ):
days2last = max ( allClinDict[days2deathKey][ii], days2last )
if ( 0 ):
print " "
print " "
print ii
print " status : ", allClinDict[statusKey][ii]
print " vital : ", allClinDict[vitalKey][ii]
## print " days2fup : ", allClinDict[days2fupKey][ii]
## print " days2death : ", allClinDict[days2deathKey][ii]
print " days2last : ", days2last
if ( allClinDict[vitalKey][ii] == "Alive" ):
if ( days2last < 90 ):
print " Alive and less than 90 days ", allClinDict[statusKey][ii]
newStatus1 += [ "NA" ]
if ( allClinDict[statusKey][ii] == "TUMOR_FREE" and days2last >= 90 ):
newStatus1[-1] = "TUMOR_FREE"
elif ( allClinDict[statusKey][ii] == "WITH_TUMOR" and days2last >= 90 ):
newStatus1[-1] = "WITH_TUMOR"
newStatus2 += [ "NA" ]
if ( allClinDict[statusKey][ii] == "TUMOR_FREE" and allClinDict[vitalKey][ii] == "Alive" ):
newStatus2[-1] = "Alive_woTumor"
elif ( allClinDict[statusKey][ii] == "WITH_TUMOR" and allClinDict[vitalKey][ii] == "Dead" ):
newStatus2[-1] = "Dead_wTumor"
## as of 13aug ... there are 57 patients who are alive and have less than 90 days of follow-up
## of these: 24 are "tumor_free"
## 17 are "NA"
## 16 are "with_tumor"
keyString = "C:CLIN:tumorStatus1:::::"
allClinDict[keyString] = newStatus1
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
keyString = "C:CLIN:tumorStatus2:::::"
allClinDict[keyString] = newStatus2
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkHistologicGrade ( allClinDict ):
print " in checkHistologicGrade ... "
print " "
gradeKey = findKey ( allClinDict, "neoplasm_histologic_grade" )
numP = len(allClinDict[gradeKey])
for ii in range(numP):
if ( allClinDict[gradeKey][ii] == "G4" ):
print " changing to G3 ... ", ii, gradeKey, allClinDict[gradeKey][ii]
allClinDict[gradeKey][ii] = "G3"
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkClinicalStage ( allClinDict ):
print " in checkClinicalStage ... "
print " "
newStage = []
stageKey = findKey ( allClinDict, "clinical_stage" )
TstageKey = findKey ( allClinDict, "pathologic_T" )
barcodeKey = findKey ( allClinDict, "bcr_patient_barcode" )
numP = len(allClinDict[stageKey])
for ii in range(numP):
if ( allClinDict[TstageKey][ii].startswith("T1a") ):
allClinDict[TstageKey][ii] = "T1b1"
curStage = allClinDict[stageKey][ii]
if ( curStage.startswith("IV") ):
newStage += [ "III,IV" ]
elif ( curStage.startswith("III") ):
newStage += [ "III,IV" ]
elif ( curStage.startswith("II") ):
newStage += [ "II" ]
elif ( curStage.startswith("I") ):
newStage += [ "I" ]
else:
newStage += [ "NA" ]
if ( 0 ):
print " "
print " "
print ii, allClinDict[barcodeKey][ii], allClinDict[stageKey][ii], allClinDict[TstageKey][ii]
## as of 22sep ... there is stage info for 240 patients, and the counts
## look like this:
## 70 IB1
## 35 IB
## 34 IB2
## 33 IIIB
## 26 IIB
## 7 IIA2
## 7 IIA
## 5 IVB
## etc
## after grouping, we get 147 stage I (61%), 49 stage II (20%), and 44 stage III,IV (18%)
keyString = "C:CLIN:clinStage:::::"
allClinDict[keyString] = newStage
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# this started out as a function to deal with lymph-node features but then
# was augmented to handle hysterectomy- and diagnosis-related features ...
def checkLymphNodes_HystDx ( allClinDict ):
print " in checkLymphNodes_HystDx ... "
print " "
newHyst = []
newDxM = []
numLNpos = []
tfLNpos = []
## here we have 138 'radical', 6 'simple', and 5 'other'
hysTypeKey = findKey ( allClinDict, "hysterectomy_performed_type" )
hysTextKey = findKey ( allClinDict, "hysterectomy_performed_text" )
dxMeth1Key = findKey ( allClinDict, "initial_pathologic_diagnosis_method" )
dxMeth2Key = findKey ( allClinDict, "init_pathology_dx_method_other" )
barKey = findKey ( allClinDict, "bcr_patient_barcode" )
LNEcountKey = findKey ( allClinDict, "lymph_node_examined_count" )
LNEposHEkey = findKey ( allClinDict, "number_of_lymphnodes_positive_by_he" )
LNEposIHCkey = findKey ( allClinDict, "number_of_lymphnodes_positive_by_ihc" )
numP = len(allClinDict[hysTypeKey])
for ii in range(numP):
if ( 0 ):
print " "
print " "
print " patient index ", ii, allClinDict[barKey][ii]
if ( allClinDict[hysTypeKey][ii] == "NA" and allClinDict[hysTextKey][ii] == "NA" ):
newHyst += [ "NO_or_NA" ]
else:
newHyst += [ "YES" ]
## here we want to figure out what method was used for diagnosis ...
newDxM += [ "NA" ]
dxMethod = "NA"
if ( allClinDict[hysTypeKey][ii].lower().find("hysterect") >= 0 ):
if ( allClinDict[hysTypeKey][ii].lower().find("radical") >= 0 ):
dxMethod = "radical_hysterectomy"
elif ( allClinDict[hysTypeKey][ii].lower().find("simple") >= 0 ):
dxMethod = "simple_hysterectomy"
elif ( allClinDict[hysTypeKey][ii].lower().find("total_abd") >= 0 ):
dxMethod = "total_abdominal_hysterectomy"
if ( allClinDict[hysTextKey][ii].lower().find("hysterect") >= 0 ):
if ( allClinDict[hysTextKey][ii].lower().find("radical") >= 0 ):
dxMethod = "radical_hysterectomy"
elif ( allClinDict[hysTextKey][ii].lower().find("simple") >= 0 ):
dxMethod = "simple_hysterectomy"
elif ( allClinDict[hysTextKey][ii].lower().find("total_abd") >= 0 ):
dxMethod = "total_abdominal_hysterectomy"
if ( dxMethod == "NA" ):
if ( allClinDict[dxMeth1Key][ii].lower().find("cone") >= 0 ):
dxMethod = "cone_biopsy"
if ( dxMethod == "NA" ):
if ( allClinDict[dxMeth2Key][ii].lower().find("cone") >= 0 ):
dxMethod = "cone_biopsy"
if ( dxMethod == "NA" ):
if ( allClinDict[dxMeth1Key][ii].lower().find("biops") >= 0 ):
dxMethod = "biopsy"
if ( dxMethod == "NA" ):
if ( allClinDict[dxMeth2Key][ii].lower().find("biops") >= 0 ):
dxMethod = "biopsy"
if ( dxMethod == "NA" ):
if ( allClinDict[hysTypeKey][ii] != "NA" ): dxMethod = "other"
if ( allClinDict[hysTextKey][ii] != "NA" ): dxMethod = "other"
if ( allClinDict[dxMeth1Key][ii] != "NA" ): dxMethod = "other"
if ( allClinDict[dxMeth2Key][ii] != "NA" ): dxMethod = "other"
if ( dxMethod == "other" ): print " setting dxMethod to OTHER ", ii, \
allClinDict[hysTypeKey][ii], allClinDict[hysTextKey][ii], \
allClinDict[dxMeth1Key][ii], allClinDict[dxMeth2Key][ii]
newDxM[-1] = dxMethod
numPos = 0
if ( allClinDict[LNEposHEkey][ii] != "NA" ):
numPos += allClinDict[LNEposHEkey][ii]
if ( allClinDict[LNEposIHCkey][ii] != "NA" ):
numPos += allClinDict[LNEposIHCkey][ii]
if ( (allClinDict[LNEposHEkey][ii] == "NA") and (allClinDict[LNEposIHCkey][ii] == "NA") ):
numLNpos += [ "NA" ]
tfLNpos += [ "NA" ]
else:
numLNpos += [ numPos ]
if ( numPos == 0 ):
tfLNpos += [ "FALSE" ]
else:
tfLNpos += [ "TRUE" ]
if ( 0 ):
if ( allClinDict[hysTypeKey][ii] == "NA" ):
if ( allClinDict[hysTextKey][ii] != "NA" ):
print " text filled out but not type "
if ( allClinDict[hysTextKey][ii] == "NA" ):
if ( allClinDict[hysTypeKey][ii] != "NA" ):
print " type filled out but not text "
print " hysTypeKey : ", allClinDict[hysTypeKey][ii]
print " hysTextKey : ", allClinDict[hysTextKey][ii]
print " lymph nodes : ", allClinDict[LNEcountKey][ii], \
allClinDict[LNEposHEkey][ii], \
allClinDict[LNEposIHCkey][ii]
print " done working through each patient ... "
print len(newHyst), len(newDxM), len(tfLNpos), len(numLNpos)
print " "
keyString = "C:CLIN:hysterectomy:::::"
allClinDict[keyString] = newHyst
print " (a) ", keyString, newHyst
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
keyString = "C:CLIN:dx_method:::::"
allClinDict[keyString] = newDxM
print " (b) ", keyString, newDxM
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
keyString = "C:CLIN:LNposTF:::::"
allClinDict[keyString] = tfLNpos
print " (c) ", keyString, tfLNpos
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
keyString = "N:CLIN:numLNpos:::::"
allClinDict[keyString] = numLNpos
print " (3) ", keyString, numLNpos
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print " DONE DONE DONE "
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def makeMergedDx ( allClinDict ):
print " in makeMergedDx ... "
print " "
mergeDx = []
epcReview = []
histTypeKey = findKey ( allClinDict, "histological_type" )
epcDxKey = findKey ( allClinDict, "C:CLIN:Dx_EPC" )
barKey = findKey ( allClinDict, "bcr_patient_barcode" )
numP = len(allClinDict[histTypeKey])
for ii in range(numP):
if ( 1 ):
print " "
print " "
print " patient index ", ii, allClinDict[barKey][ii], allClinDict[histTypeKey][ii], allClinDict[epcDxKey][ii]
## expected possible values for the histological_type field:
## 206 Cervical_Squamous_Cell_Carcinoma
## 23 Endocervical_Type_of_Adenocarcinoma
## 6 Mucinous_Adenocarcinoma_of_Endocervical_Type
## 5 Adenosquamous
## 4 Endometrioid_Adenocarcinoma_of_Endocervix
## 4 Endocervical_Adenocarcinoma_of_the_Usual_Type
## 70 NA
## expected values for Dx_EPC field:
## 4 Adenosquamous
## 27 Endocervical_Adeno
## 123 NA
## 99 Squamous
if ( allClinDict[epcDxKey][ii] != "NA" ):
epcReview += [ "TRUE" ]
else:
epcReview += [ "FALSE" ]
if ( allClinDict[epcDxKey][ii] != "NA" ):
mergeDx += [ allClinDict[epcDxKey][ii] ]
else:
if ( allClinDict[histTypeKey][ii] == "Cervical_Squamous_Cell_Carcinoma" ):
mergeDx += [ "Squamous" ]
elif ( allClinDict[histTypeKey][ii] == "Endocervical_Type_of_Adenocarcinoma" ):
mergeDx += [ "Adenocarcinoma" ]
elif ( allClinDict[histTypeKey][ii] == "Mucinous_Adenocarcinoma_of_Endocervical_Type" ):
mergeDx += [ "Adenocarcinoma" ]
elif ( allClinDict[histTypeKey][ii] == "Adenosquamous" ):
mergeDx += [ "Adenosquamous" ]
elif ( allClinDict[histTypeKey][ii] == "Endometrioid_Adenocarcinoma_of_Endocervix" ):
mergeDx += [ "Adenocarcinoma" ]
elif ( allClinDict[histTypeKey][ii] == "Endocervical_Adenocarcinoma_of_the_Usual_Type" ):
mergeDx += [ "Adenocarcinoma" ]
elif ( allClinDict[histTypeKey][ii] == "NA" ):
mergeDx += [ "NA" ]
else:
print " ERROR ??? we should not be here ... ", ii, allClinDict[barKey][ii], \
allClinDict[histTypeKey][ii], allClinDict[epcDxKey][ii]
## just double-checking terminology one more time ...
if ( mergeDx[-1] == "Endocervical_Adeno" ):
mergeDx[-1] = "Adenocarcinoma"
keyString = "C:CLIN:Dx_merged:::::"
allClinDict[keyString] = mergeDx
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
keyString = "C:CLIN:EPC_review:::::"
allClinDict[keyString] = epcReview
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[keyString] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
print labelList
return ( allClinDict )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
if ( len(sys.argv)==4 ):
tumorString = sys.argv[1]
dateString = sys.argv[2]
featureList = sys.argv[3]
else:
print " "
print " Usage: %s <tumor-type> <run-id> <feature-list> " % sys.argv[0]
print " "
print " ERROR -- bad command line arguments "
sys.exit(-1)
print " "
print " Running : %s %s %s %s " % (sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3])
print " "
print " "
listDict = {}
# read in the current clinical file ...
topDir = "%s/%s/%s" % (gidgetConfigVars['TCGAFMP_DATA_DIR'], tumorString, dateString)
clin1name = topDir + "/" + "%s.clinical.%s.tsv" % ( tumorString, dateString )
print clin1name
allClinDict = tsvIO.readTSV ( clin1name )
# find out which features are interesting ...
# BUT IS THIS REALLY COMPLETELY NOT NECESSARY ???
# was this just for debugging purposes ???
fList = getFeatList ( featureList )
for aF in fList:
print aF
for aKey in allClinDict.keys():
if ( aKey[1] == ":" ):
aTokens = aKey.split(':')
tKey = aTokens[2]
else:
tKey = aKey
if ( aF == tKey ):
( keyType, nCount, naCount, cardCount, labelList, labelCount ) = miscClin.lookAtKey ( allClinDict[aKey] )
print " %s N=%d NA=%d not-NA=%d card=%d " % ( keyType, nCount, naCount, (nCount-naCount), cardCount ), labelCount
if ( keyType != "NUMERIC" ): print labelList
print " "
print " "
# now we need to do some massaging and computing ...
try:
allClinDict = addBMI ( allClinDict )
except:
print " addBMI function failed "
try:
allClinDict = checkMenopause ( allClinDict )
except:
print " checkMenopause function failed "
try:
allClinDict = addAgeSplits ( allClinDict )
except:
print " addAgeSplits function failed "
try:
allClinDict = checkCancerStatus ( allClinDict )
except:
print " checkCancerStatus function failed "
try:
allClinDict = checkTumorStatus ( allClinDict )
except:
print " checkTumorStatus function failed "
try:
allClinDict = checkHistologicGrade ( allClinDict )
except:
print " checkHistologicGrade function failed "
try:
allClinDict = checkClinicalStage ( allClinDict )
except:
print " checkClinicalStage function failed "
try:
allClinDict = checkLymphNodes_HystDx ( allClinDict )
except:
print " checkLymphNodes_HystDx function failed "
try:
allClinDict = makeMergedDx ( allClinDict )
except:
print " makeMergedDx function failed "
print " FINISHED creating and modifying CESC features ... "
# now we're ready to re-write this ...
(naCounts, otherCounts) = miscClin.lookAtClinDict(allClinDict)
print " --> getting bestKeyOrder ... "
bestKeyOrder = miscClin.getBestKeyOrder(allClinDict, naCounts)
outName = topDir + "/" + "%s.clinical.%s.cesc.tsv" % ( tumorString, dateString )
print " --> writing output to ", outName
tsvIO.writeTSV_clinical ( allClinDict, bestKeyOrder, outName )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
py | 1a4afc3fb9c1e6a81a9799dcf2a4545a681dd712 | import numpy as np
import random
import copy
from collections import namedtuple, deque
from ddpg_models import Actor, Critic
from ou_noise import OUNoise
from replay_buffer import ReplayBuffer
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 1024 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, random_seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
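# Illustrative construction (the sizes are hypothetical, e.g. a continuous
# control task with 33 state dimensions and 4 action dimensions):
#   agent = Agent(state_size=33, action_size=4, random_seed=2)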
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
self.memory.add(state, action, reward, next_state, done)
# Learn, if enough samples are available in memory
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
"""reset the noise function values"""
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
|
py | 1a4afc8f03ebbb61336dd27db756f51e395a344c | """
Post Cookie Generation script(s)
These scripts are executed from the output folder.
If any error is raised, the cookie cutter creation fails and crashes
"""
import os
import subprocess as sp
cpp_driver = """#include <iostream>
#include <mpi.h>
#include <stdexcept>
#include <string.h>
#include "mdi.h"
using namespace std;
int main(int argc, char **argv) {
// Initialize the MPI environment
MPI_Comm world_comm;
MPI_Init(&argc, &argv);
// Initialize MDI
if ( MDI_Init(&argc, &argv) ) {
throw std::runtime_error("The MDI library was not initialized correctly.");
}
// Confirm that MDI was initialized successfully
int initialized_mdi;
if ( MDI_Initialized(&initialized_mdi) ) {
throw std::runtime_error("MDI_Initialized failed.");
}
if ( ! initialized_mdi ) {
throw std::runtime_error("MDI not initialized: did you provide the -mdi option?.");
}
// Get the correct MPI intra-communicator for this code
if ( MDI_MPI_get_world_comm(&world_comm) ) {
throw std::runtime_error("MDI_MPI_get_world_comm failed.");
}
// Connect to the engines
// <YOUR CODE GOES HERE>
// Perform the simulation
// <YOUR CODE GOES HERE>
// Send the "EXIT" command to each of the engines
// <YOUR CODE GOES HERE>
// Finalize MPI
MPI_Barrier(world_comm);
MPI_Finalize();
return 0;
}
"""
py_driver = """
import sys
# Import the MDI Library
try:
import mdi
except:
raise Exception("Unable to import the MDI Library")
# Import MPI Library
try:
from mpi4py import MPI
use_mpi4py = True
mpi_comm_world = MPI.COMM_WORLD
except ImportError:
use_mpi4py = False
mpi_comm_world = None
if __name__ == "__main__":
# Read the command-line options
iarg = 1
mdi_options = None
while iarg < len(sys.argv):
arg = sys.argv[iarg]
if arg == "-mdi":
mdi_options = sys.argv[iarg + 1]
iarg += 1
else:
raise Exception("Unrecognized command-line option")
iarg += 1
# Confirm that the MDI options were provided
if mdi_options is None:
raise Exception("-mdi command-line option was not provided")
# Initialize the MDI Library
mdi.MDI_Init(mdi_options)
# Get the correct MPI intra-communicator for this code
mpi_comm_world = mdi.MDI_MPI_get_world_comm()
# Connect to the engines
# Perform the simulation
# Send the "EXIT" command to each of the engines
"""
cpp_cmake = """# Compile MDI
add_subdirectory(mdi)
# Macro to convert strings to lists
macro(string_to_list _VAR _STR)
STRING(REPLACE " " " " ${_VAR} "${_STR}")
STRING(REPLACE " " ";" ${_VAR} "${_STR}")
endmacro(string_to_list _VAR _STR)
# Check for MPI
if ( NOT ( mpi STREQUAL "OFF") )
find_package(MPI)
endif()
if( NOT MPI_FOUND )
if( mpi STREQUAL "ON" )
message( WARNING "Could not find MPI. Compiling without MPI support." )
endif()
set(mpi "OFF")
endif()
# Add MPI stubs, if needed
if( mpi STREQUAL "OFF" )
list(APPEND sources "${CMAKE_CURRENT_SOURCE_DIR}/STUBS_MPI/mpi.h")
endif()
# Locate MPI
find_package(MPI)
if(MPI_FOUND)
include_directories(${MPI_INCLUDE_PATH})
else()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/STUBS_MPI/mpi.h ${CMAKE_CURRENT_BINARY_DIR}/STUBS_MPI/mpi.h COPYONLY)
endif()
# Link to MDI
#set( MDI_LOCATION ${CMAKE_BINARY_DIR}/lib/mdi/MDI_Library/ )
set( MDI_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/mdi/MDI_Library/ )
link_directories( ${MDI_LOCATION} )
include_directories(${MDI_LOCATION})
# Add the driver as a compile target
add_executable({{ cookiecutter.repo_name }}
{{ cookiecutter.repo_name }}.cpp)
# Link to the MDI Library
target_link_libraries({{ cookiecutter.repo_name }} mdi)
# Include and link to MPI
if( mpi STREQUAL "ON" )
#include MPI
string_to_list(MPI_C_COMPILE_OPTIONS "${MPI_C_COMPILE_FLAGS}")
string_to_list(MPI_C_LINK_OPTIONS "${MPI_C_LINK_FLAGS}")
target_include_directories({{ cookiecutter.repo_name }} PRIVATE ${MPI_C_INCLUDE_PATH})
target_compile_options({{ cookiecutter.repo_name }} PRIVATE ${MPI_C_COMPILE_OPTIONS})
target_link_libraries({{ cookiecutter.repo_name }} ${MPI_C_LIBRARIES} ${MPI_C_LINK_OPTIONS})
elseif( mpi STREQUAL "OFF" )
message( "Compiling without MPI." )
target_include_directories({{ cookiecutter.repo_name }} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/STUBS_MPI/)
else()
message( FATAL_ERROR "Value of mpi not recognized. Accepted values are: ON; OFF." )
endif()
"""
py_cmake = """# Compile MDI
add_subdirectory(mdi)
# Add an __init__.py to the MDI directory, so that it can be used as a package
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/mdi/__init__.py "")
# Copy the driver file into the compile directory
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/{{ cookiecutter.repo_name }}.py ${CMAKE_CURRENT_BINARY_DIR}/{{ cookiecutter.repo_name }}.py COPYONLY)
"""
def decode_string(string):
"""Helper function to covert byte-string to string, but allows normal strings"""
try:
return string.decode()
except AttributeError:
return string
def invoke_shell(command):
try:
output = sp.check_output(command, shell=True, stderr=sp.STDOUT)
except sp.CalledProcessError as e:
# Trap and print the output in a helpful way
print(decode_string(e.output), decode_string(e.returncode))
print(e.output)
raise e
print(decode_string(output))
def write_driver_file():
# Write a language-specific driver file
if "{{ cookiecutter.language }}" == "C++":
with open("{{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}.cpp","w") as f:
f.write(cpp_driver)
elif "{{ cookiecutter.language }}" =="Python":
with open("{{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}.py","w") as f:
f.write(py_driver)
else:
raise Exception("Unsupported language")
def write_cmake_file():
with open("{{ cookiecutter.repo_name }}/CMakeLists.txt","w") as f:
# Write a language-specific CMakeLists.txt file
if "{{ cookiecutter.language }}" == "C++":
f.write(cpp_cmake)
elif "{{ cookiecutter.language }}" =="Python":
f.write(py_cmake)
else:
raise Exception("Unsupported language")
def git_init_and_tag():
"""Invoke the initial git and tag with 0.0.0 to make an initial version for Versioneer to ID"""
# Write the language-specific files
write_driver_file()
write_cmake_file()
# Initialize git
invoke_shell("git init")
# Add files
invoke_shell("git add .")
invoke_shell(
"git commit -m \"Initial commit after CMS Cookiecutter creation, version {}\"".format(
'{{ cookiecutter._mdi_driver_cc_version }}'))
# Add MDI as a subtree
invoke_shell("git subtree add --prefix={{ cookiecutter.repo_name }}/mdi https://github.com/MolSSI/MDI_Library master --squash")
# Set the 0.0.0 tag
invoke_shell("git tag 0.0.0")
git_init_and_tag()
|
py | 1a4afd40837f534c58abeacec2c9f32406f0513e | import warnings
import rdflib
from rdflib import OWL, RDF, RDFS, BNode
from ..exceptions import NeuroLangNotImplementedError
from ..expressions import Constant, Symbol
from ..logic import Conjunction, Implication, Union
from .constraints_representation import RightImplication
class OntologyParser:
"""
This class is in charge of generating the rules that can be derived
from an ontology, both at entity and constraint levels.
"""
def __init__(self, paths, load_format="xml"):
self.namespaces_dic = None
self.owl_dic = None
if isinstance(paths, list):
self._load_ontology(paths, load_format)
else:
self._load_ontology([paths], [load_format])
self._triple = Symbol.fresh()
self._pointer = Symbol.fresh()
self._dom = Symbol.fresh()
self.parsed_restrictions = [
OWL.allValuesFrom,
OWL.hasValue,
OWL.minCardinality,
OWL.maxCardinality,
OWL.cardinality,
OWL.someValuesFrom,
]
def _load_ontology(self, paths, load_format):
g = rdflib.Graph()
for counter, path in enumerate(paths):
g.load(path, format=load_format[counter])
self.graph = g
def parse_ontology(self):
extensional_predicate_tuples, union_of_constraints_dom = (
self._load_domain()
)
union_of_constraints_prop = self._load_properties()
union_of_constraints = self._load_constraints()
union_of_constraints = Union(
union_of_constraints_dom.formulas
+ union_of_constraints_prop.formulas
+ union_of_constraints.formulas
)
return extensional_predicate_tuples, union_of_constraints
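# Illustrative usage (the ontology path is hypothetical):
#   parser = OntologyParser("some_ontology.owl", load_format="xml")
#   predicate_tuples, constraints = parser.parse_ontology()
# where predicate_tuples maps the fresh triple/pointer symbols to tuple sets
# and constraints is a Union of RightImplication rules.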
def get_triples_symbol(self):
return self._triple
def get_pointers_symbol(self):
return self._pointer
def get_domain_symbol(self):
return self._dom
def _load_domain(self):
pointers = frozenset(
(str(x),) for x in self.graph.subjects() if isinstance(x, BNode)
)
triples = frozenset(
(str(x[0]), str(x[1]), str(x[2])) for x in self.get_triples()
)
x = Symbol.fresh()
y = Symbol.fresh()
z = Symbol.fresh()
dom1 = RightImplication(self._triple(x, y, z), self._dom(x))
dom2 = RightImplication(self._triple(x, y, z), self._dom(y))
dom3 = RightImplication(self._triple(x, y, z), self._dom(z))
extensional_predicate_tuples = {}
extensional_predicate_tuples[self._triple] = triples
extensional_predicate_tuples[self._pointer] = pointers
union_of_constraints = Union((dom1, dom2, dom3))
return extensional_predicate_tuples, union_of_constraints
def _load_properties(self):
"""
Function that parse all the properties defined in
the ontology.
"""
x = Symbol.fresh()
z = Symbol.fresh()
constraints = ()
for pred in set(self.graph.predicates()):
symbol_name = str(pred)
symbol = Symbol(symbol_name)
const = Constant(symbol_name)
constraints += (
RightImplication(self._triple(x, const, z), symbol(x, z)),
)
return Union(constraints)
def _load_constraints(self):
"""
Function in charge of parsing the ontology's restrictions.
It needs a function "_process_X", where X is the name of
the restriction to be processed, to be defined.
"""
restriction_ids = [
s for s, _, _ in self.graph.triples((None, None, OWL.Restriction))
]
union_of_constraints = Union(())
for rest in restriction_ids:
cut_graph = list(self.graph.triples((rest, None, None)))
res_type = self._identify_restriction_type(cut_graph)
try:
process_restriction_method = getattr(
self, f"_process_{res_type}"
)
constraints = process_restriction_method(cut_graph)
union_of_constraints = Union(
union_of_constraints.formulas + constraints.formulas
)
except AttributeError as err:
raise NeuroLangNotImplementedError(
f"""Ontology parser doesn\'t handle
restrictions of type {res_type}"""
)
return union_of_constraints
def _identify_restriction_type(self, list_of_triples):
"""
Given a list of nodes associated to a restriction,
this function returns the name of the restriction
to be applied (hasValue, minCardinality, etc).
Parameters
----------
list_of_triples : list
List of nodes associated to a restriction.
Returns
-------
str
the name of the restriction or an empty string
if the name cannot be identified.
"""
for triple in list_of_triples:
if triple[1] == OWL.onProperty or triple[1] == RDF.type:
continue
else:
return triple[1].rsplit("#")[-1]
return ""
def _process_hasValue(self, cut_graph):
"""
A restriction containing a owl:hasValue constraint describes a class
of all individuals for which the property concerned has at least
one value semantically equal to V (it may have other values as well)
The following example describes the class of individuals
who have the individual referred to as Clinton as their parent:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:hasValue rdf:resource="#Clinton" />
</owl:Restriction>
"""
parsed_prop, restricted_node, value = self._parse_restriction_nodes(
cut_graph
)
rdfs_type = Constant(str(RDF.type))
property_symbol = Symbol(str(parsed_prop))
x = Symbol.fresh()
constraint = Union(
(
RightImplication(
self._triple(x, rdfs_type, Constant(str(restricted_node))),
property_symbol(x, Constant(str(value))),
),
)
)
return constraint
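# Reading of the constraint built above, using the names from the docstring
# example (they are not taken from any real ontology): for every x,
#   triple(x, rdf:type, <restricted class>) -> hasParent(x, Clinton)
# i.e. membership in the restricted class implies the property holds with the
# designated value.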
def _process_minCardinality(self, cut_graph):
"""
A restriction containing an owl:minCardinality constraint describes
a class of all individuals that have at least N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
The following example describes a class of individuals
that have at least two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:minCardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:minCardinality>
</owl:Restriction>
Note that an owl:minCardinality of one or more means that all
instances of the class must have a value for the property.
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction minCardinality cannot be
parsed for {restricted_node}."""
)
return Union(())
def _process_maxCardinality(self, cut_graph):
"""
A restriction containing an owl:maxCardinality constraint describes
a class of all individuals that have at most N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
The following example describes a class of individuals
that have at most two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:maxCardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:maxCardinality>
</owl:Restriction>
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction maxCardinality cannot be
parsed for {restricted_node}"""
)
return Union(())
def _process_cardinality(self, cut_graph):
"""
A restriction containing an owl:cardinality constraint describes
a class of all individuals that have exactly N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
This construct is in fact redundant as it can always be replaced
by a pair of matching owl:minCardinality and owl:maxCardinality
constraints with the same value. It is included as a convenient
shorthand for the user.
The following example describes a class of individuals that have
exactly two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:cardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:cardinality>
</owl:Restriction>
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction cardinality cannot be
parsed for {restricted_node}"""
)
return Union(())
def _process_someValuesFrom(self, cut_graph):
"""
It defines a class of individuals x for which there is at least one y
(either an instance of the class description or value of the data
range) such that the pair (x,y) is an instance of P. This does not
exclude that there are other instances (x,y') of P for which y' does
not belong to the class description or data range.
The following example defines a class of individuals which have at
least one parent who is a physician:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:someValuesFrom rdf:resource="#Physician" />
</owl:Restriction>
"""
parsed_prop, restricted_node, values = self._parse_restriction_nodes(
cut_graph
)
nodes_someValuesFrom = self._parse_list(values)
constraints = Union(())
property_symbol = Symbol(str(parsed_prop))
rdfs_type = Constant(str(RDF.type))
y = Symbol.fresh()
for value in nodes_someValuesFrom:
constraints = Union(
constraints.formulas
+ (
RightImplication(
self._triple(
y, rdfs_type, Constant(str(restricted_node))
),
property_symbol(y, Constant(str(value))),
),
)
)
return constraints
def _process_allValuesFrom(self, cut_graph):
"""
AllValuesFrom defines a class of individuals x
for which holds that if the pair (x,y) is an instance of
P (the property concerned), then y should be an instance
of the class description.
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:allValuesFrom rdf:resource="#Human" />
</owl:Restriction>
This example describes an anonymous OWL class of all individuals
for which the hasParent property only has values of class Human
"""
parsed_prop, restricted_node, values = self._parse_restriction_nodes(
cut_graph
)
allValuesFrom = self._parse_list(values)
constraints = Union(())
property_symbol = Symbol(str(parsed_prop))
rdf_type = Constant(str(RDF.type))
rdf_symbol = Symbol(str(RDF.type))
y = Symbol.fresh()
x = Symbol.fresh()
for value in allValuesFrom:
constraints = Union(
constraints.formulas
+ (
RightImplication(
Conjunction(
(
self._triple(
y, rdf_type, Constant(str(restricted_node))
),
property_symbol(y, x),
)
),
rdf_symbol(x, Constant(str(value))),
),
)
)
return constraints
def _parse_restriction_nodes(self, cut_graph):
"""
Given the list of nodes associated with a restriction,
this function returns: The restricted node, the property that
restricts it and the value associated to it.
Parameters
----------
cut_graph : list
List of nodes associated to a restriction.
Returns
-------
parsed_property : URIRef
The node of the property.
restricted_node : URIRef
The node restricted by the property.
value : URIRef
The value of the property
"""
restricted_node = list(
self.graph.triples((None, None, cut_graph[0][0]))
)[0][0]
for triple in cut_graph:
if OWL.onProperty == triple[1]:
parsed_property = triple[2]
elif triple[1] in self.parsed_restrictions:
value = triple[2]
return parsed_property, restricted_node, value
def _parse_list(self, initial_node):
"""
This function receives the initial BNode of an RDF list,
walks through the list collecting its values, and
returns them as a Python list.
Parameters
----------
initial_node : BNode
Initial node of the list that you want to go through.
Returns
-------
values : list
Array of nodes that are part of the list.
"""
if not isinstance(initial_node, BNode):
return [initial_node]
list_node = RDF.nil
values = []
for node_triples in self.graph.triples((initial_node, None, None)):
if OWL.unionOf == node_triples[1]:
list_node = node_triples[2]
else:
values.append(node_triples[0])
while list_node != RDF.nil and list_node is not None:
# Materialize the triples so the rdf:first and rdf:rest lookups below
# can each scan them independently of the store's iteration order.
list_iter = list(self.graph.triples((list_node, None, None)))
values.append(self._get_list_first_value(list_iter))
list_node = self._get_list_rest_value(list_iter)
return values
def _get_list_first_value(self, list_iter):
"""
Given a list of triples, as a result of the iteration of a list,
this function returns the node associated to the rdf:first property.
Parameters
----------
list_iter : generator
Generator that represents the list of nodes that
form a position in a list.
Returns
-------
URIRef
Node associated to the rdf:first property.
"""
for triple in list_iter:
if RDF.first == triple[1]:
return triple[2]
def _get_list_rest_value(self, list_iter):
"""
Given a list of triples, as a result of the iteration of a list,
this function returns the node associated to the rdf:rest property.
Parameters
----------
list_iter : generator
Generator that represents the list of nodes that
form a position in a list.
Returns
-------
URIRef
Node associated to the rdf:rest property.
"""
for triple in list_iter:
if RDF.rest == triple[1]:
return triple[2]
def get_triples(self):
return self.graph.triples((None, None, None))
|
py | 1a4afdbce60aefc0a3befdbcea0625c954912c12 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-p_q$=$qcpwvdd1hvxq^a!9(oe@41+d%(14aa0kxg#0a2zb-z4)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website.apps.WebsiteConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR/'static'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR/'media'
STATICFILES_DIRS = [
BASE_DIR / "statics"
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
py | 1a4afdcf846ced4aa8ec560838edc9b08e8de1be | import os
import json
import numpy as np
from pychemia.crystal import KPoints
from ...tasks import Task
from ..abinit import AbinitJob
__author__ = 'Guillermo Avendano-Franco'
class StaticCalculation(Task):
def __init__(self, structure, workdir='.', binary='abinit', ecut=50, kpoints=None, kp_density=1E4):
self.ecut = ecut
if kpoints is None:
kp = KPoints.optimized_grid(structure.lattice, kp_density=kp_density, force_odd=True)
self.kpoints = kp
else:
self.kpoints = kpoints
self.task_params = {'ecut': self.ecut, 'kpoints': self.kpoints.to_dict}
Task.__init__(self, structure=structure, task_params=self.task_params, workdir=workdir, binary=binary)
self.abinitjob = AbinitJob()
self.abinitjob.initialize(workdir=workdir, structure=structure, binary=binary)
def run(self, nparal=1):
self.abinitjob.set_kpoints(kpoints=self.kpoints)
self.abinitjob.job_static()
self.abinitjob.set_ecut(self.ecut)
self.abinitjob.set_psps()
self.abinitjob.write_all()
self.abinitjob.run(use_mpi=True, omp_max_threads=nparal, mpi_num_procs=nparal)
def plot(self, figname='static_calculation.pdf'):
if not self.finished:
print('The task is not finished')
return
import matplotlib.pyplot as plt
plt.switch_backend('agg')
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=0.09, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
data = np.array(self.output['energies'])
plt.plot(data[:, 1], data[:, 2], 'b.-')
plt.xlabel('SCF cycle')
plt.ylabel('Energy [eV]')
a = plt.axes([.6, .6, .3, .3], facecolor='0.9')
a.semilogy(data[:, 1], data[:, 2] - np.min(data[:, 2]))
a.set_title('min energy %7.3f eV' % np.min(data[:, 2]))
if figname is not None:
plt.savefig(figname)
return plt.gcf()
def load(self, filename=None):
if filename is None:
filename = self.workdir + os.sep + 'task.json'
rf = open(filename)
data = json.load(rf)
rf.close()
self.task_params = data['task_params']
self.output = data['output']
self.ecut = self.task_params['ecut']
self.kpoints = KPoints.from_dict(self.task_params['kpoints'])
def report(self, file_format='html'):
from lxml.builder import ElementMaker, E
self.plot(figname=self.report_dir + os.sep + 'static.jpg')
element_maker = ElementMaker(namespace=None, nsmap={None: "http://www.w3.org/1999/xhtml"})
html = element_maker.html(E.head(E.title("ABINIT Static Calculation")),
E.body(E.h1("ABINIT Static Calculation"),
E.h2('Structure'),
E.pre(str(self.structure)),
E.h2('Self Consistent Field Convergence'),
E.p(E.img(src='static.jpg', width="800", height="600",
alt="Static Calculation"))
))
return self.report_end(html, file_format)
|
py | 1a4afe044eff981c63079c9be04b6643970ff191 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
r"""Script for training model.
Simple command to get up and running:
python train.py --memory_size=8192 \
--batch_size=16 --validation_length=50 \
--episode_width=5 --episode_length=30
"""
import logging
import os
import random
import numpy as np
import tensorflow as tf
import data_utils
import model
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer('rep_dim', 128,
'dimension of keys to use in memory')
tf.flags.DEFINE_integer('episode_length', 100, 'length of episode')
tf.flags.DEFINE_integer('episode_width', 5,
'number of distinct labels in a single episode')
tf.flags.DEFINE_integer('memory_size', None, 'number of slots in memory. '
'Leave as None to default to episode length')
tf.flags.DEFINE_integer('batch_size', 16, 'batch size')
tf.flags.DEFINE_integer('num_episodes', 100000, 'number of training episodes')
tf.flags.DEFINE_integer('validation_frequency', 20,
'every so many training episodes, '
'assess validation accuracy')
tf.flags.DEFINE_integer('validation_length', 10,
'number of episodes to use to compute '
'validation accuracy')
tf.flags.DEFINE_integer('seed', 888, 'random seed for training sampling')
tf.flags.DEFINE_string('save_dir', '', 'directory to save model to')
tf.flags.DEFINE_bool('use_lsh', False,
'use locality-sensitive hashing '
'(NOTE: not fully tested)')
class Trainer(object):
"""Class that takes care of training, validating, and checkpointing model."""
def __init__(self, train_data, valid_data, input_dim, output_dim=None):
self.train_data = train_data
self.valid_data = valid_data
self.input_dim = input_dim
self.rep_dim = FLAGS.rep_dim
self.episode_length = FLAGS.episode_length
self.episode_width = FLAGS.episode_width
self.batch_size = FLAGS.batch_size
self.memory_size = (self.episode_length * self.batch_size
if FLAGS.memory_size is None else FLAGS.memory_size)
self.use_lsh = FLAGS.use_lsh
self.output_dim = (output_dim if output_dim is not None
else self.episode_width)
def get_model(self):
# vocab size is the number of distinct values that
# could go into the memory key-value storage
vocab_size = self.episode_width * self.batch_size
return model.Model(
self.input_dim, self.output_dim, self.rep_dim, self.memory_size,
vocab_size, use_lsh=self.use_lsh)
def sample_episode_batch(self, data,
episode_length, episode_width, batch_size):
"""Generates a random batch for training or validation.
Structures each element of the batch as an 'episode'.
Each episode contains episode_length examples and
episode_width distinct labels.
Args:
data: A dictionary mapping label to list of examples.
episode_length: Number of examples in each episode.
episode_width: Distinct number of labels in each episode.
batch_size: Batch size (number of episodes).
Returns:
A tuple (x, y) where x is a list of batches of examples
with size episode_length and y is a list of batches of labels.
"""
episodes_x = [[] for _ in xrange(episode_length)]
episodes_y = [[] for _ in xrange(episode_length)]
assert len(data) >= episode_width
keys = data.keys()
for b in xrange(batch_size):
episode_labels = random.sample(keys, episode_width)
remainder = episode_length % episode_width
remainders = [0] * (episode_width - remainder) + [1] * remainder
episode_x = [
random.sample(data[lab],
r + (episode_length - remainder) / episode_width)
for lab, r in zip(episode_labels, remainders)]
episode = sum([[(x, i, ii) for ii, x in enumerate(xx)]
for i, xx in enumerate(episode_x)], [])
random.shuffle(episode)
# Arrange episode so that each distinct label is seen before moving to
# 2nd showing
episode.sort(key=lambda elem: elem[2])
assert len(episode) == episode_length
for i in xrange(episode_length):
episodes_x[i].append(episode[i][0])
episodes_y[i].append(episode[i][1] + b * episode_width)
return ([np.array(xx).astype('float32') for xx in episodes_x],
[np.array(yy).astype('int32') for yy in episodes_y])
def compute_correct(self, ys, y_preds):
return np.mean(np.equal(y_preds, np.array(ys)))
def individual_compute_correct(self, y, y_pred):
return y_pred == y
def run(self):
"""Performs training.
Trains a model using episodic training.
Every so often, runs some evaluations on validation data.
"""
train_data, valid_data = self.train_data, self.valid_data
input_dim, output_dim = self.input_dim, self.output_dim
rep_dim, episode_length = self.rep_dim, self.episode_length
episode_width, memory_size = self.episode_width, self.memory_size
batch_size = self.batch_size
train_size = len(train_data)
valid_size = len(valid_data)
logging.info('train_size (number of labels) %d', train_size)
logging.info('valid_size (number of labels) %d', valid_size)
logging.info('input_dim %d', input_dim)
logging.info('output_dim %d', output_dim)
logging.info('rep_dim %d', rep_dim)
logging.info('episode_length %d', episode_length)
logging.info('episode_width %d', episode_width)
logging.info('memory_size %d', memory_size)
logging.info('batch_size %d', batch_size)
assert all(len(v) >= float(episode_length) / episode_width
for v in train_data.itervalues())
assert all(len(v) >= float(episode_length) / episode_width
for v in valid_data.itervalues())
output_dim = episode_width
self.model = self.get_model()
self.model.setup()
sess = tf.Session()
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(max_to_keep=10)
ckpt = None
if FLAGS.save_dir:
ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
if ckpt and ckpt.model_checkpoint_path:
logging.info('restoring from %s', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
logging.info('starting now')
losses = []
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
for i in xrange(FLAGS.num_episodes):
x, y = self.sample_episode_batch(
train_data, episode_length, episode_width, batch_size)
outputs = self.model.episode_step(sess, x, y, clear_memory=True)
loss = outputs
losses.append(loss)
if i % FLAGS.validation_frequency == 0:
logging.info('episode batch %d, avg train loss %f',
i, np.mean(losses))
losses = []
# validation
correct = []
correct_by_shot = dict((k, []) for k in xrange(self.episode_width + 1))
for _ in xrange(FLAGS.validation_length):
x, y = self.sample_episode_batch(
valid_data, episode_length, episode_width, 1)
outputs = self.model.episode_predict(
sess, x, y, clear_memory=True)
y_preds = outputs
correct.append(self.compute_correct(np.array(y), y_preds))
# compute per-shot accuracies
seen_counts = [[0] * episode_width for _ in xrange(batch_size)]
# loop over episode steps
for yy, yy_preds in zip(y, y_preds):
# loop over batch examples
for k, (yyy, yyy_preds) in enumerate(zip(yy, yy_preds)):
yyy, yyy_preds = int(yyy), int(yyy_preds)
count = seen_counts[k][yyy % self.episode_width]
if count in correct_by_shot:
correct_by_shot[count].append(
self.individual_compute_correct(yyy, yyy_preds))
seen_counts[k][yyy % self.episode_width] = count + 1
logging.info('validation overall accuracy %f', np.mean(correct))
logging.info('%d-shot: %.3f, ' * (self.episode_width + 1),
*sum([[k, np.mean(correct_by_shot[k])]
for k in xrange(self.episode_width + 1)], []))
if saver and FLAGS.save_dir:
saved_file = saver.save(sess,
os.path.join(FLAGS.save_dir, 'model.ckpt'),
global_step=self.model.global_step)
logging.info('saved model to %s', saved_file)
def main(unused_argv):
train_data, valid_data = data_utils.get_data()
trainer = Trainer(train_data, valid_data, data_utils.IMAGE_NEW_SIZE ** 2)
trainer.run()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
|
py | 1a4afeb9829d7316e288f1b8f76bf4cbada8b7d9 | """
Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, 5GTANGO
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
This work has been performed in the framework of the 5GTANGO project,
funded by the European Commission under Grant number 761493 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the 5GTANGO
partner consortium (www.5gtango.eu).
"""
import logging
import yaml
import time
from smbase.smbase import smbase
try:
from ds import ssh
except:
import ssh
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("fsm-ds")
LOG.setLevel(logging.DEBUG)
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
class dsFSM(smbase):
def __init__(self, connect_to_broker=True):
"""
:param specific_manager_type: specifies the type of specific manager
that could be either fsm or ssm.
:param service_name: the name of the service that this specific manager
belongs to.
:param function_name: the name of the function that this specific
manager belongs to, will be null in SSM case
:param specific_manager_name: the actual name of specific manager
(e.g., scaling, placement)
:param id_number: the specific manager id number which is used to
distinguish between multiple SSM/FSM that are created for the same
objective (e.g., scaling with algorithm 1 and 2)
:param version: version
:param description: description
"""
self.sm_id = "sonfsmcommunication-pilotds-vnfcss1"
self.sm_version = "0.1"
super(self.__class__, self).__init__(sm_id=self.sm_id,
sm_version=self.sm_version,
connect_to_broker=connect_to_broker)
def on_registration_ok(self):
# The fsm registration was successful
LOG.debug("Received registration ok event.")
# send the status to the SMR
status = 'Subscribed, waiting for alert message'
message = {'name': self.sm_id,
'status': status}
self.manoconn.publish(topic='specific.manager.registry.ssm.status',
message=yaml.dump(message))
# Subscribing to the topics that the fsm needs to listen on
topic = "generic.fsm." + str(self.sfuuid)
self.manoconn.subscribe(self.message_received, topic)
LOG.info("Subscribed to " + topic + " topic.")
def message_received(self, ch, method, props, payload):
"""
This method handles received messages
"""
# Decode the content of the message
request = yaml.load(payload)
# Don't trigger on non-request messages
if "fsm_type" not in request.keys():
LOG.info("Received a non-request message, ignoring...")
return
# Create the response
response = None
# the 'fsm_type' field in the content indicates for which type of
# fsm this message is intended.
if str(request["fsm_type"]) == "start":
LOG.info("Start event received: " + str(request["content"]))
response = self.start_event(request["content"])
if str(request["fsm_type"]) == "stop":
LOG.info("Stop event received: " + str(request["content"]))
response = self.stop_event(request["content"])
if str(request["fsm_type"]) == "configure":
LOG.info("Config event received: " + str(request["content"]))
response = self.configure_event(request["content"])
# If a response message was generated, send it back to the FLM
LOG.info("Response to request generated:" + str(response))
topic = "generic.fsm." + str(self.sfuuid)
corr_id = props.correlation_id
self.manoconn.notify(topic,
yaml.dump(response),
correlation_id=corr_id)
return
def start_event(self, content):
"""
This method handles a start event.
"""
# Dummy content
response = {'status': 'completed'}
return response
def stop_event(self, content):
"""
This method handles a stop event.
"""
# Dummy content
response = {'status': 'completed'}
return response
def configure_event(self, content):
"""
This method handles a configure event. The configure event changes the configuration
of the Dispatcher.
"""
# Extract VNF-DS management IP and VNF-BS internal IP
ds_ip = ''
bs_ip = ''
for vnfr in content['vnfrs']:
if vnfr['virtual_deployment_units'][0]['vdu_reference'][:2] == 'bs':
for cp in vnfr['virtual_deployment_units'][0]['vnfc_instance'][0]['connection_points']:
if cp['id'] == 'internal':
bs_ip = cp['interface']['address']
break
if vnfr['virtual_deployment_units'][0]['vdu_reference'][:2] == 'ds':
for cp in vnfr['virtual_deployment_units'][0]['vnfc_instance'][0]['connection_points']:
if cp['id'] == 'mgmt':
ds_ip = cp['interface']['address']
break
LOG.info('ds ip: ' + ds_ip)
LOG.info('bs ip: ' + bs_ip)
# Initiate SSH connection with the VM
ssh_client = ssh.Client(ds_ip, username='ubuntu', logger=LOG,
key_filename='/root/ds/sandbox.pem', retries=40)
# Enable user ubuntu in tmp folder
ssh_client.sendCommand("sudo chown -R ubuntu:ubuntu /tmp/")
# Change qss config
ssh_client.sendCommand("sudo sed -r -i '/mongodbUrl: .*$/c\ mongodbUrl: \"mongodb:\/\/" +
bs_ip + "/dispatcher\",' /opt/sippo/janus-dispatcher/janus-dispatcher-current/quobis-dispatcher-config.js")
# Restart the services
ssh_client.sendCommand(
"pm2 restart /opt/sippo/janus-dispatcher/janus-dispatcher-current/process.json")
if ssh_client.connected:
response = {'status': 'COMPLETED', 'error': 'None'}
else:
response = {'status': 'FAILED', 'error': 'FSM SSH connection failed'}
return response
def main():
dsFSM()
if __name__ == '__main__':
main()
|
py | 1a4aff2b660bee00b83b70e4e90cfe157244ff31 | """Synchronous msgpack-rpc session layer."""
import logging
from collections import deque
from traceback import format_exc
import greenlet
logger = logging.getLogger(__name__)
error, debug, info, warn = (logger.error, logger.debug, logger.info,
logger.warning,)
class Session(object):
"""Msgpack-rpc session layer that uses coroutines for a synchronous API.
This class provides the public msgpack-rpc API required by this library.
It uses the greenlet module to handle requests and notifications coming
from Nvim with a synchronous API.
"""
def __init__(self, async_session):
"""Wrap `async_session` on a synchronous msgpack-rpc interface."""
self._async_session = async_session
self._request_cb = self._notification_cb = None
self._pending_messages = deque()
self._is_running = False
self._setup_exception = None
self.loop = async_session.loop
def threadsafe_call(self, fn, *args, **kwargs):
"""Wrapper around `AsyncSession.threadsafe_call`."""
def handler():
try:
fn(*args, **kwargs)
except Exception:
warn("error caught while excecuting async callback\n%s\n",
format_exc())
def greenlet_wrapper():
gr = greenlet.greenlet(handler)
gr.switch()
self._async_session.threadsafe_call(greenlet_wrapper)
def next_message(self):
"""Block until a message(request or notification) is available.
If any messages were previously enqueued, return the first in queue.
If not, run the event loop until one is received.
"""
if self._is_running:
raise Exception('Event loop already running')
if self._pending_messages:
return self._pending_messages.popleft()
self._async_session.run(self._enqueue_request_and_stop,
self._enqueue_notification_and_stop)
if self._pending_messages:
return self._pending_messages.popleft()
def request(self, method, *args, **kwargs):
"""Send a msgpack-rpc request and block until as response is received.
If the event loop is running, this method must have been called by a
request or notification handler running on a greenlet. In that case,
send the request and yield to the parent greenlet until a response is
available.
When the event loop is not running, it will perform a blocking request
like this:
- Send the request
- Run the loop until the response is available
- Put requests/notifications received while waiting into a queue
If the `async_` flag (or the legacy `async` spelling) is present and
True, an asynchronous notification is sent instead. This will never
block, and the return value or error is ignored.
"""
# 'async' became a reserved keyword in Python 3.7, so read the flag into a
# local named 'async_'; both the 'async_' and legacy 'async' kwargs are accepted.
async_ = kwargs.pop('async_', kwargs.pop('async', False))
if async_:
self._async_session.notify(method, args)
return
if kwargs:
raise ValueError("request got unsupported keyword argument(s): {}"
.format(', '.join(kwargs.keys())))
if self._is_running:
v = self._yielding_request(method, args)
else:
v = self._blocking_request(method, args)
if not v:
# EOF
raise IOError('EOF')
err, rv = v
if err:
info("'Received error: %s", err)
raise self.error_wrapper(err)
return rv
def run(self, request_cb, notification_cb, setup_cb=None):
"""Run the event loop to receive requests and notifications from Nvim.
Like `AsyncSession.run()`, but `request_cb` and `notification_cb` are
inside greenlets.
"""
self._request_cb = request_cb
self._notification_cb = notification_cb
self._is_running = True
self._setup_exception = None
def on_setup():
try:
setup_cb()
except Exception as e:
self._setup_exception = e
self.stop()
if setup_cb:
# Create a new greenlet to handle the setup function
gr = greenlet.greenlet(on_setup)
gr.switch()
if self._setup_exception:
error('Setup error: {}'.format(self._setup_exception))
raise self._setup_exception
# Process all pending requests and notifications
while self._pending_messages:
msg = self._pending_messages.popleft()
getattr(self, '_on_{}'.format(msg[0]))(*msg[1:])
self._async_session.run(self._on_request, self._on_notification)
self._is_running = False
self._request_cb = None
self._notification_cb = None
if self._setup_exception:
raise self._setup_exception
def stop(self):
"""Stop the event loop."""
self._async_session.stop()
def close(self):
"""Close the event loop."""
self._async_session.close()
def _yielding_request(self, method, args):
gr = greenlet.getcurrent()
parent = gr.parent
def response_cb(err, rv):
debug('response is available for greenlet %s, switching back', gr)
gr.switch(err, rv)
self._async_session.request(method, args, response_cb)
debug('yielding from greenlet %s to wait for response', gr)
return parent.switch()
def _blocking_request(self, method, args):
result = []
def response_cb(err, rv):
result.extend([err, rv])
self.stop()
self._async_session.request(method, args, response_cb)
self._async_session.run(self._enqueue_request,
self._enqueue_notification)
return result
def _enqueue_request_and_stop(self, name, args, response):
self._enqueue_request(name, args, response)
self.stop()
def _enqueue_notification_and_stop(self, name, args):
self._enqueue_notification(name, args)
self.stop()
def _enqueue_request(self, name, args, response):
self._pending_messages.append(('request', name, args, response,))
def _enqueue_notification(self, name, args):
self._pending_messages.append(('notification', name, args,))
def _on_request(self, name, args, response):
def handler():
try:
rv = self._request_cb(name, args)
debug('greenlet %s finished executing, ' +
'sending %s as response', gr, rv)
response.send(rv)
except ErrorResponse as err:
warn("error response from request '%s %s': %s", name,
args, format_exc())
response.send(err.args[0], error=True)
except Exception as err:
warn("error caught while processing request '%s %s': %s", name,
args, format_exc())
response.send(repr(err) + "\n" + format_exc(5), error=True)
debug('greenlet %s is now dying...', gr)
# Create a new greenlet to handle the request
gr = greenlet.greenlet(handler)
debug('received rpc request, greenlet %s will handle it', gr)
gr.switch()
def _on_notification(self, name, args):
def handler():
try:
self._notification_cb(name, args)
debug('greenlet %s finished executing', gr)
except Exception:
warn("error caught while processing notification '%s %s': %s",
name, args, format_exc())
debug('greenlet %s is now dying...', gr)
gr = greenlet.greenlet(handler)
debug('received rpc notification, greenlet %s will handle it', gr)
gr.switch()
class ErrorResponse(BaseException):
"""Raise this in a request handler to respond with a given error message.
Unlike when other exceptions are caught, this gives full control of the
error response sent. When "ErrorResponse(msg)" is caught, "msg" will be
sent verbatim as the error response. No traceback will be appended.
"""
pass
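# Minimal usage sketch (illustrative, not part of the original module). The
# construction of `async_session` happens elsewhere in the package; here it is
# assumed to be an already-created AsyncSession bound to an msgpack-rpc transport,
# and the method names are only examples of what a peer might expose:
#
#     session = Session(async_session)
#     api_info = session.request('nvim_get_api_info')              # blocking round trip
#     session.request('nvim_command', 'echo "hi"', async_=True)    # fire-and-forget notify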
|
py | 1a4aff49a098b6bed511d8f8a6e4a2ea2a010385 | # -*- coding: utf-8 -*-
import re
import tempfile
from collections import Counter
from urllib.parse import urlparse
import django
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.test import Client, RequestFactory, TestCase
from django.test.utils import override_settings
from booru.utils import space_splitter
from booru.utils import space_joiner
from booru.utils import compare_strings
class UtilitiesTests(TestCase):
fixtures = []
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def test_space_splitter_generates_tags_from_string(self):
tag_string = "test1 test2 test:test_3 test_4"
generated_tags = space_splitter(tag_string)
expected_generated_tags = ["test1", "test2", "test:test_3", "test_4"]
self.assertEqual(generated_tags, expected_generated_tags)
def test_history_diff(self):
old_string = "test1 test2 test3"
new_string = "test2 test3 test4"
expected = {"added": ["test4"], "removed": ["test1"], "equal": ["test2", "test3"]}
result = compare_strings(old_string, new_string)
self.assertEqual(sorted(result["added"]), sorted(expected["added"]))
self.assertEqual(sorted(result["removed"]), sorted(expected["removed"]))
self.assertEqual(sorted(result["equal"]), sorted(expected["equal"]))
|
py | 1a4affa978732fb158c1a83516e3a31c472084ef | from django.test.testcases import TestCase
from mock import patch
from robber import expect
from data import cache_managers
class CacheManagersTestCase(TestCase):
@patch('data.cache_managers.allegation_cache_manager.cache_data')
@patch('data.cache_managers.officer_cache_manager.cache_data')
@patch('data.cache_managers.salary_cache_manager.cache_data')
@patch('activity_grid.cache_managers.activity_pair_card_cache_manager.cache_data')
def test_cache_all(
self,
salary_cache_mock,
officer_cache_mock,
allegation_cache_mock,
activity_pair_card_cache_mock
):
cache_managers.cache_all()
expect(salary_cache_mock).to.be.called_once()
expect(officer_cache_mock).to.be.called_once()
expect(allegation_cache_mock).to.be.called_once()
expect(activity_pair_card_cache_mock).to.be.called_once()
expect(len(cache_managers.managers)).to.eq(4)
|
py | 1a4affbd4af66c0b2b0a81c21bba7962b0e357c2 | #!/usr/bin/python3
import argparse
import itertools
import os
import pprint
import sys
import yaml
from PIL import Image, ImageDraw
import bs4
THUMB_MARGIN = 10
def get_polys(html):
with open(html) as f:
soup = bs4.BeautifulSoup(f.read(), features="html5lib")
out = {}
for a in soup.find_all("area"):
assert a["shape"] == "poly"
name = a["href"]
coords = a["coords"]
coords = [int(i) for i in coords.split(",")]
coords = list(zip(coords[::2], coords[1::2]))
out[name] = coords
return out
class Patch:
MARGIN = 5
def __init__(self, image, coords):
mask = Image.new("L", image.size, 0)
d = ImageDraw.Draw(mask)
d.polygon(coords, 255)
masked = Image.new("RGBA", image.size, (0,0,0,0))
masked.paste(image, (0,0), mask)
min_x = min(p[0] for p in coords) - self.MARGIN
max_x = max(p[0] for p in coords) + self.MARGIN
min_y = min(p[1] for p in coords) - self.MARGIN
max_y = max(p[1] for p in coords) + self.MARGIN
if min_x < 0: min_x = 0
if min_y < 0: min_y = 0
if max_x > image.size[0]: max_x = image.size[0]
if max_y > image.size[1]: max_y = image.size[1]
self.origin = [min_x, min_y]
self.size = [max_x - min_x, max_y - min_y]
self.image = masked.crop((min_x, min_y, max_x, max_y))
t = []
for x, y in coords:
t.append(str(x))
t.append(str(y))
self.coords_str = ",".join(t)
self.highlight = Image.new("RGBA", self.image.size, (255,255,255,0))
for ox in range(-2, 3):
for oy in range(-2, 3):
if ox in (-2,2) and oy in (-2,2): continue
self.highlight.paste((255,255,255,255), (ox,oy), self.image)
pixels = set()
for j in range(self.size[1]):
for i in range(self.size[0]):
if self.image.getpixel((i,j))[3]:
pixels.add((i,j))
elif self.highlight.getpixel((i,j))[3]:
pixels.add((i,j))
if not pixels:
self.image = None
self.highlight = None
return
min_x = min(p[0] for p in pixels)
max_x = max(p[0] for p in pixels)
min_y = min(p[1] for p in pixels)
max_y = max(p[1] for p in pixels)
w = max_x + 1 - min_x
h = max_y + 1 - min_y
self.image = self.image.crop((min_x, min_y, max_x, max_y))
self.highlight = self.highlight.crop((min_x, min_y, max_x, max_y))
self.origin = [self.origin[0] + min_x, self.origin[1] + min_y]
self.size = [w, h]
def main():
parser = argparse.ArgumentParser(
description="Extract icons from images and a map.")
parser.add_argument("--output_dir",
default=".",
help="Directory for output icons")
parser.add_argument("--max_thumb_height",
type=int, default=260,
help="Max height of thumb images")
parser.add_argument("--background_color",
default="#f8f8f8",
help="Background color for map")
parser.add_argument("--output_yaml", default="land.yaml",
help="File for yaml data output")
parser.add_argument("html",
help="Image map HTML")
parser.add_argument("source_image")
parser.add_argument("--under_image", default=None)
parser.add_argument("--under_html", default=None)
options = parser.parse_args()
assert options.background_color[0] == "#" and len(options.background_color) == 7
options.background_color = tuple(int(options.background_color[i*2+1:i*2+3], 16) for i in range(3))
html_map = get_polys(options.html)
if options.under_html:
under_map = get_polys(options.under_html)
else:
under_map = html_map
source_image = Image.open(options.source_image).convert("RGBA")
if options.under_image:
under_image = Image.open(options.under_image).convert("RGBA")
assert under_image.size == source_image.size
else:
under_image = None
size = source_image.size
icons = {}
for name, coords in html_map.items():
out = {}
icons[name] = out
patch = Patch(source_image, coords)
if patch.image:
od = {}
out["image"] = od
od["pos"] = patch.origin
od["poly"] = patch.coords_str
od["size"] = patch.size
patch.image.save(os.path.join(options.output_dir, f"image_{name}.png"))
if patch.highlight:
od = {}
out["mask"] = od
od["pos"] = patch.origin[:]
od["size"] = patch.size[:]
patch.highlight.save(os.path.join(options.output_dir, f"mask_{name}.png"))
if under_image:
under_coords = under_map.get(name)
if under_coords:
under_patch = Patch(under_image, under_coords)
if under_patch.image:
od = {}
out["under"] = od
od["pos"] = under_patch.origin
od["poly"] = under_patch.coords_str
od["size"] = under_patch.size
under_patch.image.save(os.path.join(options.output_dir, f"under_{name}.png"))
y = { "icons": icons }
with open(os.path.join(options.output_dir, options.output_yaml), "w") as f:
f.write(yaml.dump(y))
if __name__ == "__main__":
main()
|
py | 1a4b0013261062ff07dacc7231993dc6cba27c21 | """
Frequency-split parameters
==========================
Split spectra and plot parameters
"""
import matplotlib.pyplot as plt
from wavespectra import read_ww3
dset = read_ww3("../_static/ww3file.nc")
fcut = 1 / 8
sea = dset.spec.split(fmin=fcut)
swell = dset.spec.split(fmax=fcut)
plt.figure(figsize=(8, 4.5))
p1 = dset.spec.hs().isel(site=0).plot(label="Full spectrum", marker="o")
p2 = sea.spec.hs().isel(site=0).plot(label="Sea", marker="o")
p3 = swell.spec.hs().isel(site=0).plot(label="Swell", marker="o")
l = plt.legend(loc=0, fontsize=8)
plt.title("")
plt.ylabel("$Hs$ (m)")
plt.xlabel("")
|
py | 1a4b03cdebe09cf31d9883a15002a3227868cdc5 | # coded by: salism3
# 23 - 05 - 2020 23:18 (Malam Takbir)
from .checker import check_login
from .output import Output, People, Group
from . import parsing
import re
@check_login
def msgUrl(ses, next = None):
html = ses.session.get("https://mbasic.facebook.com/messages" if not next else next).text
data = parsing.parsing_href(html, "/read/")
next = parsing.parsing_href_regex(html, r"[?]pageNum.*selectable", one = True)
return Output(ses, msgUrl, items = data, next = next, html = html)
@check_login
def myGroup(ses):
html = ses.session.get("https://mbasic.facebook.com/groups/?seemore&refid=27").text
data = parsing.parsing_href_regex(html, r"/groups/\d+\W", bs4_class = True)
data = [(x.text, re.search(r"/(\d+)\W", x["href"]).group(1)) for x in data]
return Output(ses, myGroup, items = data, html = html)
def find_people(ses, name):
html = ses.session.get("https://mbasic.facebook.com/search/people/?q={}&source=filter&isTrending=0".format(name)).text
url = parsing.parsing_href(html, "__xts__", one = True)
try:
html = ses.session.get(url).text
return People(ses, html)
except:
return
def find_group(ses, name):
html = ses.session.get("https://mbasic.facebook.com/search/groups/?q={}&source=filter&isTrending=0".format(name)).text
url = parsing.parsing_href(html, "__xts__", one = True)
try:
# print("in try")
id_ = re.search(r"/(\d+)\Wrefid", url).group(1)
html = ses.session.get("https://mbasic.facebook.com/groups/{}?view=info".format(id_)).text
return Group(ses, html)
except:
return
|
py | 1a4b03edfafb63b72f5a04238f0ee69a6a0948f0 | #unfinished - small tool to automatically notify you of the latest manga releases
# from pynotifier import Notification
# import json
# import api
# from config_path import ConfigPath
# conf_path = ConfigPath('alisw','pymanga','.json')
# path = conf_path.readFolderPath()
# path.mkdir(parents=True,exist_ok=True)
# config_path = path.joinpath('config.json')
#
# if not config_path.exists():
# with open(config_path,'w') as f:
# f.write(json.dumps({
# 'ids': [],
# 'date_limit': 7
# }))
#
# config = {}
# with open(config_path,'r') as f:
# config = json.loads(f.read())
#
# for id in config['ids']:
# series = api.series(id)
# latest = series['latest_releases'][0]
# if int(latest['date'].replace(' days ago','')) < config['date_limit']:
# Notification(
# title='New ' + series['title'] + ' Chapter!',
# description='Chapter ' + latest['chapter'] + ' released.',
# duration=15,
# urgency=Notification.URGENCY_NORMAL
# ).send()
|
py | 1a4b0436baa5ad98d1b7ad3c7383f24714641da4 | #
# Script to generate json of email events for testing from enron email corpus.
# To reproduce, download the mongoDb dump from
# http://mongodb-enron-email.s3-website-us-east-1.amazonaws.com/
# and export the messages collection to a csv
#
import csv
import json
from dateutil.parser import parse
def employees(filename='./employees.txt'):
with open(filename, 'r') as intext:
with open(filename.replace('txt', 'json'), 'w') as outjson:
out = []
for line in intext:
out.append(line.split("\t")[0].strip() + "@enron.com")
json.dump(out, outjson)
def mongoClean(filename='./mongo-enron.csv'):
with open(filename, "r") as csv_file:
csv_iterator = csv.DictReader(csv_file)
out = []
with open('./mongo-enron.json', 'w') as outjson:
for row in csv_iterator:
getSet = lambda x : set(x.strip() for x in row['headers.' + x].split(','))
record = {
"to" : [x for x in (getSet('To') | getSet('Bcc') | getSet('Cc')) if x],
"from" : row['headers.From'],
"time" : int(parse(row['headers.Date']).strftime('%s'))
}
out.append(record)
json.dump(sorted(out, lambda x, y: x['time'] - y['time']), outjson)
if __name__ == '__main__':
#mongoClean()
employees()
|
py | 1a4b04498298b288ad6c951aba60b2532179ca10 | from listen_ins import Client
from simple_ctl import sctl
from voice_ctl import vctl
class Controller:
def __init__(self,sctl:sctl,vctl:vctl):
self.mode = ''
self.sctl = sctl
self.vctl = vctl
def start(self):
thread = Client(1, 'Thread-1',self)
thread.start()
thread.join()
def voice_ctl(self, data):
self.vctl.get_data(data)
def auto_slam(self):
pass
def simple_ctl(self,data):
self.sctl.get_data(data)
def set_mode(self, mode):
self.mode = mode
def process(self, data):
if data == 'voice_ctl':
self.set_mode('voice_ctl')
elif data == 'auto_slam':
self.set_mode('auto_slam')
elif data == 'simple_ctl':
self.set_mode('simple_ctl')
elif data == 'main':
self.set_mode('main')
elif self.mode == 'voice_ctl':
self.voice_ctl(data)
elif self.mode == 'simple_ctl':
self.simple_ctl(data)
if __name__ == '__main__':
pass |
py | 1a4b047282235ec7f14f5f340b9bf983ffec7148 | from django.db import connection
from django.urls import resolve
class QueryCountDebugMiddleware:
"""
This middleware prints every SQL query executed during a request,
along with the number of queries and the total query time.
It does not currently support multi-db setups.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
current_url = resolve(request.path_info).url_name
response = self.get_response(request)
total_time = 0
for index, query in enumerate(connection.queries, 1):
query_time = query.get('time')
sql_query = query.get('sql')
if query_time is None:
query_time = query.get('duration', 0) / 1000
total_time += float(query_time)
print(f"\n{index}: ({query_time}) {sql_query}")
print(f"{current_url}: {request.get_raw_uri()}")
print(f"{len(connection.queries)} queries run, total {total_time} seconds\n", "-" * 100)
return response
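# Usage sketch (illustrative): the middleware is enabled by adding its dotted path
# to MIDDLEWARE in settings.py; the module path below is an assumption and should
# match wherever this file actually lives in the project.
#
#     MIDDLEWARE = [
#         ...
#         'myproject.middleware.QueryCountDebugMiddleware',
#     ]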
|
py | 1a4b04798454da503864a64b0371ecf2ba967480 | from django import forms
from django.contrib.auth.forms import AuthenticationForm
class CustomAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if not user.is_active or not user.is_validated:
raise forms.ValidationError('There was a problem with your login.', code='invalid_login') |
py | 1a4b049557c9deddf0a8fb2d0b80ab7acfac52b2 | #!/usr/bin/python
import sys
import usb.core
import usb.util
import uinput
import time
from array import array
try:
# hexadecimal vendor and product values
dev = usb.core.find(idVendor=0x084f, idProduct=0xee05)
if dev == None:
print("Could not detect Brigthsign Tochboard")
raise SystemExit
# first endpoint
interface = 0
endpoint = dev[0][(0,0)][0]
# if the OS kernel already claimed the device, which is most likely true
# thanks to http://stackoverflow.com/questions/8218683/pyusb-cannot-set-configuration
if dev.is_kernel_driver_active(interface) is True:
# tell the kernel to detach
dev.detach_kernel_driver(interface)
# claim the device
usb.util.claim_interface(dev, interface)
keys = {
'KEY_UP': array('B', [ 2, 0, 85, 92]),
'KEY_RIGHT': array('B', [ 32, 0, 85, 92]),
'KEY_DOWN': array('B', [ 128, 0, 85, 92]),
'KEY_LEFT': array('B', [ 8, 0, 85, 92]),
'KEY_ENTER': array('B', [ 16, 0, 85, 92]),
'KEY_ESC': array('B', [ 1, 0, 85, 92]),
'KEY_VOLUMEUP': array('B', [ 0, 2, 85, 92]),
'KEY_VOLUMEDOWN': array('B', [ 0, 4, 85, 92]),
'KEY_RELEASE': array('B', [ 0, 0, 85, 92])
}
brightsign_keys = [
uinput.KEY_UP,
uinput.KEY_RIGHT,
uinput.KEY_DOWN,
uinput.KEY_LEFT,
uinput.KEY_ENTER,
uinput.KEY_ESC,
uinput.KEY_VOLUMEUP,
uinput.KEY_VOLUMEDOWN
]
key_pressed = False
last_key = "KEY_ESC"
touchboard = uinput.Device( brightsign_keys )
while True:
try:
data = dev.read(endpoint.bEndpointAddress,endpoint.wMaxPacketSize)
for key, code in keys.items():
if code == data[0:4]:
if 'KEY_RELEASE' != key:
touchboard.emit(eval('uinput.'+key), value=1) # press key
last_key = key
else:
touchboard.emit(eval('uinput.'+last_key), value=0)
except usb.core.USBError as e:
data = None
if e.args == ('Operation timed out',):
continue
finally:
# release the device
usb.util.release_interface(dev, interface)
touchboard.destroy()
# reattach the device to the OS kernel
dev.attach_kernel_driver(interface)
|
py | 1a4b059c046ac37f6ed213df8d895bcc626ca0e5 | # Задача: От A до Z
''' Напишите функцию, которая будет принимать строку — диапазон букв английского алфавита. Функция должна возвращать строку из всех букв этого диапазона. Если в диапазоне заданы заглавные буквы, в результирующей строке тоже должны быть заглавные.
Примечания
Диапазон будет задаваться двумя буквами с дефисом между ними.
Обрабатывать ошибки не нужно (при указании диапазона обе буквы будут в одинаковом регистре и располагаться будут в алфавитном порядке).
Примеры
gimme_the_letters("a-z") ➞ "abcdefghijklmnopqrstuvwxyz"
gimme_the_letters("h-o") ➞ "hijklmno"
gimme_the_letters("Q-Z") ➞ "QRSTUVWXYZ"
gimme_the_letters("J-J") ➞ J"
'''
# First Variant: Success
def gimme_the_letters1(sp):
return "".join(chr(n) for n in range(ord(sp[0]), ord(sp[-1])+1))
gtl1 = gimme_the_letters1("a-z"), gimme_the_letters1("h-o"), gimme_the_letters1("Q-Z"), gimme_the_letters1("J-J")
print(gtl1)
# Second Variant: Success
def gimme_the_letters2(spectrum1):
a = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
x, y = spectrum1.split('-')
return a[a.index(x):a.index(y) + 1]
gtl2 = gimme_the_letters2("a-z"), gimme_the_letters2("h-o"), gimme_the_letters2("Q-Z"), gimme_the_letters2("J-J")
print(gtl2)
# Third Variant: Success
def gimme_the_letters3(spectrum2):
start, end = [ord(i) for i in spectrum2.split('-')]
return ''.join(chr(i) for i in range(start, end+1))
gtl3 = gimme_the_letters3("a-z"), gimme_the_letters3("h-o"), gimme_the_letters3("Q-Z"), gimme_the_letters3("J-J")
print(gtl3) |
py | 1a4b069ade3ed45afde910ed91e95756b9cd0188 | """
``fish_http_status`` contains some of the most common HTTP status codes
https://github.com/openstack/swift/blob/master/swift/common/http.py
"""
def is_informational(status):
"""
Check whether the status code is informational
:param:
* status: HTTP status code
:return:
* result: True or False
"""
return 100 <= status <= 199
def is_success(status):
"""
Check whether the status code indicates success
:param:
* status: HTTP status code
:return:
* result: True or False
"""
return 200 <= status <= 299
def is_redirection(status):
"""
Check whether the status code is a redirection
:param:
* status: HTTP status code
:return:
* result: True or False
"""
return 300 <= status <= 399
def is_client_error(status):
"""
Check whether the status code is a client error
:param:
* status: HTTP status code
:return:
* result: True or False
"""
return 400 <= status <= 499
def is_server_error(status):
"""
Check whether the status code is a server error
:param:
* status: HTTP status code
:return:
* result: True or False
"""
return 500 <= status <= 599
# List of HTTP status codes
###############################################################################
# 1xx Informational
###############################################################################
HTTP_CONTINUE = 100
HTTP_SWITCHING_PROTOCOLS = 101
HTTP_PROCESSING = 102 # WebDAV
HTTP_CHECKPOINT = 103
HTTP_REQUEST_URI_TOO_LONG = 122
###############################################################################
# 2xx Success
###############################################################################
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
HTTP_MULTI_STATUS = 207 # WebDAV
HTTP_IM_USED = 226
###############################################################################
# 3xx Redirection
###############################################################################
HTTP_MULTIPLE_CHOICES = 300
HTTP_MOVED_PERMANENTLY = 301
HTTP_FOUND = 302
HTTP_SEE_OTHER = 303
HTTP_NOT_MODIFIED = 304
HTTP_USE_PROXY = 305
HTTP_SWITCH_PROXY = 306
HTTP_TEMPORARY_REDIRECT = 307
HTTP_RESUME_INCOMPLETE = 308
###############################################################################
# 4xx Client Error
###############################################################################
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_PAYMENT_REQUIRED = 402
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_NOT_ACCEPTABLE = 406
HTTP_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_REQUEST_TIMEOUT = 408
HTTP_CONFLICT = 409
HTTP_GONE = 410
HTTP_LENGTH_REQUIRED = 411
HTTP_PRECONDITION_FAILED = 412
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_REQUEST_URI_TOO_LONG = 414
HTTP_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_EXPECTATION_FAILED = 417
HTTP_IM_A_TEAPOT = 418
HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV
HTTP_LOCKED = 423 # WebDAV
HTTP_FAILED_DEPENDENCY = 424 # WebDAV
HTTP_UNORDERED_COLLECTION = 425
HTTP_UPGRADE_REQUIED = 426
HTTP_PRECONDITION_REQUIRED = 428
HTTP_TOO_MANY_REQUESTS = 429
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_NO_RESPONSE = 444
HTTP_RETRY_WITH = 449
HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450
HTTP_CLIENT_CLOSED_REQUEST = 499
###############################################################################
# 5xx Server Error
###############################################################################
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
HTTP_BAD_GATEWAY = 502
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_VARIANT_ALSO_NEGOTIATES = 506
HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV
HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509
HTTP_NOT_EXTENDED = 510
HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511
HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC
HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC
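# Minimal usage sketch of the classification helpers above (illustrative only,
# not part of the original module):
if __name__ == '__main__':
    for _code in (HTTP_OK, HTTP_FOUND, HTTP_NOT_FOUND, HTTP_BAD_GATEWAY):
        print(_code,
              is_success(_code),
              is_redirection(_code),
              is_client_error(_code),
              is_server_error(_code))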
|
py | 1a4b0765cb88fb2519ad3ca957091fcf15d87626 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import sys
import argparse
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Adam
from mindspore.ops import operations as P
from mindspore.common.initializer import TruncatedNormal
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_worker
parser = argparse.ArgumentParser(description="test_sparse_embedding")
parser.add_argument("--device_target", type=str, default="Ascend")
args, _ = parser.parse_known_args()
device_target = args.device_target
context.set_context(
mode=context.GRAPH_MODE, device_target=device_target, enable_sparse=True
)
context.set_ps_context(enable_ps=True)
def fc_with_initialize(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
def __init__(self, num_class=10):
super(LeNet5, self).__init__()
self.cast = P.Cast()
self.flatten = nn.Flatten()
self.embedding = nn.EmbeddingLookup(16, 4)
self.relu = nn.ReLU()
self.fc = fc_with_initialize(12, num_class)
def construct(self, x):
x = self.cast(x, mstype.int32)
x = self.embedding(x)
x = self.flatten(x)
x = self.fc(x)
return x
def do_sparse_embedding(ps=False):
epoch = 10
net = LeNet5(10)
if ps:
net.embedding.embedding_table.set_param_ps()
optimizer = Adam(filter(lambda x: x.requires_grad, net.get_parameters()))
optimizer.sparse_opt.add_prim_attr("primitive_target", "CPU")
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_with_criterion = WithLossCell(net, criterion)
train_network = TrainOneStepCell(net_with_criterion, optimizer)
train_network.set_train()
losses = []
for _ in range(epoch):
data = Tensor(np.random.randint(0, 15, (32, 3), np.int32))
label = Tensor(np.random.randint(0, 9, (32), np.int32))
if _is_role_pserver():
train_network(data, label)
sys.exit()
else:
loss = train_network(data, label).asnumpy()
losses.append(loss)
print(losses)
return losses
envs = os.environ
if __name__ == "__main__":
np.random.seed(0)
ps_loss = do_sparse_embedding(True)
if _is_role_worker():
context.reset_ps_context()
np.random.seed(0)
no_ps_loss = do_sparse_embedding()
context.set_ps_context(enable_ps=True)
assert np.allclose(ps_loss, no_ps_loss, rtol=1.0e-6, atol=1.0e-6)
|
py | 1a4b08feff9989b24cc95f9fd22b67faa971d5c5 | """write log to file."""
import logging
import os
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_logger(filename, logger_name=None, on_screen=False, level=None):
"""Return logger."""
if not logger_name:
logger_name = filename
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s - %(filename)s:%(lineno)d - %(message)s',
'%Y-%m-%d %X')
if level is None or level == "info":
level = logging.INFO
elif level == "debug":
level = logging.DEBUG
elif level == "warning":
level = logging.WARN
elif level == "error":
level = logging.ERROR
elif level == "critical":
level = logging.CRITICAL
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level)
file_handler = logging.FileHandler('%s/logs/%s.log' % (ROOT_PATH, filename))
file_handler.setLevel(level)
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if on_screen:
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
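# Example usage (file name and level are illustrative):
#   logger = get_logger('train', on_screen=True, level='debug')
#   logger.info('training started')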
|
py | 1a4b097edad557cc98353d4f98e1902b2a56c4ba | #
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk
# Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code
#
import sys
import unittest
try:
from unittest.mock import MagicMock, patch, ANY
except ImportError:
import mock
from mock import MagicMock, patch, ANY
import botocore
from botocore.exceptions import ClientError
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::ApiGateway::RestApi'
#############
# Main Code #
#############
config_client_mock = MagicMock()
sts_client_mock = MagicMock()
apigw_client_mock = MagicMock()
class Boto3Mock():
def client(self, client_name, *args, **kwargs):
if client_name == 'config':
return config_client_mock
elif client_name == 'sts':
return sts_client_mock
elif client_name == 'apigateway':
return apigw_client_mock
else:
raise Exception("Attempting to create an unknown client")
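# boto3 is replaced with the mock factory *before* the rule module is imported, so the
# rule's module-level boto3.client(...) calls resolve to the MagicMocks defined above.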
sys.modules['boto3'] = Boto3Mock()
rule = __import__('API_GW_NOT_EDGE_OPTIMISED')
class ParameterTest(unittest.TestCase):
get_rest_apis_private = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
invalid_rule_parameters = '{"ExceptionList":"apiid-1"}'
def test_api_invalid_parameter(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_private)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.invalid_rule_parameters), {})
assert_customer_error_response(
self, response, 'InvalidParameterValueException', 'Invalid value in the ExceptionList: apiid-1')
class ComplianceTest(unittest.TestCase):
rule_parameters = '{"ExceptionList":"apiid1,apiid2"}'
invoking_event_iam_role_sample = '{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}'
get_rest_apis_private = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_regional = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}}]
}
get_rest_apis_edge = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['EDGE']}}]
}
get_rest_apis_mix_compliant_only = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_mix = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid3', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_multi_type = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE', 'PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}}]
}
def test_no_gw(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value={"items": []})
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NOT_APPLICABLE', '123456789012', 'AWS::::Account'))
assert_successful_evaluation(self, response, resp_expected)
def test_private_only_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_private)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_regional_only_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_regional)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_edge_only_NON_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_edge)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid2', annotation="EDGE OPTIMIZED API Gateway is present."))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix_compliant_only)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid3'))
assert_successful_evaluation(self, response, resp_expected, 3)
def test_edge_exception_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_edge)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2', annotation="API is part of exception list."))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix_with_exceptions(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid3'))
assert_successful_evaluation(self, response, resp_expected, 3)
def test_multi_type(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_multi_type)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(testClass, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
testClass.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
testClass.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
testClass.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
testClass.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
testClass.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
testClass.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
testClass.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
testClass.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
testClass.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
testClass.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
testClass.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(testClass, response, customerErrorCode=None, customerErrorMessage=None):
if customerErrorCode:
testClass.assertEqual(customerErrorCode, response['customerErrorCode'])
if customerErrorMessage:
testClass.assertEqual(customerErrorMessage, response['customerErrorMessage'])
testClass.assertTrue(response['customerErrorCode'])
testClass.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
testClass.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
testClass.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
sts_client_mock.reset_mock(return_value=True)
sts_client_mock.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
|
py | 1a4b0a7eadc86d58a88bfd094baae6bf98463be0 | from django.db import models
from .Activite import Activite
from .Detail_Competence import Detail_Competence
class Question(models.Model):
num_question = models.CharField(max_length = 8)
point = models.IntegerField(default = 0)
activite = models.ForeignKey(Activite, on_delete = models.CASCADE)
detail_competence = models.ForeignKey(Detail_Competence, on_delete = models.CASCADE)
def __str__(self):
libelle = self.num_question + " " + self.detail_competence.code
return libelle
|
py | 1a4b0bb1abc80571e06c7e580d655c519d1dc6a4 | from setuptools import setup
from setuptools import find_packages
setup(
name='tgen',
version='0.3.0',
description='Sequence-to-sequence natural language generator',
author='Ondrej Dusek',
author_email='[email protected]',
url='https://github.com/UFAL-DSG/tgen',
download_url='https://github.com/UFAL-DSG/tgen.git',
license='Apache 2.0',
install_requires=['regex',
'unicodecsv',
'enum34',
'numpy',
'rpyc',
'pudb',
'recordclass',
'tensorflow==1.13.1',
'kenlm',
'pytreex==0.1dev'],
dependency_links=['https://github.com/kpu/kenlm/archive/master.zip#egg=kenlm',
'https://github.com/ufal/pytreex/tarball/master#egg=pytreex-0.1dev'],
packages=find_packages()
)
|
py | 1a4b0c5b622365529ba8aace0deedec817dcf059 | # -*- coding: utf-8 -*-
# (c) 2009-2018 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware used for debugging (optional).
This module dumps request and response information to the console, depending
on current debug configuration.
On init:
Define HTTP methods and litmus tests, that should turn on the verbose mode
(currently hard coded).
For every request:
Increase value of ``environ['verbose']``, if the request should be debugged.
Also dump request and response headers and body.
Then pass the request to the next middleware.
These configuration settings are evaluated:
*verbose*
This is also used by other modules. This filter adds additional information
depending on the value.
======= ===================================================================
verbose Effect
======= ===================================================================
<= 3 No additional output (only standard request logging).
4 Dump headers of all requests and responses.
5 Dump headers and bodies of all requests and responses.
======= ===================================================================
*debug_methods*
Boost verbosity to 3 while processing certain request methods. This option
is ignored, when ``verbose < 2``.
Configured like::
debug_methods = ["PROPPATCH", "PROPFIND", "GET", "HEAD", "DELET E",
"PUT", "COPY", "MOVE", "LOCK", "UNLOCK",
]
*debug_litmus*
Boost verbosity to 3 while processing litmus tests that contain certain
substrings. This option is ignored, when ``verbose < 2``.
Configured like::
debug_litmus = ["notowner_modify", "props: 16", ]
"""
import sys
import threading
from wsgidav import compat, util
from wsgidav.middleware import BaseMiddleware
from wsgidav.util import safe_re_encode
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
class WsgiDavDebugFilter(BaseMiddleware):
def __init__(self, wsgidav_app, next_app, config):
super(WsgiDavDebugFilter, self).__init__(wsgidav_app, next_app, config)
self._config = config
# self.out = sys.stdout
self.passedLitmus = {}
# These methods boost verbose=2 to verbose=3
self.debug_methods = config.get("debug_methods", [])
# Litmus tests containing these string boost verbose=2 to verbose=3
self.debug_litmus = config.get("debug_litmus", [])
# Exit server, as soon as this litmus test has finished
self.break_after_litmus = [
# "locks: 15",
]
def __call__(self, environ, start_response):
""""""
# srvcfg = environ["wsgidav.config"]
verbose = self._config.get("verbose", 3)
method = environ["REQUEST_METHOD"]
debugBreak = False
dumpRequest = False
dumpResponse = False
if verbose >= 5:
dumpRequest = dumpResponse = True
# Process URL commands
if "dump_storage" in environ.get("QUERY_STRING", ""):
dav = environ.get("wsgidav.provider")
if dav.lockManager:
dav.lockManager._dump()
if dav.propManager:
dav.propManager._dump()
# Turn on max. debugging for selected litmus tests
litmusTag = environ.get("HTTP_X_LITMUS", environ.get("HTTP_X_LITMUS_SECOND"))
if litmusTag and verbose >= 3:
_logger.info("----\nRunning litmus test '{}'...".format(litmusTag))
for litmusSubstring in self.debug_litmus:
if litmusSubstring in litmusTag:
verbose = 5
debugBreak = True
dumpRequest = True
dumpResponse = True
break
for litmusSubstring in self.break_after_litmus:
if (
litmusSubstring in self.passedLitmus
and litmusSubstring not in litmusTag
):
_logger.info(" *** break after litmus {}".format(litmusTag))
sys.exit(-1)
if litmusSubstring in litmusTag:
self.passedLitmus[litmusSubstring] = True
# Turn on max. debugging for selected request methods
if verbose >= 3 and method in self.debug_methods:
verbose = 5
debugBreak = True
dumpRequest = True
dumpResponse = True
# Set debug options to environment
environ["wsgidav.verbose"] = verbose
# environ["wsgidav.debug_methods"] = self.debug_methods
environ["wsgidav.debug_break"] = debugBreak
environ["wsgidav.dump_request_body"] = dumpRequest
environ["wsgidav.dump_response_body"] = dumpResponse
# Dump request headers
if dumpRequest:
_logger.info("{} Request ---".format(method))
# _logger.info("<{}> --- {} Request ---".format(
# threading.currentThread().ident, method))
for k, v in environ.items():
if k == k.upper():
_logger.info("{:<20}: '{}'".format(k, safe_re_encode(v, "utf8")))
_logger.info("\n")
# Intercept start_response
#
sub_app_start_response = util.SubAppStartResponse()
nbytes = 0
first_yield = True
app_iter = self.next_app(environ, sub_app_start_response)
for v in app_iter:
# Start response (the first time)
if first_yield:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
# Dump response headers
if first_yield and dumpResponse:
_logger.info(
"<{}> ---{} Response({}): ---".format(
threading.currentThread().ident,
method,
sub_app_start_response.status,
)
)
headersdict = dict(sub_app_start_response.response_headers)
for envitem in headersdict.keys():
_logger.info("{}: {}".format(envitem, repr(headersdict[envitem])))
_logger.info("")
# Check, if response is a binary string, otherwise we probably have
# calculated a wrong content-length
assert compat.is_bytes(v), v
# Dump response body
drb = environ.get("wsgidav.dump_response_body")
if compat.is_basestring(drb):
# Middleware provided a formatted body representation
_logger.info(drb)
drb = environ["wsgidav.dump_response_body"] = None
elif drb is True:
# Else dump what we get, (except for long GET responses)
if method == "GET":
if first_yield:
_logger.info("{}...".format(v[:50]))
elif len(v) > 0:
_logger.info(v)
nbytes += len(v)
first_yield = False
yield v
if hasattr(app_iter, "close"):
app_iter.close()
# Start response (if it hasn't been done yet)
if first_yield:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
if dumpResponse:
_logger.info(
"<{}> --- End of {} Response ({:d} bytes) ---".format(
threading.currentThread().ident, method, nbytes
)
)
return
|
py | 1a4b0c5fc00e7000b1f9092d0ffd4ff5ec601d4c | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.estimator.export.export_output import ExportOutput
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import nest
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `PREDICT`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
LOSS_METRIC_KEY = 'loss'
AVERAGE_LOSS_METRIC_KEY = 'average_loss'
class EstimatorSpec(
collections.namedtuple('EstimatorSpec', [
'predictions', 'loss', 'train_op', 'eval_metric_ops',
'export_outputs', 'training_chief_hooks', 'training_hooks',
'scaffold', 'evaluation_hooks'
])):
"""Ops and objects returned from a `model_fn` and passed to an `Estimator`.
`EstimatorSpec` fully defines the model to be run by an `Estimator`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
export_outputs=None,
training_chief_hooks=None,
training_hooks=None,
scaffold=None,
evaluation_hooks=None):
"""Creates a validated `EstimatorSpec` instance.
Depending on the value of `mode`, different arguments are required. Namely
* For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
* For `mode == ModeKeys.EVAL`: required field is `loss`.
* For `mode == ModeKeys.PREDICT`: required fields are `predictions`.
model_fn can populate all arguments independent of mode. In this case, some
arguments will be ignored by an `Estimator`. E.g. `train_op` will be
ignored in eval and infer modes. Example:
```python
def my_model_fn(mode, features, labels):
predictions = ...
loss = ...
train_op = ...
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Alternatively, model_fn can just populate the arguments appropriate to the
given mode. Example:
```python
def my_model_fn(mode, features, labels):
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
loss = ...
else:
loss = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = ...
else:
train_op = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = ...
else:
predictions = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Args:
mode: A `ModeKeys`. Specifies if this is training, evaluation or
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving.
A dict `{name: output}` where:
* name: An arbitrary name for this output.
* output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Single-headed models only need to specify one entry in this dictionary.
Multi-headed models should specify one entry for each head, one of
which must be named using
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.
training_chief_hooks: Iterable of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
training_hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers during training.
scaffold: A `tf.train.Scaffold` object that can be used to set
initialization, saver, and more to be used in training.
evaluation_hooks: Iterable of `tf.train.SessionRunHook` objects to
run during evaluation.
Returns:
A validated `EstimatorSpec` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
# Validate train_op.
if train_op is None:
if mode == ModeKeys.TRAIN:
raise ValueError('Missing train_op.')
else:
_check_is_tensor_or_operation(train_op, 'train_op')
# Validate loss.
if loss is None:
if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
raise ValueError('Missing loss.')
else:
loss = _check_is_tensor(loss, 'loss')
loss_shape = loss.get_shape()
if loss_shape.num_elements() not in (None, 1):
raise ValueError('Loss must be scalar, given: {}'.format(loss))
if not loss_shape.is_compatible_with(tensor_shape.scalar()):
loss = array_ops.reshape(loss, [])
# Validate predictions.
if predictions is None:
if mode == ModeKeys.PREDICT:
raise ValueError('Missing predictions.')
predictions = {}
else:
if isinstance(predictions, dict):
predictions = {
k: _check_is_tensor(v, 'predictions[{}]'.format(k))
for k, v in six.iteritems(predictions)
}
else:
predictions = _check_is_tensor(predictions, 'predictions')
# Validate eval_metric_ops.
if eval_metric_ops is None:
eval_metric_ops = {}
else:
if not isinstance(eval_metric_ops, dict):
raise TypeError(
'eval_metric_ops must be a dict, given: {}'.format(eval_metric_ops))
for key, metric_value_and_update in six.iteritems(eval_metric_ops):
if (not isinstance(metric_value_and_update, tuple) or
len(metric_value_and_update) != 2):
raise TypeError(
'Values of eval_metric_ops must be (metric_value, update_op) '
'tuples, given: {} for key: {}'.format(
metric_value_and_update, key))
metric_value, metric_update = metric_value_and_update
for metric_value_member in nest.flatten(metric_value):
# Allow (possibly nested) tuples for metric values, but require that
# each of them be Tensors or Operations.
_check_is_tensor_or_operation(metric_value_member,
'eval_metric_ops[{}]'.format(key))
_check_is_tensor_or_operation(metric_update,
'eval_metric_ops[{}]'.format(key))
# Validate export_outputs.
if export_outputs is not None:
if not isinstance(export_outputs, dict):
raise TypeError('export_outputs must be dict, given: {}'.format(
export_outputs))
for v in six.itervalues(export_outputs):
if not isinstance(v, ExportOutput):
raise TypeError(
'Values in export_outputs must be ExportOutput objects. '
'Given: {}'.format(export_outputs))
# Note export_outputs is allowed to be empty.
if len(export_outputs) == 1:
(key, value), = export_outputs.items()
if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_outputs[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value
if len(export_outputs) > 1:
if (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
not in export_outputs):
raise ValueError(
'Multiple export_outputs were provided, but none of them is '
'specified as the default. Do this by naming one of them with '
'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')
# Validate that all tensors and ops are from the default graph.
default_graph = ops.get_default_graph()
# We enumerate possible error causes here to aid in debugging.
error_message_template = (
'{0} with "{1}" must be from the default graph. '
'Possible causes of this error include: \n\n'
'1) {0} was created outside the context of the default graph.'
'\n\n'
'2) The object passed through to EstimatorSpec was not created '
'in the most recent call to "model_fn".')
if isinstance(predictions, dict):
for key, value in six.iteritems(predictions):
if value.graph is not default_graph:
raise ValueError(error_message_template.format(
'prediction values',
'{0}: {1}'.format(key, value.name)))
elif predictions is not None:
# 'predictions' must be a single Tensor.
if predictions.graph is not default_graph:
raise ValueError(error_message_template.format(
'prediction values', predictions.name))
if loss is not None and loss.graph is not default_graph:
raise ValueError(error_message_template.format('loss', loss.name))
if train_op is not None and train_op.graph is not default_graph:
raise ValueError(error_message_template.format('train_op', train_op.name))
for key, value in list(six.iteritems(eval_metric_ops)):
values = nest.flatten(value)
for value in values:
if value.graph is not default_graph:
raise ValueError(error_message_template.format(
'eval_metric_ops',
'{0}: {1}'.format(key, value.name)))
# Validate hooks.
training_chief_hooks = tuple(training_chief_hooks or [])
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
for hook in training_hooks + training_chief_hooks + evaluation_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be SessionRunHook instances, given: {}'.format(
hook))
scaffold = scaffold or monitored_session.Scaffold()
# Validate scaffold.
if not isinstance(scaffold, monitored_session.Scaffold):
raise TypeError(
'scaffold must be tf.train.Scaffold. Given: {}'.format(scaffold))
return super(EstimatorSpec, cls).__new__(
cls,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
training_chief_hooks=training_chief_hooks,
training_hooks=training_hooks,
scaffold=scaffold,
evaluation_hooks=evaluation_hooks)
def _check_is_tensor_or_operation(x, name):
if not (isinstance(x, ops.Operation) or isinstance(x, ops.Tensor)):
raise TypeError('{} must be Operation or Tensor, given: {}'.format(name, x))
def _check_is_tensor(x, tensor_name):
"""Returns `x` if it is a `Tensor`, raises TypeError otherwise."""
if not isinstance(x, ops.Tensor):
raise TypeError('{} must be Tensor, given: {}'.format(tensor_name, x))
return x
|
py | 1a4b0d349b5d7e70d58ad19783e865747b2a82a9 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
def delete(start,root,sec_node,k):
if(root==None):
return start.next
if(k==0 and sec_node==None):
sec_node=root
return delete(start.next,root,sec_node.next,k)
if(k==0 and sec_node.next==None):
start.next=start.next.next
return
elif(k!=0):
return delete(start,root.next,sec_node,k-1)
return delete(start.next,root,sec_node.next,k)
class Solution:
# @param A : head node of linked list
# @param B : integer
# @return the head node in the linked list
def removeNthFromEnd(self, root, k):
if(root==None or (root.next==None and k==1)):
return None
# ans=delete(root,root,None,k)
# return root if ans==None else ans
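        # Two-pointer technique: advance sec_node k steps ahead, then move both pointers
        # together until sec_node hits the tail; top then sits just before the node to remove.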
sec_node=root
top=root
while(sec_node.next!=None):
if(k!=0):
sec_node=sec_node.next
k-=1
else:
sec_node=sec_node.next
top=top.next
if(k!=0):
return root.next
top.next=top.next.next
return root
"""
Remove Nth Node from List End
Problem Description
Given a linked list A, remove the B-th node from the end of list and return its head.
For example, Given linked list: 1->2->3->4->5, and B = 2. After removing the second node from the end, the linked list becomes 1->2->3->5.
NOTE: If B is greater than the size of the list, remove the first node of the list.
NOTE: Try doing it using constant additional space.
Problem Constraints
1 <= |A| <= 10^6
Input Format
The first argument of input contains a pointer to the head of the linked list.
The second argument of input contains the integer B.
Output Format
Return the head of the linked list after deleting the B-th element from the end.
Example Input
Input 1:
A = [1, 2, 3, 4, 5]
B = 2
Input 2:
A = [1]
B = 1
Example Output
Output 1:
[1, 2, 3, 5]
Output 2:
[]
Example Explanation
Explanation 1:
In the first example, 4 is the second last element.
Explanation 2:
In the second example, 1 is the first and the last element.
""" |
py | 1a4b0d6d0fa0ed0984530b650aaa64c84a0eb128 | import numpy as np
import random
import milk.supervised.svm
import milk.supervised.multi
from milk.supervised.classifier import ctransforms
from .fast_classifier import fast_classifier
import milksets.wine
features,labels = milksets.wine.load()
A = np.arange(len(features))
random.seed(9876543210)
random.shuffle(A)
features = features[A]
labels = labels[A]
labelset = set(labels)
base = ctransforms(milk.supervised.svm.svm_raw(C=2.,kernel=milk.supervised.svm.rbf_kernel(2.**-3)),milk.supervised.svm.svm_binary())
def test_one_against_rest():
M = milk.supervised.multi.one_against_rest(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
def test_one_against_one():
M = milk.supervised.multi.one_against_one(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
tlabels_many = M.apply_many(features[100:])
assert np.all(tlabels == tlabels_many)
def test_two_thirds():
np.random.seed(2345)
C = milk.supervised.defaultclassifier('fast')
X = np.random.rand(120,4)
X[:40] += np.random.rand(40,4)
X[:40] += np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
Y = np.repeat(np.arange(3), 40)
model = C.train(X,Y)
Y_ = np.array([model.apply(x) for x in X])
assert (Y_ == Y).mean() * 3 > 2
def test_multi_labels():
clabels = [[lab, lab+7] for lab in labels]
multi_label = milk.supervised.multi.one_against_rest_multi(base)
model = multi_label.train(features[::2], clabels[::2])
test_vals = [model.apply(f) for f in features[1::2]]
for ts in test_vals:
if 0.0 in ts: assert 7.0 in ts
if 1.0 in ts: assert 8.0 in ts
if 2.0 in ts: assert 9.0 in ts
def test_classifier_no_set_options():
# Basically these should not raise an exception
milk.supervised.multi.one_against_rest_multi(fast_classifier())
milk.supervised.multi.one_against_rest(fast_classifier())
milk.supervised.multi.one_against_one(fast_classifier())
def test_tree():
mtree = milk.supervised.multi.multi_tree_learner(fast_classifier())
labels = [0,1,2,2,3,3,3,3]
features = np.random.random_sample((len(labels), 8))
model = mtree.train(features, labels)
counts = np.zeros(4)
for ell in labels:
counts[ell] += 1
g0,g1 = milk.supervised.multi.split(counts)
assert np.all(g0 == [3]) or np.all(g1 == [3])
def list_to_zero(v):
if isinstance(v, list):
return 1000
return v
def r(m):
if len(m) == 1: return int(m[0])
else: return sorted([r(m[1]), r(m[2])], key=list_to_zero)
assert r(model.model) == [3,[2,[0,1]]]
|
py | 1a4b0ddb9174810cb770c375a06ec9f259125ee2 | #!/usr/bin/env python
import math
import os
import sys
from PIL import Image
from escpos.printer import Serial
STRIP_WIDTH = 8
MAX_WIDTH = 540
if len(sys.argv) != 2:
print("\033[1;31;40musage: {} imagefile.png\033[0m".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
image = Image.open(sys.argv[1])
print("Loaded image: {}".format(sys.argv[1]))
print("Size: {}".format(image.size))
# Resize picture if too wide
(img_w, img_h) = image.size
if img_w > MAX_WIDTH:
img_h = int(MAX_WIDTH * img_h / float(img_w))
img_w = MAX_WIDTH
image = image.resize((img_w, img_h), Image.ANTIALIAS)
print("Too large, resizing to: {}".format((img_w, img_h)))
image = image.convert('L')
num_strips = math.ceil(img_h / STRIP_WIDTH)
print("Total Strips: {}".format(num_strips))
print("Strip size: {}".format((img_w, STRIP_WIDTH)))
strips = [None] * num_strips
for i in range(num_strips):
area = (0, STRIP_WIDTH * i, img_w, STRIP_WIDTH * (i + 1))
strips[i] = image.crop(area)
if img_h % STRIP_WIDTH != 0:
strips[-1] = strips[-1].crop((0, 0, img_w, img_h % STRIP_WIDTH))
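# The image is sliced into STRIP_WIDTH-tall horizontal strips, saved as temporary PNGs,
# and sent to the printer one strip at a time.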
# Dump strips into a temporary directory
if not os.path.exists('.temp'):
os.mkdir('.temp')
for i in range(num_strips):
strips[i].save(os.path.join('.temp', "strip{0:03}.png".format(i)))
# Do the printing
p = Serial(devfile='COM5', baudrate=9600, parity='N', stopbits=1, timeout=1.00, dsrdtr=True)
p.text("\033@") # Reset
p.text("\033C\20") # Set sheet eject length
p.text("\0331") # Select 1/8-inch line spacing
p.text("\033$\000\000") # Set left margin
p.text("\033a\001") # Center align
for i in range(num_strips):
p.image(os.path.join('.temp', "strip{0:03}.png".format(i)))
p.text("\033a\000") # Left align
#p.cut()
|
py | 1a4b110af335e04dce1f21a8128b8de9970ac090 | import json
from flask import render_template, url_for, redirect, request, send_from_directory, g, flash
from flask_login import current_user, login_user, logout_user, login_required
from flask_babel import _, get_locale
from flask_babel import lazy_gettext as _l
from wtforms import RadioField, TextAreaField
from wtforms.validators import DataRequired, Length
from app import app, db, moment
from app.models import Class, User, Group, Test, Result, TestResume, LogRequest, LogClick
from app.forms import EmptyForm, LoginForm, RegisterForm, AddGroupForm, UpdateGroupForm, AddTestForm, UpdateTestForm, UpdateProfileForm
from app.spec_checks import check_test_9
from datetime import datetime
# ------------------------ main pages ------------------------ #
@app.route('/')
@app.route('/index')
def index():
groups = Group.query.all()
return render_template( "index.html", title = _("All tests"), menu = _("Test by groups"), groups = groups )
@app.route('/group/<int:id>')
def group(id):
group = Group.query.get(id)
link = url_for( 'index' )
path = f"<a href='{link}'>{_('All tests')}</a> / {group.title}"
return render_template( "group.html", title = f"{group.title}", path = path, menu = "Тесты в группе", group = group )
@app.route('/test/<int:id>')
def test(id):
test = Test.query.get(id)
group = Group.query.get(test.id_group)
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
return render_template( "test-base.html", title = test.name + " / " + _("Info"), path = path, test = test )
@app.route('/testing/<int:id>', methods = [ 'GET', 'POST' ])
def testing(id):
test = Test.query.get(id)
group = Group.query.get(test.id_group)
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
class TestingForm(EmptyForm):
pass
for question in test.questions:
setattr( TestingForm,
str(question.id),
RadioField( question.text, choices = [ ( a.id, a.text ) for a in question.answers ] ,
validators = [ DataRequired() ] ) )
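    # A RadioField is attached to the form class for every question in the test, keyed by
    # the question id, so the submitted form maps each question id to a chosen answer id.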
form = TestingForm()
if form.validate_on_submit():
arr = form.data
score = -1
mark = 0
quests = test.questions.count()
percent = -1
if current_user.is_authenticated:
id_user = current_user.id
else:
id_user = None
if id != 9:
# Checks usual tests
score = 0
for question in test.questions:
if arr[ str(question.id) ] == str( question.true_answer() ):
score += 1
percent = round( ( score / quests ) * 100, 1 )
if percent >= 90:
mark = 5
elif 75 < percent < 90:
mark = 4
elif 50 < percent <= 75:
mark = 3
elif percent <= 50:
mark = 2
elif id == 9:
# Check test 9
mark = check_test_9( arr )
print( mark )
result = Result( id_test = test.id, id_user = id_user, mark = mark, score = score, quests = quests, percent = percent )
db.session.add( result )
db.session.commit()
last_insert_id = result.id
return redirect( url_for( "result", id = last_insert_id ) )
return render_template( "test.html", title = test.name + " / " + _("Testing"), path = path, form = form, test = test )
@app.route('/result/<int:id>')
def result(id):
result = Result.query.get( id )
test = Test.query.get( result.id_test )
group = Group.query.get( test.id_group )
if result.id_user is None:
user = "None"
else:
user = User.query.get( result.id_user )
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
return render_template( "test-result.html", title = test.name + " / " + _("Result"), path = path, result = result,
test = test, user = user )
@app.route('/edit_profile', methods = [ 'GET', 'POST' ])
# @login_required
def profile():
form = UpdateProfileForm(current_user.username)
classes = Class.query.all()
classes_list = [(c.id, c.abbr) for c in classes]
form.id_class.choices = classes_list
if form.validate_on_submit():
current_user.username = form.username.data
current_user.name = form.name.data
current_user.lastname = form.lastname.data
current_user.description = form.description.data
current_user.id_class = form.id_class.data
current_user.role = form.role.data
current_user.sex = form.sex.data
db.session.commit()
return redirect( url_for( 'profile' ) )
elif request.method == 'GET':
form.username.data = current_user.username
form.name.data = current_user.name
form.lastname.data = current_user.lastname
form.description.data = current_user.description
form.id_class.data = current_user.id_class
form.role.data = current_user.role
form.sex.data = current_user.sex
return render_template( "forms/profile.html", title = _( 'Profile' ), form = form )
# ------------------------ login system ------------------------ #
@app.route('/login', methods = [ 'GET', 'POST' ])
def login():
if current_user.is_authenticated:
return redirect( url_for( "index" ) )
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by( username = form.username.data ).first()
if user is None or not user.check_password( form.password.data ):
return redirect( url_for( "login" ) )
login_user( user, remember = form.remember_me.data )
return redirect( url_for( "index" ) )
return render_template( "login.html", title = _("Sign in"), form = form )
@app.route('/register', methods = [ 'GET', 'POST' ])
def register():
if current_user.is_authenticated:
return redirect( url_for("index") )
classes = Class.query.all()
classes_list = [ ( c.id, c.abbr ) for c in classes ]
form = RegisterForm()
form.id_class.choices = classes_list
if form.validate_on_submit():
user = User( username = form.username.data, name = form.name.data, lastname = form.lastname.data,
email = form.email.data, id_class = form.id_class.data, role = form.role.data )
user.set_password( password = form.password.data )
db.session.add( user )
db.session.commit()
return redirect( url_for( "login" ) )
return render_template( "register.html", title = _( "Register" ), form = form )
@app.route('/logout')
def logout():
logout_user()
return redirect( url_for( "index" ) )
# ------------------------ forms pages ------------------------ #
@app.route('/add_group', methods = [ 'GET', 'POST' ])
@login_required
def add_group():
form = AddGroupForm()
if form.validate_on_submit():
group = Group( title = form.title.data, description = form.description.data )
db.session.add( group )
db.session.commit()
last_insert_id = group.id
return redirect( url_for( "group", id = last_insert_id ) )
return render_template( "forms/group-add.html", title = _( "Add group" ), form = form )
@app.route('/update_group/<int:id>', methods = [ 'GET', 'POST' ])
def update_group(id):
form = UpdateGroupForm()
group = Group.query.get( id )
if form.validate_on_submit():
group.title = form.title.data
group.description = form.description.data
db.session.commit()
return redirect( url_for( "group", id = id ) )
elif request.method == 'GET':
form.title.data = group.title
form.description.data = group.description
return render_template( "forms/group-update.html", title = _('Change of group'), form = form )
@app.route('/add_test', methods = [ 'GET', 'POST' ])
@login_required
def add_test():
groups = Group.query.all()
groups_list = [ ( g.id, g.title ) for g in groups ]
form = AddTestForm()
form.id_group.choices = groups_list
if form.validate_on_submit():
test = Test( id_group = form.id_group.data, name = form.name.data, annotation = form.annotation.data,
description = form.description.data )
db.session.add( test )
db.session.commit()
last_insert_id = test.id
return redirect( url_for( "test", id = last_insert_id ) )
return render_template( "forms/test-add.html", title = _( "Add test" ), form = form )
@app.route('/update_test/<int:id>', methods = ['GET', 'POST'])
@login_required
def update_test(id):
groups = Group.query.all()
groups_list = [(g.id, g.title) for g in groups]
test = Test.query.get( id )
class UpdateSpecTestForm(UpdateTestForm):
pass
min_key = 0
max_key = 0
name_key = ""
if test.is_usual():
min_key = 2
max_key = 6
name_key = _l( "Test resume for mark " )
else:
if id == 9:
min_key = -1
max_key = 11
name_key = _l( "Test resume for key " )
for i in range( min_key, max_key ):
setattr( UpdateSpecTestForm, f'test_resume_{i}',
TextAreaField( "{}'{}'".format( name_key, i ), validators = [DataRequired(), Length( min = 32, max = 512 )] ) )
form = UpdateSpecTestForm()
form.id_group.choices = groups_list
if form.validate_on_submit():
test.id_group = form.id_group.data
test.name = form.name.data
test.difficult = form.difficult.data
test.annotation = form.annotation.data
test.description = form.description.data
for i in range(min_key, max_key):
test.set_description_mark( i, form[ f'test_resume_{i}' ].data )
db.session.commit()
return redirect( url_for( "test", id = id ) )
elif request.method == 'GET':
form.id_group.data = test.id_group
form.name.data = test.name
form.difficult.data = test.difficult
form.annotation.data = test.annotation
form.description.data = test.description
for i in range( min_key, max_key ):
form[ f'test_resume_{i}' ].data = test.get_description_mark(i)
return render_template( "forms/test-update.html", title = _('Change of test'), form = form,
min_key = min_key, max_key = max_key )
# ------------------------ admin pages ------------------------ #
@app.route('/admin/tables')
def admin_tables():
user = User
class_ = Class
group = Group
test = Test
result = Result
return render_template( "admin/tables.html", title = _('Admin-panel') + ' / ' + _('Tables'),
user = user, group = group, test = test, result = result, class_ = class_ )
@app.route('/admin/table/classes')
def admin_table_classes():
classes = Class.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Classes' )}"
link0 = url_for( 'admin_tables' )
path = f"{_('Admin-panel')} / <a href='{link0}'>{_('Tables')}</a> / {_('Classes')}"
return render_template( "admin/table-classes.html", title = title, path = path, classes = classes, wide = True )
@app.route('/admin/table/users')
def admin_table_users():
users = User.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Users' )}"
link0 = url_for( 'admin_tables' )
path = f"{_('Admin-panel')} / <a href='{link0}'>{_('Tables')}</a> / {_('Users')}"
return render_template( "admin/table-users.html", title = title, path = path, users = users, wide = True )
@app.route('/admin/table/groups')
def admin_table_groups():
groups = Group.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Groups' )}"
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Groups' )}"
return render_template( "admin/table-groups.html", title = title, path = path, groups = groups, wide = True )
@app.route('/admin/table/tests')
def admin_table_tests():
tests = Test.query.all()
title = _( 'Admin-panel' ) + ' / ' + _( 'Tables' ) + ' / ' + _( 'Tests' )
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Tests' )}"
return render_template( "admin/table-tests.html", title = title, path = path, tests = tests, wide = True )
@app.route('/admin/table/results')
def admin_table_results():
results = Result.query.all()
title = _( 'Admin-panel' ) + ' / ' + _( 'Tables' ) + ' / ' + _( 'Results' )
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Results' )}"
return render_template( "admin/table-results.html", title = title, path = path, results = results, wide = True )
@app.route('/admin/statistic')
def admin_statistic():
clicks = LogClick.query
requests = LogRequest.query
return render_template( "admin/statistic.html", title = _('Admin-panel') + ' / ' + _('Statistic'),
clicks = clicks, requests = requests )
# ------------------------ API pages ------------------------ #
@app.route('/api')
def api():
return render_template("api.html", title = _('API methods list'))
# --- users ---
@app.route('/api/get_users_count')
def api_get_users_count():
count = User.query.count()
return str( count )
# --- groups ---
@app.route('/api/get_groups_count')
def api_get_groups_count():
count = Group.query.count()
return str( count )
@app.route('/api/get_groups_list')
def api_get_groups_list():
list = Group.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'title': item.title } )
return json.dumps(arr)
# --- tests ---
@app.route('/api/get_tests_count')
def api_get_tests_count():
count = Test.query.count()
return str( count )
@app.route('/api/get_tests_list')
def api_get_tests_list():
list = Test.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_group': item.id_group, 'name': item.name } )
return json.dumps( arr )
@app.route('/api/get_tests_count_by_group/<int:id>')
def api_get_tests_count_by_group(id):
if Group.query.get( id ):
count = Test.query.filter( Test.id_group == id ).count()
else:
count = 'null'
return str( count )
@app.route('/api/get_tests_list_by_group/<int:id>')
def api_get_tests_list_by_group(id):
if Group.query.get( id ):
list = Test.query.filter( Test.id_group == id ).all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_group': item.id_group, 'name': item.name } )
return json.dumps( arr )
else:
response = 'null'
return str( response )
# --- results ---
@app.route('/api/get_results_count')
def api_get_results_count():
count = Result.query.count()
return str( count )
@app.route('/api/get_results_list')
def api_get_results_list():
list = Result.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_test': item.id_test, 'id_user': item.id_user, 'mark': item.mark } )
return json.dumps( arr )
@app.route('/api/get_results_count_by_test/<int:id>')
def api_get_results_count_by_test(id):
if Test.query.get( id ):
count = Result.query.filter( Result.id_test == id ).count()
else:
count = 'null'
return str( count )
# ------------------------ system pages ------------------------ #
@app.route('/about_system')
def about_system():
return render_template( "about-system.html", title = _('About TeSi') )
@app.route('/about_us')
def about_us():
return render_template( "about-us.html", title = _('About us') )
# ------------------------ technical pages ------------------------ #
@app.route('/null')
def null():
return "null"
@app.route('/favicon.ico')
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@app.errorhandler(404)
def error_404(e):
path = _('Errors') + " / 400 / " + _('Error 404')
return render_template( "errors/404.html", title = _( 'Error 404' ), path = path ), 404
@app.errorhandler(405)
def error_405(e):
path = _('Errors') + " / 400 / " + _('Error 405')
return render_template( "errors/405.html", title = _( 'Error 405' ), path = path ), 405
@app.errorhandler(500)
def error_500(e):
path = _('Errors') + " / 500 / " + _('Error 500')
return render_template( "errors/500.html", title = _( 'Error 500' ), path = path ), 500
@app.before_request
def before_request():
if current_user.is_authenticated:
current_user.datetime_last = datetime.utcnow()
db.session.commit()
g.locale = str( get_locale() )
g.theme = 'dark'
|
py | 1a4b113ee9c121f14a808f8cdf350c4033a133c7 | from dagster import Field, RepositoryDefinition, Shape, composite_solid, pipeline, seven, solid
@solid(
config={
'cluster_cfg': Shape(
{
'num_mappers': Field(int),
'num_reducers': Field(int),
'master_heap_size_mb': Field(int),
'worker_heap_size_mb': Field(int),
}
),
'name': Field(str),
}
)
def hello(context):
context.log.info(seven.json.dumps(context.solid_config['cluster_cfg']))
return 'Hello, %s!' % context.solid_config['name']
def config_mapping_fn(cfg):
return {
'hello': {
'config': {
'cluster_cfg': {
'num_mappers': 100,
'num_reducers': 20,
'master_heap_size_mb': 1024,
'worker_heap_size_mb': 8192,
},
'name': cfg['name'],
}
}
}
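# The composite solid below exposes only a 'name' field to callers; config_mapping_fn
# expands that single field into the full nested config expected by the 'hello' solid.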
@composite_solid(
config_fn=config_mapping_fn,
config={'name': Field(str, is_required=False, default_value='Sam')},
)
def hello_external():
return hello()
@pipeline
def my_pipeline():
hello_external()
def define_repository():
return RepositoryDefinition('config_mapping', pipeline_defs=[my_pipeline])
|
py | 1a4b1171602687c7e03d70affdf777d210cb3a11 | #!/usr/bin/env python3.7
# coding: utf-8
from .field_rename import FieldRename
|
py | 1a4b13d2fc65c9d8faeb2ea5e39280f8d515b220 | import glob
import importlib
import itertools
import json
import logging
from io import StringIO
from os import path
from pprint import pprint
import click
import conllu
import mlflow
import pandas as pd
import spacy
from gensim.models.keyedvectors import KeyedVectors
from lemmy import Lemmatizer
from sklearn.model_selection import train_test_split
from spacy.gold import GoldParse
from spacy.scorer import Scorer
from tqdm import tqdm
import conll17_ud_eval
from model_builder.eval import lemmy_accuracy
from model_builder.io import (
parse_szk_morph,
parse_szk_dep,
sentence_repr,
read_conllu_data_for_lemmy,
RESOURCES_ROOT,
format_as_conllu,
)
from model_builder.ner import SpacyNerTrainer, DataIterator, sentence_to_str
logging.basicConfig(level=logging.INFO)
@click.group()
def cli():
pass
@cli.command()
@click.argument("from_path")
@click.argument("to_path")
def convert_vectors_to_txt(from_path, to_path):
model = KeyedVectors.load_word2vec_format(
from_path, binary=True, unicode_errors="replace"
)
model.save_word2vec_format(to_path, binary=False)
@cli.command()
@click.argument("vectors_path")
def eval_vectors(vectors_path):
model = KeyedVectors.load_word2vec_format(
vectors_path, binary=False, unicode_errors="replace"
)
analogies_result = model.wv.evaluate_word_analogies(
path.join(RESOURCES_ROOT, "questions-words-hu.txt"),
dummy4unknown=True,
restrict_vocab=None,
case_insensitive=False,
)
pprint(analogies_result[0])
@cli.command()
@click.argument("model_name")
def smoke_test(model_name):
nlp = spacy.load(model_name)
doc = nlp(
"Csiribiri csiribiri zabszalma - négy csillag közt alszom ma. "
"Csiribiri csiribiri bojtorján lélek lép a lajtorján."
)
print(nlp)
print(doc, type(doc))
pprint(
[
dict(
text=t.text,
lemma=t.lemma_,
pos=t.pos_,
tag=t.tag_,
dep=t.dep_,
head=t.head,
is_stop=t.is_stop,
has_vector=t.has_vector,
brown_cluser=t.cluster,
prob=t.prob,
)
for t in doc
]
)
@cli.command()
@click.argument("input_file")
@click.argument("output_file")
def normalize_ud_corpus(input_file, output_file):
with open(input_file) as f, open(output_file, "w") as of:
for line in tqdm(f):
stripped_line = line.strip()
if len(stripped_line) == 0 or stripped_line[0] == "#":
of.write(line)
else:
parts = stripped_line.split("\t")
dep_label = parts[7]
dep_label = dep_label.split(":")[0]
parts[7] = dep_label
of.write("\t".join(parts) + "\n")
@cli.command()
@click.argument("from_glob")
@click.argument("to_path")
@click.argument("dev_path")
@click.argument("test_path")
@click.option("--morph/--dep", default=False)
def convert_szk_to_conllu(from_glob, to_path, dev_path, test_path, morph):
ignored = []
for fpath in [dev_path, test_path]:
with open(fpath) as f:
ignored.extend(map(sentence_repr, conllu.parse(f.read())))
parser = parse_szk_morph if morph else parse_szk_dep
ignored = set(ignored)
parsed = []
for fpath in glob.glob(from_glob):
for sent in conllu.parse("\n\n".join(parser(fpath))):
if sentence_repr(sent) not in ignored:
parsed.append(sent)
logging.info("Read {} sentences".format(len(parsed)))
with open(to_path, "w") as outf:
out = "".join(sent.serialize() for sent in parsed)
outf.write(out)
@cli.command()
@click.argument("train_path")
@click.argument("test_path")
@click.argument("model_path")
def train_lemmy(train_path, test_path, model_path):
X_train, y_train = read_conllu_data_for_lemmy(train_path)
X_test, y_test = read_conllu_data_for_lemmy(test_path)
lemmatizer = Lemmatizer()
lemmatizer.fit(X_train, y_train)
lemmy_accuracy(lemmatizer, X_test, y_test)
with open(model_path, "w") as f:
json.dump(lemmatizer.rules, f)
@cli.command()
@click.argument("model_name")
@click.argument("test_data_path")
@click.argument("ner_test_data")
def benchmark_model(model_name, test_data_path, ner_test_data):
with open(test_data_path) as f:
data = conllu.parse(f.read())
text = " ".join(d.metadata["text"] for d in data)
load_model = getattr(importlib.import_module(model_name), "load")
nlp = load_model()
_parsed = StringIO(format_as_conllu(nlp(text), 1))
parsed = conll17_ud_eval.load_conllu(_parsed)
gold = conll17_ud_eval.load_conllu_file(test_data_path)
results = pd.DataFrame(
{k: v.__dict__ for k, v in conll17_ud_eval.evaluate(gold, parsed).items()}
).T
print(results)
diterator = DataIterator()
test_sents = list(itertools.islice(diterator.tagged_sentences(ner_test_data), None))
scorer = Scorer()
for sentence, annot in test_sents:
doc_gold_text = nlp.make_doc(sentence)
gold = GoldParse(doc_gold_text, entities=annot)
predicted = nlp(sentence)
scorer.score(predicted, gold)
print(scorer.scores)
@cli.command()
@click.argument("model_name")
@click.argument("output_path")
@click.argument("train_data")
@click.argument("dev_data")
@click.argument("test_data")
@click.argument("dropout")
@click.argument("n_iter")
@click.argument("patience")
def train_ner(model_name, output_path, train_data, dev_data, test_data, dropout, n_iter, patience):
mlflow.set_tracking_uri("./mlruns")
mlflow.set_experiment("Spacy NER")
mlflow.start_run(run_name="Using all")
if model_name in ["None", "False", "", "blank"]:
model_name = None
trainer = SpacyNerTrainer(model_name, output_path)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.info("Reading train data")
diterator = DataIterator()
train_sentences = list(tqdm(itertools.islice(diterator.tagged_sentences(train_data), None)))
logging.info("Got {} sentences with at least one entity".format(len(train_sentences)))
logging.info("Reading test data")
test_sentences = list(tqdm(diterator.tagged_sentences(test_data)))
logging.info("Got {} sentences with at least one entity".format(len(test_sentences)))
logging.info("Reading dev data")
dev_sentences = list(tqdm(diterator.tagged_sentences(dev_data)))
logging.info("Got {} sentences with at least one entity".format(len(dev_sentences)))
trainer(train_sentences, dev_sentences, test_sentences, int(n_iter), float(dropout), int(patience))
mlflow.end_run()
@cli.command()
@click.argument("szegedner_data")
@click.argument("train_data")
@click.argument("dev_data")
@click.argument("test_data")
def split_ner_data(szegedner_data, train_data, dev_data, test_data):
diterator = DataIterator()
logging.info("Reading gold data")
gold_sents = list(tqdm(itertools.islice(diterator.sentences_with_tags(szegedner_data), None)))
train_sents, all_test_sents = train_test_split(gold_sents, test_size=.2, random_state=42)
dev_sents, test_sents = train_test_split(all_test_sents, test_size=.5, random_state=42)
logging.info("Storing training data")
with open(train_data, "w") as f:
for i, s in tqdm(enumerate(train_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
logging.info("Storing test data")
with open(dev_data, "w") as f:
for i, s in tqdm(enumerate(dev_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
logging.info("Storing test data")
with open(test_data, "w") as f:
for i, s in tqdm(enumerate(test_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
if __name__ == "__main__":
cli()
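# --- Illustrative invocations (added sketch, not part of the original file).
# --- The module filename cli.py and all model/file names below are placeholders;
# --- depending on the click version, command names may use dashes instead of
# --- underscores.
#   python cli.py smoke_test <spacy_model_name>
#   python cli.py eval_vectors <vectors.txt>
#   python cli.py train_lemmy <train.conllu> <test.conllu> <rules.json>
#   python cli.py split_ner_data <szeged_ner.txt> <train.txt> <dev.txt> <test.txt>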
|
py | 1a4b1402b46c21de8ae7513374edeecd7af57c2e | import psyco; psyco.full()
from fltk import *
import copy
import numpy as np
import sys
#if '../PyCommon/modules' not in sys.path:
# sys.path.append('../PyCommon/modules')
if './modules' not in sys.path:
sys.path.append('./modules')
import Math.mmMath as mm
import Resource.ysMotionLoader as yf
import Renderer.ysRenderer as yr
import Renderer.csVpRenderer as cvr
import Simulator.csVpWorld as cvw
import Simulator.csVpModel as cvm
import GUI.ysSimpleViewer as ysv
import Optimization.ysAnalyticConstrainedOpt as yac
import ArticulatedBody.ysJacobian as yjc
import Util.ysPythonEx as ype
import ArticulatedBody.ysReferencePoints as yrp
import ArticulatedBody.ysMomentum as ymt
import ArticulatedBody.ysControl as yct
import Motion.ysHierarchyEdit as yme
import Simulator.ysPhysConfig as ypc
import numpy.linalg as npl
import mtOptimize as mot
import mtInitialize_005 as mit
contactState = 0
g_applyForce = False
g_initFlag = 0
softConstPoint = [0, 0, 0]
forceShowFrame = 0
forceApplyFrame = 0
JsysPre = 0
JsupPreL = 0
JsupPreR = 0
JsupPre = 0
stage = 0
## Constant
STATIC_BALANCING = 0
MOTION_TRACKING = 1
DYNAMIC_BALANCING = 2
POWERFUL_BALANCING = 3
POWERFUL_MOTION_TRACKING = 4
FLYING = 5
def checkAll(list, value) :
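    # returns 1 if every element of 'list' equals 'value', otherwise 0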
for i in range(len(list)) :
if list[i] != value :
return 0
return 1
def getDesFootLinearAcc(refModel, controlModel, footIndex, ModelOffset, CM_ref, CM, Kk, Dk) :
desLinearAcc = [0,0,0]
refPos = refModel.getBodyPositionGlobal(footIndex)
curPos = controlModel.getBodyPositionGlobal(footIndex)
refVecL = refPos - CM_ref
if stage == MOTION_TRACKING:
refPos = CM + refVecL
#refPos[1] += 0.05
#refPos[0] -= 0.05
elif stage == POWERFUL_BALANCING:
refPos = copy.copy(curPos)
refPos[1] = 0
elif stage == DYNAMIC_BALANCING:
refPos = CM + refVecL
else:
refPos[0] += ModelOffset[0]
refVel = refModel.getBodyVelocityGlobal(footIndex)
curVel = controlModel.getBodyVelocityGlobal(footIndex)
#refAcc = (0,0,0)
refAcc = refModel.getBodyAccelerationGlobal(footIndex)
if stage != MOTION_TRACKING:
refPos[1] = 0.032
#refPos[1] = 0.0416
if refPos[1] < 0.0 :
refPos[1] = 0.032
#refPos[1] = 0.0416
desLinearAcc = yct.getDesiredAcceleration(refPos, curPos, refVel, curVel, refAcc, Kk, Dk)
return desLinearAcc, refPos
def getDesFootAngularAcc(refModel, controlModel, footIndex, Kk, Dk) :
desAngularAcc = [0,0,0]
curAng = [controlModel.getBodyOrientationGlobal(footIndex)]
refAngVel = refModel.getBodyAngVelocityGlobal(footIndex)
curAngVel = controlModel.getBodyAngVelocityGlobal(footIndex)
refAngAcc = (0,0,0)
curAngY = np.dot(curAng, np.array([0,1,0]))
refAngY = np.array([0,1,0])
if stage == MOTION_TRACKING+10:
refAng = [refModel.getBodyOrientationGlobal(footIndex)]
refAngY2 = np.dot(refAng, np.array([0,1,0]))
refAngY = refAngY2[0]
aL = mm.logSO3(mm.getSO3FromVectors(curAngY[0], refAngY))
desAngularAcc = Kk*aL + Dk*(refAngVel-curAngVel)
return desAngularAcc
def main():
np.set_printoptions(precision=4, linewidth=200)
# motion, mcfg, wcfg, stepsPerFrame, config = mit.create_vchain_5()
motion, mcfg, wcfg, stepsPerFrame, config = mit.create_biped()
vpWorld = cvw.VpWorld(wcfg)
motionModel = cvm.VpMotionModel(vpWorld, motion[0], mcfg)
motionModel.recordVelByFiniteDiff()
controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)
vpWorld.initialize()
controlModel.initializeHybridDynamics()
#ModelOffset = (1.5, -0.01, 0)
ModelOffset = (1.5, 0.0, 0)
controlModel.translateByOffset(ModelOffset)
totalDOF = controlModel.getTotalDOF()
DOFs = controlModel.getDOFs()
# parameter
Kt = config['Kt']; Dt = config['Dt'] # tracking gain
Kl = config['Kl']; Dl = config['Dl'] # linear balance gain
Kh = config['Kh']; Dh = config['Dh'] # angular balance gain
Ks = config['Ks']; Ds = config['Ds'] # penalty force spring gain
Bt = config['Bt']
Bl = config['Bl']
Bh = config['Bh']
w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])
w2 = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap2'])
#w_IK = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['IKweightMap'])
supL = motion[0].skeleton.getJointIndex(config['supLink'])
supR = motion[0].skeleton.getJointIndex(config['supLink2'])
rootB = motion[0].skeleton.getJointIndex(config['root'])
selectedBody = motion[0].skeleton.getJointIndex(config['end'])
#constBody = motion[0].skeleton.getJointIndex('LeftForeArm')
constBody = motion[0].skeleton.getJointIndex(config['const'])
# jacobian
Jsup = yjc.makeEmptyJacobian(DOFs, 1)
dJsup = Jsup.copy()
JsupPre = Jsup.copy()
Jsys = yjc.makeEmptyJacobian(DOFs, controlModel.getBodyNum())
dJsys = Jsys.copy()
JsysPre = Jsys.copy()
Jconst = yjc.makeEmptyJacobian(DOFs, 1)
dJconst = Jconst.copy()
###############
footPartNum = config['FootPartNum']
indexFootL = [None]*footPartNum
indexFootR = [None]*footPartNum
jFootL = [None]*footPartNum
dJFootL = [None]*footPartNum
jFootR = [None]*footPartNum
dJFootR = [None]*footPartNum
jointMasksFootL = [None]*footPartNum
jointMasksFootR = [None]*footPartNum
jAngFootL = [None]*footPartNum
dJAngFootL = [None]*footPartNum
jAngFootR = [None]*footPartNum
dJAngFootR = [None]*footPartNum
for i in range(footPartNum) :
jFootL[i] = yjc.makeEmptyJacobian(DOFs, 1)
dJFootL[i] = jFootL[i].copy()
jFootR[i] = yjc.makeEmptyJacobian(DOFs, 1)
dJFootR[i] = jFootR[i].copy()
jAngFootL[i] = yjc.makeEmptyJacobian(DOFs, 1, False)
dJAngFootL[i] = jAngFootL[i].copy()
jAngFootR[i] = yjc.makeEmptyJacobian(DOFs, 1, False)
dJAngFootR[i] = jAngFootR[i].copy()
indexFootL[i] = motion[0].skeleton.getJointIndex(config['FootLPart'][i])
indexFootR[i] = motion[0].skeleton.getJointIndex(config['FootRPart'][i])
jointMasksFootL[i] = [yjc.getLinkJointMask(motion[0].skeleton, indexFootL[i])]
jointMasksFootR[i] = [yjc.getLinkJointMask(motion[0].skeleton, indexFootR[i])]
constJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, constBody)]
allLinkJointMasks = yjc.getAllLinkJointMasks(motion[0].skeleton)
'''
maskArray = [foreSupLJointMasks, foreSupRJointMasks, rearSupLJointMasks, rearSupRJointMasks]
parentArray = [supL, supR, supL, supR]
effectorArray = [foreSupL, foreSupR, rearSupL, rearSupR]
for j in range(4) :
for i in range(len(foreSupLJointMasks)) :
if i == parentArray[j] or i == effectorArray[j] :
maskArray[j][0][i] = 1
else :
maskArray[j][0][i] = 0
'''
# momentum matrix
linkMasses = controlModel.getBodyMasses()
totalMass = controlModel.getTotalMass()
TO = ymt.make_TO(linkMasses)
dTO = ymt.make_dTO(len(linkMasses))
# optimization
problem = yac.LSE(totalDOF, 6)
a_sup = (0,0,0, 0,0,0) #L
#a_sup2 = (0,0,0, 0,0,0)#R
a_sup2 = [0,0,0, 0,0,0]#R
a_sup_2 = [0,0,0, 0,0,0, 0,0,0, 0,0,0]
CP_old = [mm.v3(0.,0.,0.)]
# penalty method
bodyIDsToCheck = range(vpWorld.getBodyNum())
mus = [1.]*len(bodyIDsToCheck)
# flat data structure
ddth_des_flat = ype.makeFlatList(totalDOF)
dth_flat = ype.makeFlatList(totalDOF)
ddth_sol = ype.makeNestedList(DOFs)
d_th_IK = ype.makeNestedList(DOFs)
d_th_IK_L = ype.makeNestedList(DOFs)
d_th_IK_R = ype.makeNestedList(DOFs)
dd_th_IK = ype.makeNestedList(DOFs)
dd_th_IK_flat = ype.makeFlatList(totalDOF)
d_th_IK_flat = ype.makeFlatList(totalDOF)
ddth_c_flat = ype.makeFlatList(totalDOF)
# viewer
rd_footCenter = [None]
rd_footCenter_ref = [None]
rd_footCenterL = [None]
rd_footCenterR = [None]
rd_CM_plane = [None]
rd_CM_plane_ref = [None]
rd_CM_ref = [None]
rd_CM = [None]
rd_CM_vec = [None]
rd_CM_ref_vec = [None]
rd_CP = [None]
rd_CP_des = [None]
rd_dL_des_plane = [None]
rd_dH_des = [None]
rd_grf_des = [None]
rd_exf_des = [None]
rd_root_des = [None]
rd_soft_const_vec = [None]
rd_root = [None]
rd_footL_vec = [None]
rd_footR_vec = [None]
rd_CMP = [None]
rd_DesPosL = [None]
rd_DesPosR = [None]
rd_DesForePosL = [None]
rd_DesForePosR = [None]
rd_DesRearPosL = [None]
rd_DesRearPosR = [None]
rootPos = [None]
selectedBodyId = [selectedBody]
extraForce = [None]
applyedExtraForce = [None]
applyedExtraForce[0] = [0,0,0]
normalVector = [[0,2,0]]
viewer = ysv.SimpleViewer()
# viewer.record(False)
# viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,255,255), yr.LINK_BONE))
viewer.doc.addObject('motion', motion)
viewer.doc.addRenderer('motionModel', cvr.VpModelRenderer(motionModel, (150,150,255), yr.POLYGON_FILL))
viewer.doc.addRenderer('controlModel', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_FILL))
viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))
#viewer.doc.addRenderer('rd_footCenterL', yr.PointsRenderer(rd_footCenterL))
#viewer.doc.addRenderer('rd_footCenterR', yr.PointsRenderer(rd_footCenterR))
#viewer.doc.addRenderer('rd_CM_plane', yr.PointsRenderer(rd_CM_plane, (255,255,0)))
viewer.doc.addRenderer('rd_CM', yr.PointsRenderer(rd_CM, (255,255,0)))
viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (0,255,0)))
#viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (255,0,255)))
# viewer.doc.addRenderer('rd_dL_des_plane', yr.VectorsRenderer(rd_dL_des_plane, rd_CM, (255,255,0)))
# viewer.doc.addRenderer('rd_dH_des', yr.VectorsRenderer(rd_dH_des, rd_CM, (0,255,0)))
viewer.doc.addRenderer('rd_grf_des', yr.ForcesRenderer(rd_grf_des, rd_CP, (0,255,255), .001))
viewer.doc.addRenderer('rd_exf_des', yr.ForcesRenderer(rd_exf_des, rd_root_des, (0,255,0), .009, 0.05))
#viewer.doc.addRenderer('rd_CMP', yr.PointsRenderer(rd_CMP, (0,0,255)))
viewer.doc.addRenderer('rd_DesPosL', yr.PointsRenderer(rd_DesPosL, (0,0,255)))
viewer.doc.addRenderer('rd_DesPosR', yr.PointsRenderer(rd_DesPosR, (0,100,255)))
viewer.doc.addRenderer('rd_DesForePosL', yr.PointsRenderer(rd_DesForePosL, (150,0,200)))
viewer.doc.addRenderer('rd_DesForePosR', yr.PointsRenderer(rd_DesForePosR, (150,0,250)))
viewer.doc.addRenderer('rd_DesRearPosL', yr.PointsRenderer(rd_DesRearPosL, (0,150,200)))
viewer.doc.addRenderer('rd_DesRearPosR', yr.PointsRenderer(rd_DesRearPosR, (0,150,250)))
#viewer.doc.addRenderer('softConstraint', yr.VectorsRenderer(rd_soft_const_vec, rd_CMP, (255,0,0), 3))
viewer.doc.addRenderer('rd_footLVec', yr.VectorsRenderer(rd_footL_vec, rd_footCenterL, (255,0,0), 3))
viewer.doc.addRenderer('rd_footRVec', yr.VectorsRenderer(rd_footR_vec, rd_footCenterL, (255,255,0), 3))
#viewer.doc.addRenderer('rd_footCenter_ref', yr.PointsRenderer(rd_footCenter_ref))
viewer.doc.addRenderer('rd_CM_plane_ref', yr.PointsRenderer(rd_CM_plane_ref, (255,255,0)))
viewer.doc.addRenderer('rd_refNormalVec', yr.VectorsRenderer(normalVector, rd_footCenter_ref, (255,0,0), 3))
viewer.doc.addRenderer('rd_refCMVec', yr.VectorsRenderer(rd_CM_ref_vec, rd_footCenter_ref, (255,0,255), 3))
viewer.doc.addRenderer('rd_curNormalVec', yr.VectorsRenderer(normalVector, rd_footCenter, (255,0,0), 3))
viewer.doc.addRenderer('rd_CMVec', yr.VectorsRenderer(rd_CM_vec, rd_footCenter, (255,0,255), 3))
stage = STATIC_BALANCING
def simulateCallback(frame):
global g_initFlag
global forceShowFrame
global forceApplyFrame
global JsysPre
global JsupPreL
global JsupPreR
global JsupPre
global softConstPoint
global stage
motionModel.update(motion[frame])
Kt, Kk, Kl, Kh, Ksc, Bt, Bl, Bh, Bsc = viewer.GetParam()
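        # The derivative gains below are set to 2*sqrt(K), i.e. the critical-damping
        # choice for a unit-mass PD servo (assumption: the gains feed the standard
        # PD tracking law in yct.getDesiredAcceleration/getDesiredDOFAccelerations).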
Dt = 2*(Kt**.5)
Dk = 2*(Kk**.5)
Dl = 2*(Kl**.5)
Dh = 2*(Kh**.5)
Dsc = 2*(Ksc**.5)
if Bsc == 0.0 :
viewer.doc.showRenderer('softConstraint', False)
viewer.motionViewWnd.update(1, viewer.doc)
else:
viewer.doc.showRenderer('softConstraint', True)
renderer1 = viewer.doc.getRenderer('softConstraint')
renderer1.rc.setLineWidth(0.1+Bsc*3)
viewer.motionViewWnd.update(1, viewer.doc)
# tracking
th_r = motion.getDOFPositions(frame)
th = controlModel.getDOFPositions()
dth_r = motion.getDOFVelocities(frame)
dth = controlModel.getDOFVelocities()
ddth_r = motion.getDOFAccelerations(frame)
ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt)
ddth_c = controlModel.getDOFAccelerations()
ype.flatten(ddth_des, ddth_des_flat)
ype.flatten(dth, dth_flat)
ype.flatten(ddth_c, ddth_c_flat)
# jacobian
refFootL = motionModel.getBodyPositionGlobal(supL)
refFootR = motionModel.getBodyPositionGlobal(supR)
positionFootL = [None]*footPartNum
positionFootR = [None]*footPartNum
for i in range(footPartNum):
positionFootL[i] = controlModel.getBodyPositionGlobal(indexFootL[i])
positionFootR[i] = controlModel.getBodyPositionGlobal(indexFootR[i])
linkPositions = controlModel.getBodyPositionsGlobal()
linkVelocities = controlModel.getBodyVelocitiesGlobal()
linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()
linkInertias = controlModel.getBodyInertiasGlobal()
jointPositions = controlModel.getJointPositionsGlobal()
jointAxeses = controlModel.getDOFAxeses()
CM = yrp.getCM(linkPositions, linkMasses, totalMass)
dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)
CM_plane = copy.copy(CM); CM_plane[1]=0.
dCM_plane = copy.copy(dCM); dCM_plane[1]=0.
linkPositions_ref = motionModel.getBodyPositionsGlobal()
CM_ref = yrp.getCM(linkPositions_ref, linkMasses, totalMass)
CM_plane_ref = copy.copy(CM_ref)
CM_plane_ref[1] = 0.
P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)
dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)
yjc.computeJacobian2(Jsys, DOFs, jointPositions, jointAxeses, linkPositions, allLinkJointMasks)
yjc.computeJacobianDerivative2(dJsys, DOFs, jointPositions, jointAxeses, linkAngVelocities, linkPositions, allLinkJointMasks)
if g_initFlag == 0:
softConstPoint = controlModel.getBodyPositionGlobal(constBody)
softConstPoint[1] -= .3
g_initFlag = 1
yjc.computeJacobian2(jFootL[0], DOFs, jointPositions, jointAxeses, [positionFootL[0]], jointMasksFootL[0])
yjc.computeJacobianDerivative2(dJFootL[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[0]], jointMasksFootL[0], False)
yjc.computeJacobian2(jFootR[0], DOFs, jointPositions, jointAxeses, [positionFootR[0]], jointMasksFootR[0])
yjc.computeJacobianDerivative2(dJFootR[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[0]], jointMasksFootR[0], False)
yjc.computeAngJacobian2(jAngFootL[0], DOFs, jointPositions, jointAxeses, [positionFootL[0]], jointMasksFootL[0])
yjc.computeAngJacobianDerivative2(dJAngFootL[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[0]], jointMasksFootL[0], False)
yjc.computeAngJacobian2(jAngFootR[0], DOFs, jointPositions, jointAxeses, [positionFootR[0]], jointMasksFootR[0])
yjc.computeAngJacobianDerivative2(dJAngFootR[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[0]], jointMasksFootR[0], False)
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
CP = yrp.getCP(contactPositions, contactForces)
for i in range(len(bodyIDsToCheck)) :
controlModel.SetBodyColor(bodyIDsToCheck[i], 0, 0, 0)
contactFlagFootL = [0]*footPartNum
contactFlagFootR = [0]*footPartNum
for i in range(len(bodyIDs)) :
controlModel.SetBodyColor(bodyIDs[i], 255, 105, 105)
index = controlModel.id2index(bodyIDs[i])
for j in range(len(indexFootL)):
if index == indexFootL[j]:
contactFlagFootL[j] = 1
if j != 0:
yjc.computeJacobian2(jFootL[j], DOFs, jointPositions, jointAxeses, [positionFootL[j]], jointMasksFootL[j])
yjc.computeJacobianDerivative2(dJFootL[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[j]], jointMasksFootL[j], False)
break
for j in range(len(indexFootR)):
if index == indexFootR[j]:
contactFlagFootR[j] = 1
if j != 0:
yjc.computeJacobian2(jFootR[j], DOFs, jointPositions, jointAxeses, [positionFootR[j]], jointMasksFootR[j])
yjc.computeJacobianDerivative2(dJFootR[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[j]], jointMasksFootR[j], False)
break
for j in range(len(indexFootL)):
yjc.computeAngJacobian2(jAngFootL[j], DOFs, jointPositions, jointAxeses, [positionFootL[j]], jointMasksFootL[j])
yjc.computeAngJacobianDerivative2(dJAngFootL[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[j]], jointMasksFootL[j], False)
yjc.computeAngJacobian2(jAngFootR[j], DOFs, jointPositions, jointAxeses, [positionFootR[j]], jointMasksFootR[j])
yjc.computeAngJacobianDerivative2(dJAngFootR[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[j]], jointMasksFootR[j], False)
'''
if frame < 100 :
if stage == POWERFUL_BALANCING:
#if stage != MOTION_TRACKING:
footCenterL = controlModel.getBodyPositionGlobal(supL)
footCenterR = controlModel.getBodyPositionGlobal(supR)
else:
footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
else:
'''
if footPartNum == 1:
footCenterL = controlModel.getBodyPositionGlobal(supL)
footCenterR = controlModel.getBodyPositionGlobal(supR)
else:
if ((contactFlagFootL[3] == 1 or contactFlagFootL[4] == 1) and contactFlagFootL[0] == 0) or ((contactFlagFootR[3] == 1 or contactFlagFootR[4] == 1) and contactFlagFootR[0] == 0):
footCenterL = (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(indexFootL[1]))/2.0
footCenterR = (controlModel.getBodyPositionGlobal(supR) + controlModel.getBodyPositionGlobal(indexFootR[1]))/2.0
#footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
#footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
else :
footCenterL = (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(indexFootL[1]))/2.0
footCenterR = (controlModel.getBodyPositionGlobal(supR) + controlModel.getBodyPositionGlobal(indexFootR[1]))/2.0
#footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
#footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
footCenter = footCenterL + (footCenterR - footCenterL)/2.0
footCenter[1] = 0.
footCenter_ref = refFootL + (refFootR - refFootL)/2.0
#footCenter_ref[1] = 0.
#
if checkAll(contactFlagFootL, 0) == 1 and checkAll(contactFlagFootR, 0) == 1:
footCenter = footCenter
elif checkAll(contactFlagFootL, 0) == 1 :
footCenter = footCenterR
elif checkAll(contactFlagFootR, 0) == 1 :
footCenter = footCenterL
footCenter[1] = 0.
desForeSupLAcc = [0,0,0]
desForeSupRAcc = [0,0,0]
totalNormalForce = [0,0,0]
for i in range(len(contactForces)):
totalNormalForce[0] += contactForces[i][0]
totalNormalForce[1] += contactForces[i][1]
totalNormalForce[2] += contactForces[i][2]
# linear momentum
CM_ref_plane = footCenter
dL_des_plane = Kl*totalMass*(CM_ref_plane - CM_plane) - Dl*totalMass*dCM_plane
# angular momentum
CP_ref = footCenter
timeStep = 30.
        if CP_old[0] is None or CP is None:
dCP = None
else:
dCP = (CP - CP_old[0])/(1/timeStep)
CP_old[0] = CP
        if CP is not None and dCP is not None:
ddCP_des = Kh*(CP_ref - CP) - Dh*(dCP)
CP_des = CP + dCP*(1/timeStep) + .5*ddCP_des*((1/timeStep)**2)
dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
#dH_des = np.cross((CP_des - CM_plane), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
else:
dH_des = None
# momentum matrix
RS = np.dot(P, Jsys)
R, S = np.vsplit(RS, 2)
rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)
r_bias, s_bias = np.hsplit(rs, 2)
##############################
# soft point constraint
P_des = softConstPoint
P_cur = controlModel.getBodyPositionGlobal(constBody)
dP_des = [0, 0, 0]
dP_cur = controlModel.getBodyVelocityGlobal(constBody)
ddP_des1 = Ksc*(P_des - P_cur) - Dsc*(dP_cur - dP_des)
r = P_des - P_cur
I = np.vstack(([1,0,0],[0,1,0],[0,0,1]))
Z = np.hstack((I, mm.getCrossMatrixForm(-r)))
yjc.computeJacobian2(Jconst, DOFs, jointPositions, jointAxeses, [softConstPoint], constJointMasks)
JL, JA = np.vsplit(Jconst, 2)
Q1 = np.dot(Z, Jconst)
q1 = np.dot(JA, dth_flat)
q2 = np.dot(mm.getCrossMatrixForm(q1), np.dot(mm.getCrossMatrixForm(q1), r))
yjc.computeJacobianDerivative2(dJconst, DOFs, jointPositions, jointAxeses, linkAngVelocities, [softConstPoint], constJointMasks, False)
q_bias1 = np.dot(np.dot(Z, dJconst), dth_flat) + q2
##############################
flagContact = True
        if dH_des is None or np.any(np.isnan(dH_des)):
flagContact = False
viewer.doc.showRenderer('rd_grf_des', False)
viewer.motionViewWnd.update(1, viewer.doc)
else:
viewer.doc.showRenderer('rd_grf_des', True)
viewer.motionViewWnd.update(1, viewer.doc)
'''
0 : initial
1 : contact
2 : fly
3 : landing
'''
#MOTION = FORWARD_JUMP
if mit.MOTION == mit.FORWARD_JUMP :
frame_index = [136, 100]
#frame_index = [100000, 100000]
elif mit.MOTION == mit.TAEKWONDO:
frame_index = [130, 100]
#frame_index = [100000, 100000]
elif mit.MOTION == mit.TAEKWONDO2:
frame_index = [130+40, 100]
else :
frame_index = [1000000, 1000000]
#MOTION = TAEKWONDO
#frame_index = [135, 100]
'''
if frame > 300 :
if stage != DYNAMIC_BALANCING:
print("#", frame,"-DYNAMIC_BALANCING")
stage = DYNAMIC_BALANCING
Kk = Kk*1
Dk = 2*(Kk**.5)
'''
if frame > frame_index[0] :
if stage != POWERFUL_BALANCING:
print("#", frame,"-POWERFUL_BALANCING")
stage = POWERFUL_BALANCING
Kk = Kk*2
Dk = 2*(Kk**.5)
elif frame > frame_index[1]:
if stage != MOTION_TRACKING:
print("#", frame,"-MOTION_TRACKING")
stage = MOTION_TRACKING
trackingW = w
if stage == MOTION_TRACKING:
trackingW = w2
Bt = Bt*2
# optimization
mot.addTrackingTerms(problem, totalDOF, Bt, trackingW, ddth_des_flat)
mot.addSoftPointConstraintTerms(problem, totalDOF, Bsc, ddP_des1, Q1, q_bias1)
if flagContact == True:
if stage != MOTION_TRACKING+10:
mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R, r_bias)
mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)
a_sup_2 = [None]
Jsup_2 = [None]
dJsup_2 = [None]
##############################
# Hard constraint
if stage != MOTION_TRACKING:
Kk2 = Kk * 2.0
else :
Kk2 = Kk * 1.5
Dk2 = 2*(Kk2**.5)
'''
desLinearAccL, desPosL = getDesFootLinearAcc(motionModel, controlModel, supL, ModelOffset, CM_ref, CM, Kk2, Dk2)
desLinearAccR, desPosR = getDesFootLinearAcc(motionModel, controlModel, supR, ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, supL, Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, supR, Kk2, Dk2)
'''
if stage != MOTION_TRACKING:
idx = 0 #LEFT/RIGHT_TOES
desLinearAccL, desPosL = getDesFootLinearAcc(motionModel, controlModel, indexFootL[idx], ModelOffset, CM_ref, CM, Kk2, Dk2)
desLinearAccR, desPosR = getDesFootLinearAcc(motionModel, controlModel, indexFootR[idx], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( np.hstack((desLinearAccL, desAngularAccL)), np.hstack((desLinearAccR, desAngularAccR)) ))
Jsup_2 = np.vstack((jFootL[idx], jFootR[idx]))
dJsup_2 = np.vstack((dJFootL[idx], dJFootR[idx]))
rd_DesPosL[0] = desPosL.copy()
rd_DesPosR[0] = desPosR.copy()
else:
if footPartNum == 5:
idx = 3
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( desAngularAccL, desAngularAccR ))
Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))
else:
idx = 1
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( desAngularAccL, desAngularAccR ))
Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))
##############################
##############################
# Additional constraint
if stage != MOTION_TRACKING:
#Kk2 = Kk * 2.5
Kk2 = Kk * 2.5
Dk2 = 2*(Kk2**.5)
desForePosL = [0,0,0]
desForePosR = [0,0,0]
desRearPosL = [0,0,0]
desRearPosR = [0,0,0]
for i in range(1, footPartNum) :
if contactFlagFootL[i] == 1:
desLinearAccL, desForePosL = getDesFootLinearAcc(motionModel, controlModel, indexFootL[i], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[i], Kk2, Dk2)
a_sup_2 = np.hstack(( a_sup_2, np.hstack((desLinearAccL, desAngularAccL)) ))
Jsup_2 = np.vstack(( Jsup_2, jFootL[i] ))
dJsup_2 = np.vstack(( dJsup_2, dJFootL[i] ))
if contactFlagFootR[i] == 1:
desLinearAccR, desForePosR = getDesFootLinearAcc(motionModel, controlModel, indexFootR[i], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[i], Kk2, Dk2)
a_sup_2 = np.hstack(( a_sup_2, np.hstack((desLinearAccR, desAngularAccR)) ))
Jsup_2 = np.vstack(( Jsup_2, jFootR[i] ))
dJsup_2 = np.vstack(( dJsup_2, dJFootR[i] ))
rd_DesForePosL[0] = desForePosL
rd_DesForePosR[0] = desForePosR
rd_DesRearPosL[0] = desRearPosL
rd_DesRearPosR[0] = desRearPosR
##############################
mot.setConstraint(problem, totalDOF, Jsup_2, dJsup_2, dth_flat, a_sup_2)
r = problem.solve()
problem.clear()
ype.nested(r['x'], ddth_sol)
rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)
localPos = [[0, 0, 0]]
for i in range(stepsPerFrame):
# apply penalty force
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)
extraForce[0] = viewer.GetForce()
if (extraForce[0][0] != 0 or extraForce[0][1] != 0 or extraForce[0][2] != 0) :
forceApplyFrame += 1
#vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
controlModel.applyBodyForceGlobal(selectedBody, extraForce[0])
applyedExtraForce[0] = extraForce[0]
if forceApplyFrame*wcfg.timeStep > 0.1:
viewer.ResetForce()
forceApplyFrame = 0
controlModel.setDOFAccelerations(ddth_sol)
controlModel.solveHybridDynamics()
'''
extraForce[0] = viewer.GetForce()
if (extraForce[0][0] != 0 or extraForce[0][1] != 0 or extraForce[0][2] != 0) :
forceApplyFrame += 1
vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
applyedExtraForce[0] = extraForce[0]
if forceApplyFrame*wcfg.timeStep > 0.1:
viewer.ResetForce()
forceApplyFrame = 0
'''
vpWorld.step()
# rendering
rd_footCenter[0] = footCenter
rd_CM[0] = CM.copy()
rd_CM_plane[0] = CM_plane.copy()
rd_footCenter_ref[0] = footCenter_ref
rd_CM_plane_ref[0] = CM_ref.copy()
rd_CM_ref[0] = CM_ref.copy()
rd_CM_ref_vec[0] = (CM_ref - footCenter_ref)*3.
rd_CM_vec[0] = (CM - footCenter)*3
#rd_CM_plane[0][1] = 0.
        if CP is not None and dCP is not None:
rd_CP[0] = CP
rd_CP_des[0] = CP_des
rd_dL_des_plane[0] = dL_des_plane
rd_dH_des[0] = dH_des
rd_grf_des[0] = totalNormalForce - totalMass*mm.s2v(wcfg.gravity)#dL_des_plane - totalMass*mm.s2v(wcfg.gravity)
rd_exf_des[0] = applyedExtraForce[0]
rd_root_des[0] = rootPos[0]
rd_CMP[0] = softConstPoint
rd_soft_const_vec[0] = controlModel.getBodyPositionGlobal(constBody)-softConstPoint
if (forceApplyFrame == 0) :
applyedExtraForce[0] = [0, 0, 0]
viewer.setSimulateCallback(simulateCallback)
viewer.startTimer(1/60.)
viewer.show()
Fl.run()
main() |
py | 1a4b14d360745395bb9240691022b554e331bb6a | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Class diagram:
# A A
# / \ |
# / \ |
# B C B
# \ / |
# \ / |
# D1 D2
class A:
def __init__(self):
print("A")
# In the case of the D1 class, this method gets called twice (because both B and C call A's __init__).
class B(A):
def __init__(self, x):
print("B ({})".format(x))
A.__init__(self)
class C(A):
def __init__(self, x):
print("C ({})".format(x))
A.__init__(self)
class D1(B, C):
def __init__(self, x):
print("D1 ({})".format(x))
B.__init__(self, x)
C.__init__(self, x)
class D2(B):
def __init__(self, x):
print("D2 ({})".format(x))
B.__init__(self, x)
if __name__ == '__main__':
print(D1.__mro__)
D1("x")
print()
print(D2.__mro__)
D2("x")
# Output:
# (<class '__main__.D1'>, <class '__main__.B'>, <class '__main__.C'>, <class '__main__.A'>, <class 'object'>)
# D1 (x)
# B (x)
# A
# C (x)
# A
#
# (<class '__main__.D2'>, <class '__main__.B'>, <class '__main__.A'>, <class 'object'>)
# D2 (x)
# B (x)
# A
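# --- Added sketch (not part of the original demo): with cooperative
# --- super().__init__ calls, the MRO runs each initializer exactly once and
# --- the duplicated "A" seen for D1 above disappears. The *3 classes are new,
# --- purely illustrative names.
class A3:
    def __init__(self, **kwargs):
        print("A3")
class B3(A3):
    def __init__(self, x, **kwargs):
        print("B3 ({})".format(x))
        super().__init__(x=x, **kwargs)
class C3(A3):
    def __init__(self, x, **kwargs):
        print("C3 ({})".format(x))
        super().__init__(x=x, **kwargs)
class D3(B3, C3):
    def __init__(self, x):
        print("D3 ({})".format(x))
        super().__init__(x=x)
# D3("x") would print: D3 (x), B3 (x), C3 (x), A3 -- each __init__ exactly once.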
|
py | 1a4b151f2b4099c594567aab99433c1c9358ebf2 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import (
SpatialRefSys, oracle, postgis, spatialite,
)
from django.db import connection
from django.test import skipUnlessDBFeature
from django.utils import six
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj4_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj4_re': r'\+proj=lcc \+lat_1=30.28333333333333 \+lat_2=28.38333333333333 \+lat_0=27.83333333333333 '
r'\+lon_0=-99 \+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@unittest.skipUnless(HAS_GDAL, "SpatialRefSysTest needs gdal support")
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(unittest.TestCase):
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
six.assertRegex(self, srs.proj4text, sd['proj4_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
six.assertRegex(self, srs.proj4, sd['proj4_re'])
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite < 4
if not spatialite or connection.ops.spatial_version[0] >= 4:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
|
py | 1a4b16ef9b032c1470c034ef4393bbdc745bb489 | from typing import Any, Type
def subclasses_of(klass: Type[Any]):
subclasses = []
stack = [klass]
while stack:
parent = stack.pop()
for subclass in parent.__subclasses__():
if subclass not in subclasses:
stack.append(subclass)
subclasses.append(subclass)
return subclasses
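# --- Added usage sketch (illustrative only; Base/Child/GrandChild are
# --- hypothetical names, not part of the original module). The iterative
# --- walk over __subclasses__() also picks up indirect subclasses:
if __name__ == "__main__":
    class Base: pass
    class Child(Base): pass
    class GrandChild(Child): pass
    print(subclasses_of(Base))  # expected: [Child, GrandChild] (traversal order)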
|
py | 1a4b17c11d81f583aef77264eaac0e9de73fdc19 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
flags.DEFINE_float(
'cb_distortion_range', 0.1, 'Cb distortion range +/-')
flags.DEFINE_float(
'cr_distortion_range', 0.1, 'Cr distortion range +/-')
flags.DEFINE_boolean(
'use_fast_color_distort', True,
    'apply fast color/chroma distortion if True, else apply '
    'brightness/saturation/hue/contrast distortion')
FLAGS = flags.FLAGS
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.minimum(tf.maximum(image, 0.0), 1.0)
def distort_color_fast(image, scope=None):
"""Distort the color of a Tensor image.
Distort brightness and chroma values of input image
Args:
image: 3-D Tensor containing single image in [0, 1].
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
"""
with tf.name_scope(scope, 'distort_color', [image]):
br_delta = random_ops.random_uniform([], -32./255., 32./255., seed=None)
cb_factor = random_ops.random_uniform(
[], -FLAGS.cb_distortion_range, FLAGS.cb_distortion_range, seed=None)
cr_factor = random_ops.random_uniform(
[], -FLAGS.cr_distortion_range, FLAGS.cr_distortion_range, seed=None)
channels = tf.split(axis=2, num_or_size_splits=3, value=image)
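    # The offsets below push the sampled Cb/Cr perturbations through the standard
    # ITU-R BT.601 YCbCr-to-RGB conversion coefficients
    # (R += 1.402*Cr, G -= 0.344136*Cb + 0.714136*Cr, B += 1.772*Cb),
    # on top of a common brightness shift br_delta.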
red_offset = 1.402 * cr_factor + br_delta
green_offset = -0.344136 * cb_factor - 0.714136 * cr_factor + br_delta
blue_offset = 1.772 * cb_factor + br_delta
channels[0] += red_offset
channels[1] += green_offset
channels[2] += blue_offset
image = tf.concat(axis=2, values=channels)
image = tf.minimum(tf.maximum(image, 0.), 1.)
return image
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3./4., 4./3.),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
  Additionally it creates image summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it is converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if add_image_summaries:
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
if add_image_summaries:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
if FLAGS.use_fast_color_distort:
distorted_image = distort_color_fast(distorted_image)
else:
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
  If height and width are specified it outputs an image of that size by
  applying resize_bilinear.
  If central_fraction is specified it crops the central fraction of the
  input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it is converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image.set_shape([height, width, 3])
return image
def preprocess_image(image,
output_height,
output_width,
is_training=False,
scaled_images=True,
bbox=None,
fast_mode=True,
add_image_summaries=False):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it is converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
output_height: integer, image expected height.
output_width: integer, image expected width.
    is_training: Boolean. If true, the image is transformed for training;
      otherwise it is transformed for evaluation.
scaled_images: Whether to scale pixel values to the range [-1, 1].
If set to false, pixel values are in the range [0, 1].
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
image = preprocess_for_train(
image,
output_height,
output_width,
bbox,
fast_mode,
add_image_summaries=add_image_summaries)
else:
image = preprocess_for_eval(image, output_height, output_width)
if scaled_images:
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
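# --- Illustrative usage sketch (added; not part of the original module). The
# --- JPEG path and output size are placeholders and a TF 1.x-style graph is
# --- assumed:
# raw = tf.read_file('/path/to/example.jpg')
# img = tf.image.decode_jpeg(raw, channels=3)
# train_img = preprocess_image(img, 224, 224, is_training=True)   # random crop/flip/color
# eval_img = preprocess_image(img, 224, 224, is_training=False)   # central crop + resize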
|
py | 1a4b19a05e56869b42761fcd2ca329ea17634c3c | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.contrib.python.checks.checker.pyflakes import PyflakesChecker
from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import PluginSubsystemBase
class FlakeCheckSubsystem(PluginSubsystemBase):
options_scope = 'pycheck-pyflakes'
@classmethod
def register_plugin_options(cls, register):
register('--ignore', fingerprint=True, type=list, default=[],
help='List of warning codes to ignore.')
@classmethod
def plugin_type(cls):
return PyflakesChecker
|
py | 1a4b1c6cfc234a56db2c08874e8120eb12aa50a2 | import sys
sys.path.append('../lib')
sys.path.append('../')
import pandas as pd
import gridgenerator
def test_grid():
g = gridgenerator.define_clfs_params('tiny')
skmodels, params = g
for k,v in params.items():
print(k,v)
def main(inpath, outpath, models=None, params_size="test"):
if models is not None and isinstance(models, list):
models_to_run = models
else:
models_to_run=['RF','LR','DT', 'KNN']
df = pd.read_csv(inpath)
print(df.columns)
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Missing parameters, pass inpath and outpath')
pass
elif len(sys.argv) == 5:
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
elif len(sys.argv) == 4:
main(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2]) |
py | 1a4b1ce38a6c352941f171d9add0089dfbac48d3 | #! /usr/bin/env python
'''
This script calculates median values per sliding window.
#Example input:
CHROM POS sample1 sample2 sample3 sample4 sample5
chr1 2923 0 16 13 24 27
chr1 4696 1 3 5 13 6
chr1 6240 5 10 5 15 19
chr1 6244 5 10 5 16 20
chr1 6527 9 20 12 20 36
chr1 6544 NA 21 16 20 36
chr1 6665 5 17 12 15 32
chr1 6676 5 22 14 18 31
chr1 6677 5 22 14 18 31
chr1 8017 14 19 9 20 33
chr1 8374 12 5 16 13 24
chr1 8618 7 13 10 25 21
chr1 8986 16 19 10 34 20
chr1 9185 15 31 18 42 44
chr1 9218 15 30 21 45 45
chr1 9374 16 28 18 45 43
chr1 9378 16 27 19 43 42
chr1 9411 18 24 NA 50 42
chr1 10743 10 17 16 34 28
chr1 11105 47 36 46 66 69
chr1 11162 14 24 32 43 55
chr1 11331 45 34 82 41 87
chr1 11368 51 41 107 57 101
chr1 13956 17 15 33 38 32
chr1 14548 5 4 10 9 8
chr1 14670 22 16 51 NA 22
chr1 14686 22 35 57 63 42
chr1 19796 54 32 43 57 49
chr1 19798 54 32 45 56 48
#Example output:
CHROM POS sample1 sample2 sample3 sample4 sample5
chr1 2500 0.5 9.5 9.0 18.5 16.5
chr1 7500 12.0 20.5 14.0 20.0 32.5
chr1 12500 22.0 24.0 46.0 42.0 42.0
chr1 17500 54.0 32.0 44.0 56.5 48.5
#command:
$ python calculate_MedianPerWindow.py -i input.tab -o output.tab -w 5000
#contact:
Dmytro Kryvokhyzha [email protected]
'''
############################# modules #############################
import calls # my custom module
from numpy import median
############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument(
'-i', '--input', help='name of the input file', type=str, required=True)
parser.add_argument(
'-o', '--output', help='name of the output file', type=str, required=True)
parser.add_argument(
'-w', '--window', help='sliding window size', type=int, required=True)
args = parser.parse_args()
############################# functions #############################
def meanWindow(dictList):
''' calculates median of a window'''
for k in dictList:
values = []
for val in dictList[k]:
if val != 'NA':
values.append(float(val))
if len(values) > 0:
medianValue = median(values)
dictList[k] = medianValue
else:
dictList[k] = 'NA'
return dictList
def createNewDict(NamesList):
''' creates a new empty dictionary with sample names as keys'''
newDict = {}
for k in NamesList:
newDict[k] = []
return newDict
def printWindow(inputDict, orderedNames):
''' creates print string from a dictionary with mean values'''
newList = []
for n in orderedNames:
newList.append(inputDict[n])
newListP = '\t'.join(str(el) for el in newList)
return newListP
############################# program #############################
print('Opening the file...')
windSize = args.window
windPosEnd = windSize
counter = 0
with open(args.input) as datafile:
header_line = datafile.readline()
# make output header
outputFile = open(args.output, 'w')
outputFile.write(header_line)
# make samples dict
header_words = header_line.split()
sampleNames = header_words[2:]
windowDict = createNewDict(sampleNames)
print('Processing the data ...')
ChrPrevious = ''
posS = ''
posE = ''
for line in datafile:
words = line.split()
Chr = words[0]
pos = int(words[1])
indVal = words[2:]
# to store the values of a previous line
if not ChrPrevious:
ChrPrevious = Chr
if not posS:
posS = windPosEnd - windSize
if not posE:
posE = windPosEnd
# if window size is reached output the results
if Chr != ChrPrevious: # if end of a chromosome
meanValWindow = meanWindow(windowDict)
meanValWindowP = printWindow(meanValWindow, sampleNames)
calls.processWindow(ChrPrevious, posS, posE,
meanValWindowP, outputFile)
windPosEnd = windSize
windowDict = createNewDict(sampleNames)
posS = windPosEnd - windSize
elif pos > windPosEnd: # if end of a window
meanValWindow = meanWindow(windowDict)
meanValWindowP = printWindow(meanValWindow, sampleNames)
calls.processWindow(Chr, posS, posE,
meanValWindowP, outputFile)
windPosEnd = windPosEnd + windSize
windowDict = createNewDict(sampleNames)
posS = windPosEnd - windSize
while pos > windPosEnd: # gap is larger than window size
windPosEnd = windPosEnd + windSize
ChrPrevious = Chr
posE = windPosEnd
# append values
        for s in range(len(sampleNames)):
windowDict[sampleNames[s]].append(indVal[s])
# track progress
counter += 1
if counter % 1000000 == 0:
            print(str(counter), "lines processed")
# process the last window
meanValWindow = meanWindow(windowDict)
meanValWindowP = printWindow(meanValWindow, sampleNames)
calls.processWindow(Chr, posS, windPosEnd,
meanValWindowP, outputFile)
datafile.close()
outputFile.close()
print('Done!')
|
py | 1a4b1e659e8bc569aed071179cf4d8c7adbbbd3b | from cereal import car
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
self.shifter_values = can_define.dv["GEAR"]['PRNDL']
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
self.frame = int(cp.vl["EPS_STATUS"]['COUNTER'])
ret.doorOpen = any([cp.vl["DOORS"]['DOOR_OPEN_FL'],
cp.vl["DOORS"]['DOOR_OPEN_FR'],
cp.vl["DOORS"]['DOOR_OPEN_RL'],
cp.vl["DOORS"]['DOOR_OPEN_RR']])
ret.seatbeltUnlatched = cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_UNLATCHED'] == 1
ret.brakePressed = cp.vl["BRAKE_2"]['BRAKE_PRESSED_2'] == 5 # human-only
ret.brake = 0
ret.brakeLights = ret.brakePressed
ret.gas = cp.vl["ACCEL_GAS_134"]['ACCEL_134']
ret.gasPressed = ret.gas > 1e-5
ret.espDisabled = (cp.vl["TRACTION_BUTTON"]['TRACTION_OFF'] == 1)
ret.wheelSpeeds.fl = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_FL']
ret.wheelSpeeds.rr = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_RR']
ret.wheelSpeeds.rl = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_RL']
ret.wheelSpeeds.fr = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_FR']
ret.vEgoRaw = (cp.vl['SPEED_1']['SPEED_LEFT'] + cp.vl['SPEED_1']['SPEED_RIGHT']) / 2.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = not ret.vEgoRaw > 0.001
ret.leftBlinker = cp.vl["STEERING_LEVERS"]['TURN_SIGNALS'] == 1
ret.rightBlinker = cp.vl["STEERING_LEVERS"]['TURN_SIGNALS'] == 2
ret.steeringAngle = cp.vl["STEERING"]['STEER_ANGLE']
ret.steeringRate = cp.vl["STEERING"]['STEERING_RATE']
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl['GEAR']['PRNDL'], None))
ret.cruiseState.enabled = cp.vl["ACC_2"]['ACC_STATUS_2'] == 7 # ACC is green.
ret.cruiseState.available = ret.cruiseState.enabled # FIXME: for now same as enabled
ret.cruiseState.speed = cp.vl["DASHBOARD"]['ACC_SPEED_CONFIG_KPH'] * CV.KPH_TO_MS
ret.steeringTorque = cp.vl["EPS_STATUS"]["TORQUE_DRIVER"]
ret.steeringTorqueEps = cp.vl["EPS_STATUS"]["TORQUE_MOTOR"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
steer_state = cp.vl["EPS_STATUS"]["LKAS_STATE"]
ret.steerError = steer_state == 4 or (steer_state == 0 and ret.vEgo > self.CP.minSteerSpeed)
ret.genericToggle = bool(cp.vl["STEERING_LEVERS"]['HIGH_BEAM_FLASH'])
self.lkas_counter = cp_cam.vl["LKAS_COMMAND"]['COUNTER']
self.lkas_car_model = cp_cam.vl["LKAS_HUD"]['CAR_MODEL']
self.lkas_status_ok = cp_cam.vl["LKAS_HEARTBIT"]['LKAS_STATUS_OK']
return ret
@staticmethod
def get_can_parser(CP):
signals = [
# sig_name, sig_address, default
("PRNDL", "GEAR", 0),
("DOOR_OPEN_FL", "DOORS", 0),
("DOOR_OPEN_FR", "DOORS", 0),
("DOOR_OPEN_RL", "DOORS", 0),
("DOOR_OPEN_RR", "DOORS", 0),
("BRAKE_PRESSED_2", "BRAKE_2", 0),
("ACCEL_134", "ACCEL_GAS_134", 0),
("SPEED_LEFT", "SPEED_1", 0),
("SPEED_RIGHT", "SPEED_1", 0),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
("STEER_ANGLE", "STEERING", 0),
("STEERING_RATE", "STEERING", 0),
("TURN_SIGNALS", "STEERING_LEVERS", 0),
("ACC_STATUS_2", "ACC_2", 0),
("HIGH_BEAM_FLASH", "STEERING_LEVERS", 0),
("ACC_SPEED_CONFIG_KPH", "DASHBOARD", 0),
("TORQUE_DRIVER", "EPS_STATUS", 0),
("TORQUE_MOTOR", "EPS_STATUS", 0),
("LKAS_STATE", "EPS_STATUS", 1),
("COUNTER", "EPS_STATUS", -1),
("TRACTION_OFF", "TRACTION_BUTTON", 0),
("SEATBELT_DRIVER_UNLATCHED", "SEATBELT_STATUS", 0),
]
checks = [
# sig_address, frequency
("BRAKE_2", 50),
("EPS_STATUS", 100),
("SPEED_1", 100),
("WHEEL_SPEEDS", 50),
("STEERING", 100),
("ACC_2", 50),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = [
# sig_name, sig_address, default
("COUNTER", "LKAS_COMMAND", -1),
("CAR_MODEL", "LKAS_HUD", -1),
("LKAS_STATUS_OK", "LKAS_HEARTBIT", -1)
]
checks = []
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2)
|
py | 1a4b1e9736dc83c812ca0da0e250fe9b97612bcd | from random import randint
from typing import Dict
from uuid import uuid4
import pytest
from pydantic import BaseModel, ValidationError
from geojson_pydantic.features import Feature, FeatureCollection
from geojson_pydantic.geometries import Geometry, MultiPolygon, Polygon
class GenericProperties(BaseModel):
id: str
description: str
size: int
properties = {
"id": str(uuid4()),
"description": str(uuid4()),
"size": randint(0, 1000),
}
polygon = {
"type": "Polygon",
"coordinates": [
[
[13.38272, 52.46385],
[13.42786, 52.46385],
[13.42786, 52.48445],
[13.38272, 52.48445],
[13.38272, 52.46385],
]
],
}
test_feature = {
"type": "Feature",
"geometry": polygon,
"properties": properties,
}
def test_geometry_collection_iteration():
"""test if feature collection is iterable"""
gc = FeatureCollection(features=[test_feature, test_feature])
iter(gc)
def test_generic_properties_is_dict():
feature = Feature(**test_feature)
assert feature.properties["id"] == test_feature["properties"]["id"]
assert type(feature.properties) == dict
assert not hasattr(feature.properties, "id")
def test_generic_properties_is_object():
feature = Feature[Geometry, GenericProperties](**test_feature)
assert feature.properties.id == test_feature["properties"]["id"]
assert type(feature.properties) == GenericProperties
assert hasattr(feature.properties, "id")
def test_generic_geometry():
feature = Feature[Polygon, GenericProperties](**test_feature)
assert feature.properties.id == test_feature["properties"]["id"]
assert type(feature.geometry) == Polygon
assert type(feature.properties) == GenericProperties
assert hasattr(feature.properties, "id")
feature = Feature[Polygon, Dict](**test_feature)
assert type(feature.geometry) == Polygon
assert feature.properties["id"] == test_feature["properties"]["id"]
assert type(feature.properties) == dict
assert not hasattr(feature.properties, "id")
with pytest.raises(ValidationError):
Feature[MultiPolygon, Dict](**({"type": "Feature", "geometry": polygon}))
def test_generic_properties_should_raise_for_string():
with pytest.raises(ValidationError):
Feature(
**({"type": "Feature", "geometry": polygon, "properties": "should raise"})
)
def test_feature_collection_generic():
fc = FeatureCollection[Polygon, GenericProperties](
features=[test_feature, test_feature]
)
assert len(fc) == 2
assert type(fc[0].properties) == GenericProperties
assert type(fc[0].geometry) == Polygon
def test_geo_interface_protocol():
class Pointy:
__geo_interface__ = {"type": "Point", "coordinates": (0.0, 0.0)}
feat = Feature(geometry=Pointy())
assert feat.geometry.dict() == Pointy.__geo_interface__
|
py | 1a4b1f90f5e284578dd1a477868d0fb5fdefbb74 | import torch
import torch.nn as nn
import torch.nn.functional as F
def kl_loss(x, mu, logsigma, beta):
kl = -0.5 * torch.sum(1 + logsigma - mu.pow(2) - logsigma.exp())
return beta * (kl / torch.numel(x))
def vae_loss(x, mu, logsigma, recon_x, beta=1):
recon_loss = F.mse_loss(x, recon_x, reduction='mean')
kl = kl_loss(x, mu, logsigma, beta)
return recon_loss + kl
def reparameterize(mu, logsigma):
std = torch.exp(0.5*logsigma)
eps = torch.randn_like(std)
return mu + eps*std
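# Illustrative sketch (not part of the original module): one VAE training step
# wiring the helpers above together on random data.  The tiny linear
# encoder/decoder below are hypothetical stand-ins, not the CarRacing models.
def _example_vae_step():
    x = torch.rand(4, 8)                    # fake batch of flat inputs
    enc = nn.Linear(8, 2 * 3)               # predicts mu and logsigma for a 3-dim latent
    dec = nn.Linear(3, 8)                   # maps the latent back to input space
    mu, logsigma = enc(x).chunk(2, dim=1)
    z = reparameterize(mu, logsigma)        # differentiable sampling of the latent
    recon_x = torch.sigmoid(dec(z))         # reconstruction in [0, 1]
    loss = vae_loss(x, mu, logsigma, recon_x, beta=1)
    loss.backward()                         # gradients flow through z thanks to the trick
    return loss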
def carracing_encoder(input_channel):
return nn.Sequential(
nn.Conv2d(input_channel, 32, 4, stride=2), nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
nn.Conv2d(64, 128, 4, stride=2), nn.ReLU(),
nn.Conv2d(128, 256, 4, stride=2), nn.ReLU()
)
def carracing_decoder(flatten_size):
return nn.Sequential(
nn.ConvTranspose2d(flatten_size, 128, 5, stride=2), nn.ReLU(),
nn.ConvTranspose2d(128, 64, 5, stride=2), nn.ReLU(),
nn.ConvTranspose2d(64, 32, 6, stride=2), nn.ReLU(),
nn.ConvTranspose2d(32, 3, 6, stride=2), nn.Sigmoid()
) |
py | 1a4b1fa2a88c6d3f4f11f66beb64bdb562d95994 | from collections import namedtuple, deque
import difflib
import pygments.formatters
import pygments.lexers
import pygments.token
import re
from typing import List, Tuple, Optional, Iterator, Iterable
from literate.annot import Span, Annot, SpanMerger, \
cut_annot, merge_annot, sub_annot, fill_annot
from literate.file import File, Line, Diff, DiffBlock, Hunk, OutputLine
from literate.points import Point, cut_annot_at_points
# Regex for finding runs of identical non-space characters
RUN_RE = re.compile(r'([^ \n])\1*')
def parse_intra_annot(s: str) -> Annot[str]:
'''Parse an `ndiff` detail (`?`) line and convert it to an annotation
indicating intraline edits in the text of the preceding line. The
annotation labels inserted, deleted, and changed characters with `'ins'`,
`'del'`, and `'chg'` respectively.'''
spans = []
for m in RUN_RE.finditer(s):
c = m.group(1)
# Map the symbols used by `ndiff` to something more meaningful.
label = {
'+': 'ins',
'-': 'del',
'^': 'chg',
}[c]
spans.append(Span(m.start(), m.end(), label))
return spans
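# Illustrative example (not part of the original module): for a detail string
# such as '    ^^  ++' this returns
#   [Span(4, 6, 'chg'), Span(8, 10, 'ins')]
# i.e. characters 4-5 of the preceding line were changed and characters 8-9
# were inserted.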
DiffLine = Tuple[bool, bool, Optional[Annot[str]], Optional[Annot[str]]]
def diff_lines(old_lines: List[str], new_lines: List[str]) -> Iterator[DiffLine]:
'''Compute a diff of `old` and `new`, and yield a sequence of (old_line,
new_line, old_detail, new_detail). Each `line` is a boolean indicating
whether there is a line present in the old/new file, and each `detail` is
an intraline edit annotation (see `parse_intra_annot`).
Possible outputs:
- (True, True, None, None): Unmodified/context line
- (True, False, None, None): Deletion of a line from the old text.
- (False, True, None, None): Insertion of a line in the new text.
- (True, True, [...], [...]): Changed line, modified via the indicated
intraline insertions and deletions.
'''
# We buffer up to two previous result tuples. This lets us handle
# intraline change markers, and in particular, the nasty '-+?' case, where
# we don't find out that we're in an intraline change ('?') until we've
# seen both the '-' and '+' lines.
buf = deque()
for dl in difflib.ndiff(old_lines, new_lines):
prefix = dl[0:2]
if prefix == ' ':
# Context line. Flush the whole buffer.
while buf:
yield buf.popleft()
yield (True, True, None, None)
elif prefix == '- ':
while buf:
yield buf.popleft()
buf.append((True, False, None, None))
elif prefix == '+ ':
# Try to fold into a previous intraline edit quad, if one exists.
if len(buf) > 0:
old_line, new_line, old_detail, new_detail = buf[-1]
if not new_line and old_detail is not None:
# Previously saw a '-' and a '?'. Fold in this '+'.
assert not new_line
buf[-1] = (old_line, True, old_detail, None)
continue
# If there's no old_detail ('?'), then we aren't in an
# intraline edit. If there's a new_line, then the intraline
# edit is already finished. In either case, we want to do the
# default action of just adding the '+' on its own.
while len(buf) > 2:
yield buf.popleft()
buf.append((False, True, None, None))
elif prefix == '? ':
detail = parse_intra_annot(dl[2:])
# Add this detail to the previous buffered line. We may also need
# to merge a pair of previous '-' and '+' lines, if we didn't
# previously know that they were part of an intraline change quad.
assert len(buf) > 0
old_line, new_line, old_detail, new_detail = buf.pop()
if new_line:
if old_line:
# The previous line is a rollup of a '-' and a '+'.
# (Context lines are not included in the buffer.)
assert old_detail is not None
buf.append((True, True, old_detail, detail))
else:
# The previous line is just a '+'. There must be a '-'
# before it, so roll up both of those together with the new
# detail.
old_line2, new_line2, old_detail2, new_detail2 = buf.pop()
assert old_line2
assert not new_line2
assert old_detail2 is None
assert new_detail2 is None
buf.append((True, True, None, detail))
else:
# The previous line is just a '-'. Roll this detail into it.
# Next we should see a '+', which will get rolled in, so this
# bogus (True, False, [...], None) entry will never be yielded.
buf.append((True, False, detail, None))
# Flush any remaining buffered entries.
while buf:
yield buf.popleft()
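# Illustrative example (not part of the original module): diffing
#   old_lines = ['intro\n', 'abc\n']    new_lines = ['intro\n', 'abd\n']
# yields roughly
#   (True, True, None, None)                                 # 'intro' is context
#   (True, True, [Span(2, 3, 'chg')], [Span(2, 3, 'chg')])   # 'abc' -> 'abd'
# while a line present on only one side would come out as
# (True, False, None, None) or (False, True, None, None).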
def adjust_closing_brace(old_lines: List[str], new_lines: List[str],
diff: Iterable[DiffLine]) -> Iterator[DiffLine]:
'''Adjust the output of `diff_lines` to turn this:
fn f() {
...
+}
+fn g() {
+ ...
}
into this:
fn f() {
...
}
+fn g() {
+ ...
+}
'''
# Specifically: at the end of every run of insertions or deletions, if the
    # first context line after the run consists solely of a '}' character (plus
    # optional whitespace), then we scan from the top of the run for an identical
# inserted line. If found, we change the earlier line from an insertion to
# context, and change the context line to an insertion.
mode = None
buf = []
buf_start = None
old_i = -1
new_i = -1
for dl in diff:
old_line, new_line, old_detail, new_detail = dl
if old_line and not new_line:
new_mode = 'del'
old_i += 1
elif not old_line and new_line:
new_mode = 'ins'
new_i += 1
else:
new_mode = None
old_i += 1
new_i += 1
if new_mode != mode:
if new_mode is None:
# Switching from ins or del mode to context mode. If the
# current line is a '}', we try to do the block adjustment.
check_lines = new_lines if mode == 'ins' else old_lines
i = new_i if mode == 'ins' else old_i
if check_lines[i].strip() == '}':
# Yield everything from buf, while scanning for an earlier
# matching line.
found_dl = None
for j, buf_dl in enumerate(buf):
if check_lines[buf_start + j] == check_lines[i]:
found_dl = buf_dl
yield (True, True, None, None)
# We're stopping early, so yield the remaining
# elements.
yield from buf[j + 1:]
break
else:
yield buf_dl
if found_dl:
yield found_dl
else:
yield (True, True, None, None)
else:
yield from buf
yield dl
mode = None
buf = []
buf_start = None
# We already yielded the correct info, so don't fall through to
# the default logic.
continue
else:
if mode is not None:
yield from buf
mode = new_mode
buf = []
buf_start = new_i if mode == 'ins' else old_i
if mode is None:
yield dl
else:
buf.append(dl)
# There are no more lines, so there can't be a `}` line following `buf` to
# trigger our heuristic. That means we can blindly dump everything in
# `buf`.
yield from buf
WORD_BREAK_RE = re.compile(r'\b')
def token_annot(line: Line) -> Annot[None]:
'''Annotate the tokens of `l`. Each token (and some sub-token strings)
gets a separate span. This is a helper function for
`calc_tokenized_intra`.'''
annot = fill_annot(line.highlight, len(line.text))
# Special cases: treat word boundaries inside strings and comments as token
# breaks. This essentially gives us the behavior of `git`'s `--word-diff`
# feature.
extra_cuts = []
for span in annot:
# We don't handle String subtypes (only String itself) because we don't
# want to break up `\x00` and similar escapes.
if span.label == pygments.token.String or \
span.label in pygments.token.Comment:
text = line.text[span.start : span.end]
for m in WORD_BREAK_RE.finditer(text):
extra_cuts.append(Point(span.start + m.start()))
return cut_annot_at_points(annot, extra_cuts)
def calc_tokenized_intra(l1: Line, l2: Line) -> Tuple[Annot[str], Annot[str]]:
'''Calculate token-based intraline edit annotations for `l1` and `l2`.
`difflib.ndiff` does a pretty good job of matching up similar lines, but it
computes intraline changes character-by-character, which often produces bad
results. For example, it might turn `unsafe` into `malloc` by replacing
`uns` -> `m` and `fe` -> `lloc`, instead of doing `unsafe` -> `malloc` in
one go.
Here we calculate some intraline edits that are easier to read, using the
tokenization provided by `pygments` to align edit boundaries to the
boundaries of source tokens.'''
annot1 = token_annot(l1)
annot2 = token_annot(l2)
tokens1 = [l1.text[s.start : s.end] for s in annot1]
tokens2 = [l2.text[s.start : s.end] for s in annot2]
intra1 = []
intra2 = []
sm = difflib.SequenceMatcher(a=tokens1, b=tokens2)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == 'equal':
continue
while i1 < i2 and tokens1[i1].isspace():
i1 += 1
while i2 > i1 and tokens1[i2 - 1].isspace():
i2 -= 1
while j1 < j2 and tokens2[j1].isspace():
j1 += 1
while j2 > j1 and tokens2[j2 - 1].isspace():
j2 -= 1
if i1 != i2:
intra1.append(Span(annot1[i1].start, annot1[i2 - 1].end,
'chg' if tag == 'replace' else 'del'))
if j1 != j2:
intra2.append(Span(annot2[j1].start, annot2[j2 - 1].end,
'chg' if tag == 'replace' else 'ins'))
return (intra1, intra2)
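# Illustrative sketch (not part of the original module): with hypothetical
# token lists
#   tokens1 = ['let', ' ', 'x', ' ', '=', ' ', 'unsafe', ';']
#   tokens2 = ['let', ' ', 'x', ' ', '=', ' ', 'malloc', ';']
# difflib.SequenceMatcher(a=tokens1, b=tokens2).get_opcodes() reports a single
# 'replace' opcode covering only the 'unsafe'/'malloc' tokens, so the whole
# identifier is labeled 'chg' in one span instead of the scattered
# per-character edits that a character-level diff would produce.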
def diff_files(f1: File, f2: File) -> Diff:
'''Diff two files, returning a `Diff` between them and also setting the
`intra` annotation on the lines of both files.'''
dls = diff_lines(f1.line_text, f2.line_text)
dls = adjust_closing_brace(f1.line_text, f2.line_text, dls)
# Accumulator for diff blocks.
diff_blocks = []
# Start and current position of the current block.
old_start = 0
old_cur = 0
new_start = 0
new_cur = 0
# Is the current block a change? (If not, it's context.)
changed = True
def flush():
nonlocal old_start, new_start
# This check means we can blindly call `flush()` without worrying about
# cluttering the output with zero-length blocks.
if old_cur - old_start > 0 or new_cur - new_start > 0:
diff_blocks.append(DiffBlock(changed,
Span(old_start, old_cur),
Span(new_start, new_cur)))
old_start = old_cur
new_start = new_cur
for old_line, new_line, old_detail, new_detail in dls:
next_changed = not (old_line and new_line and
old_detail is None and new_detail is None)
has_intra = old_detail is not None or new_detail is not None
if next_changed != changed:
flush()
if has_intra:
# Emit each `intra` line as its own block, to ensure they're
# aligned in the output.
flush()
intra1, intra2 = calc_tokenized_intra(
f1.lines[old_cur], f2.lines[new_cur])
if len(intra1) > 0:
f1.lines[old_cur].set_intra(intra1)
if len(intra2) > 0:
f2.lines[new_cur].set_intra(intra2)
flush()
if old_line:
old_cur += 1
if new_line:
new_cur += 1
changed = next_changed
flush()
return Diff(f1, f2, diff_blocks)
def context_annot(blocks: List[DiffBlock], new: bool, context_lines: int) -> Annot[None]:
'''Generate an annotation of the old or new file's lines, indicating which
lines are changes or context for changes (within `context_lines`
distance).'''
result = SpanMerger()
for (changed, old_span, new_span) in blocks:
if not changed:
continue
span = new_span if new else old_span
result.add(Span(
span.start - context_lines,
span.end + context_lines))
return result.finish()
def split_hunks(blocks: List[DiffBlock]) -> List[Hunk]:
    '''Split a list of diff blocks into hunks, anywhere there's a
gap in the old or new line numbers.'''
last_old = 0
last_new = 0
cur = []
hunks = []
def flush():
nonlocal cur
if len(cur) > 0:
hunks.append(Hunk(cur))
cur = []
for b in blocks:
changed, old_span, new_span = b
if old_span.start != last_old or new_span.start != last_new:
flush()
cur.append(b)
last_old = old_span.end
last_new = new_span.end
flush()
return hunks
def annotate_blocks(blocks: List[DiffBlock]) \
-> Tuple[Annot[Span[None]], Annot[Span[None]]]:
'''Return annotations on the old and new files, labeling each line with the
block that contains it.'''
old = []
new = []
for b in blocks:
old.append(Span(b.old_span.start, b.old_span.end, b))
new.append(Span(b.new_span.start, b.new_span.end, b))
return old, new
def build_diff_hunks(d: Diff, context_diff: bool=True):
'''Build a list of output hunks, and assign it to `d.hunks`.
If `d.old_file` or `d.new_file` has a `keep_mark_lines` annotation, all
annotated lines will be kept as additional context.'''
# Find the set of lines each file wants to keep.
def calc_file_keep(f, is_new):
if context_diff:
keep = context_annot(d.blocks, is_new, 5)
if f.keep_mark_lines is not None:
keep = merge_annot(keep, f.keep_mark_lines)
else:
if len(f.line_annot) > 0:
keep = [Span(0, f.line_annot[-1].end)]
else:
keep = []
if f.drop_irrelevant_lines is not None:
keep = sub_annot(keep, f.drop_irrelevant_lines)
return keep
keep_old = calc_file_keep(d.old_file, False)
keep_new = calc_file_keep(d.new_file, True)
# In unchanged blocks, add each file's keep lines to the other file's set.
# This works because unchanged blocks have the same number of lines on each
# side.
old_blocks, new_blocks = annotate_blocks(d.blocks)
extra_keep_old = []
extra_keep_new = []
for block_span, keep_spans in cut_annot(keep_old, old_blocks):
if block_span.label.changed:
continue
base = block_span.label.new_span.start
extra_keep_new.extend(s + base for s in keep_spans)
for block_span, keep_spans in cut_annot(keep_new, new_blocks):
if block_span.label.changed:
continue
base = block_span.label.old_span.start
extra_keep_old.extend(s + base for s in keep_spans)
keep_old = merge_annot(keep_old, extra_keep_old)
keep_new = merge_annot(keep_new, extra_keep_new)
# For changed blocks, we can't match up lines from different files, so we
# just hope for the best. (Normally all changed lines are kept, so there's
# no need to match - the only exception is when the `irrelevant_*_regex`
# options are set.)
# Build the filtered list of blocks. There can be different numbers of
# blocks on the old and new sides. We use a fairly naive strategy to match
# them up, but it generally seems to work okay.
blocks = []
for (old_block, old_keeps), (new_block, new_keeps) in zip(
cut_annot(keep_old, old_blocks),
cut_annot(keep_new, new_blocks)):
# `old_blocks` and `new_blocks` have corresponding entries (based on
# the same block) at corresponding positions.
assert old_block.label is new_block.label
block = old_block.label
# Match up `old_keeps` and `new_keeps` entries by position. In most
# cases, the two lists will have the same length.
for old_keep, new_keep in zip(old_keeps, new_keeps):
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
new_keep + block.new_span.start))
for old_keep in old_keeps[len(new_keeps):]:
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
Span(block.new_span.end, block.new_span.end)))
for new_keep in new_keeps[len(old_keeps):]:
blocks.append(DiffBlock(block.changed,
Span(block.old_span.end, block.old_span.end),
new_keep + block.new_span.start))
# Split the new blocks into hunks, and save them in the `Diff`.
hunks = split_hunks(blocks)
d.set_hunks(hunks)
def hunk_output_lines(h: Hunk) -> List[OutputLine]:
result = []
for changed, old_span, new_span in h.blocks:
common_lines = min(len(old_span), len(new_span))
for i in range(0, common_lines):
result.append(OutputLine(changed, old_span.start + i, new_span.start + i))
for i in range(common_lines, len(old_span)):
result.append(OutputLine(changed, old_span.start + i, None))
for i in range(common_lines, len(new_span)):
result.append(OutputLine(changed, None, new_span.start + i))
return result
def build_output_lines(d: Diff):
'''Build a list of two-column output lines for each hunk of `d`, and set
the `Hunk.output_lines` fields.'''
for h in d.hunks:
output_lines = hunk_output_lines(h)
h.set_output_lines(output_lines)
|
py | 1a4b1fbf8bab0282ff9c4fbb73ddb3a04c7c192c | """Validate coverage files."""
from __future__ import annotations
from pathlib import Path
from .model import Config, Integration
DONT_IGNORE = (
"config_flow.py",
"device_action.py",
"device_condition.py",
"device_trigger.py",
"group.py",
"intent.py",
"logbook.py",
"media_source.py",
"scene.py",
)
# These integrations were already violating the rule when this check was introduced.
# They need to be fixed in a future PR.
ALLOWED_IGNORE_VIOLATIONS = {
("ambient_station", "config_flow.py"),
("cast", "config_flow.py"),
("daikin", "config_flow.py"),
("doorbird", "config_flow.py"),
("doorbird", "logbook.py"),
("elkm1", "config_flow.py"),
("elkm1", "scene.py"),
("fibaro", "scene.py"),
("flume", "config_flow.py"),
("hangouts", "config_flow.py"),
("harmony", "config_flow.py"),
("hisense_aehw4a1", "config_flow.py"),
("home_connect", "config_flow.py"),
("huawei_lte", "config_flow.py"),
("ifttt", "config_flow.py"),
("ios", "config_flow.py"),
("iqvia", "config_flow.py"),
("knx", "scene.py"),
("konnected", "config_flow.py"),
("lcn", "scene.py"),
("life360", "config_flow.py"),
("lifx", "config_flow.py"),
("lutron", "scene.py"),
("mobile_app", "config_flow.py"),
("nest", "config_flow.py"),
("plaato", "config_flow.py"),
("point", "config_flow.py"),
("rachio", "config_flow.py"),
("sense", "config_flow.py"),
("sms", "config_flow.py"),
("solarlog", "config_flow.py"),
("sonos", "config_flow.py"),
("speedtestdotnet", "config_flow.py"),
("spider", "config_flow.py"),
("starline", "config_flow.py"),
("tado", "config_flow.py"),
("tahoma", "scene.py"),
("totalconnect", "config_flow.py"),
("tradfri", "config_flow.py"),
("tuya", "config_flow.py"),
("tuya", "scene.py"),
("upnp", "config_flow.py"),
("velux", "scene.py"),
("wemo", "config_flow.py"),
("wiffi", "config_flow.py"),
("wink", "scene.py"),
}
def validate(integrations: dict[str, Integration], config: Config):
"""Validate coverage."""
coverage_path = config.root / ".coveragerc"
not_found = []
checking = False
with coverage_path.open("rt") as fp:
for line in fp:
line = line.strip()
if not line or line.startswith("#"):
continue
if not checking:
if line == "omit =":
checking = True
continue
# Finished
if line == "[report]":
break
path = Path(line)
# Discard wildcard
path_exists = path
while "*" in path_exists.name:
path_exists = path_exists.parent
if not path_exists.exists():
not_found.append(line)
continue
if (
not line.startswith("homeassistant/components/")
or len(path.parts) != 4
or path.parts[-1] != "*"
):
continue
integration_path = path.parent
integration = integrations[integration_path.name]
for check in DONT_IGNORE:
if (integration_path.name, check) in ALLOWED_IGNORE_VIOLATIONS:
continue
if (integration_path / check).exists():
integration.add_error(
"coverage",
f"{check} must not be ignored by the .coveragerc file",
)
if not not_found:
return
errors = []
if not_found:
errors.append(
f".coveragerc references files that don't exist: {', '.join(not_found)}."
)
raise RuntimeError(" ".join(errors))
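# Illustrative .coveragerc fragment (hypothetical integration name) showing the
# layout the parser above expects: paths are read from the "omit =" block until
# the "[report]" section, and only entries of the form
# "homeassistant/components/<integration>/*" trigger the DONT_IGNORE check.
#
#   [run]
#   omit =
#       homeassistant/components/example_integration/*
#
#   [report]
#   exclude_lines =
#       pragma: no cover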
|
py | 1a4b1ff4651ae31c18788bf61ec897881f8e70e7 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Cameron White <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 poulp <[email protected]> #
# Copyright 2014 Tomas Radej <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 E. Dunham <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2017 Balázs Rostás <[email protected]> #
# Copyright 2017 Jannis Gebauer <[email protected]> #
# Copyright 2017 Simon <[email protected]> #
# Copyright 2018 Wan Liuyang <[email protected]> #
# Copyright 2018 bryanhuntesl <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# Copyright 2018 itsbruce <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.UserKey
import github.Issue
import github.Event
import github.Authorization
import github.Notification
import github.Migration
from . import Consts
class AuthenticatedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents AuthenticatedUsers as returned by https://developer.github.com/v3/users/#get-the-authenticated-user
An AuthenticatedUser object can be created by calling ``get_user()`` on a Github object.
"""
def __repr__(self):
return self.get__repr__({"login": self._login.value})
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def bio(self):
"""
:type: string
"""
self._completeIfNotSet(self._bio)
return self._bio.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def followers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._followers_url)
return self._followers_url.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def following_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._following_url)
return self._following_url.value
@property
def gists_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._gists_url)
return self._gists_url.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def hireable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._hireable)
return self._hireable.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def organizations_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organizations_url)
return self._organizations_url.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def received_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._received_events_url)
return self._received_events_url.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def site_admin(self):
"""
:type: bool
"""
self._completeIfNotSet(self._site_admin)
return self._site_admin.value
@property
def starred_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._starred_url)
return self._starred_url.value
@property
def subscriptions_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscriptions_url)
return self._subscriptions_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, str) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
)
def add_to_following(self, following):
"""
:calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/following/" + following._identity
)
def add_to_starred(self, starred):
"""
:calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/starred/" + starred._identity
)
def add_to_subscriptions(self, subscription):
"""
:calls: `PUT /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/subscriptions/" + subscription._identity
)
def add_to_watched(self, watched):
"""
:calls: `PUT /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/repos/" + watched._identity + "/subscription",
input={"subscribed": True}
)
def create_authorization(self, scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet, client_id=github.GithubObject.NotSet, client_secret=github.GithubObject.NotSet, onetime_password=None):
"""
:calls: `POST /authorizations <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param note: string
:param note_url: string
:param client_id: string
:param client_secret: string
:param onetime_password: string
:rtype: :class:`github.Authorization.Authorization`
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, str) for element in scopes), scopes
assert note is github.GithubObject.NotSet or isinstance(note, str), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, str), note_url
assert client_id is github.GithubObject.NotSet or isinstance(client_id, str), client_id
assert client_secret is github.GithubObject.NotSet or isinstance(client_secret, str), client_secret
assert onetime_password is None or isinstance(onetime_password, str), onetime_password
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
if client_id is not github.GithubObject.NotSet:
post_parameters["client_id"] = client_id
if client_secret is not github.GithubObject.NotSet:
post_parameters["client_secret"] = client_secret
if onetime_password is not None:
request_header = {Consts.headerOTP: onetime_password} # pragma no cover (Should be covered)
else:
request_header = None
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/authorizations",
input=post_parameters,
headers=request_header,
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks"
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_gist(self, public, files, description=github.GithubObject.NotSet):
"""
:calls: `POST /gists <http://developer.github.com/v3/gists>`_
:param public: bool
:param files: dict of string to :class:`github.InputFileContent.InputFileContent`
:param description: string
:rtype: :class:`github.Gist.Gist`
"""
assert isinstance(public, bool), public
assert all(isinstance(element, github.InputFileContent) for element in files.values()), files
assert description is github.GithubObject.NotSet or isinstance(description, str), description
post_parameters = {
"public": public,
"files": dict((key, value._identity) for key, value in files.items()),
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/gists",
input=post_parameters
)
return github.Gist.Gist(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /user/keys <http://developer.github.com/v3/users/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(title, str), title
assert isinstance(key, str), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/keys",
input=post_parameters
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
assert description is github.GithubObject.NotSet or isinstance(description, str), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, str), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(license_template, str), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, str), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def edit(self, name=github.GithubObject.NotSet, email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, location=github.GithubObject.NotSet, hireable=github.GithubObject.NotSet, bio=github.GithubObject.NotSet):
"""
:calls: `PATCH /user <http://developer.github.com/v3/users>`_
:param name: string
:param email: string
:param blog: string
:param company: string
:param location: string
:param hireable: bool
:param bio: string
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
assert email is github.GithubObject.NotSet or isinstance(email, str), email
assert blog is github.GithubObject.NotSet or isinstance(blog, str), blog
assert company is github.GithubObject.NotSet or isinstance(company, str), company
assert location is github.GithubObject.NotSet or isinstance(location, str), location
assert hireable is github.GithubObject.NotSet or isinstance(hireable, bool), hireable
assert bio is github.GithubObject.NotSet or isinstance(bio, str), bio
post_parameters = dict()
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if hireable is not github.GithubObject.NotSet:
post_parameters["hireable"] = hireable
if bio is not github.GithubObject.NotSet:
post_parameters["bio"] = bio
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user",
input=post_parameters
)
self._useAttributes(data)
def get_authorization(self, id):
"""
:calls: `GET /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param id: integer
:rtype: :class:`github.Authorization.Authorization`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/authorizations/" + str(id)
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(
github.Authorization.Authorization,
self._requester,
"/authorizations",
None
)
def get_emails(self):
"""
:calls: `GET /user/emails <http://developer.github.com/v3/users/emails>`_
:rtype: list of string
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/emails"
)
return data
def get_events(self):
"""
:calls: `GET /events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/events",
None
)
def get_followers(self):
"""
:calls: `GET /user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/followers",
None
)
def get_following(self):
"""
:calls: `GET /user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/following",
None
)
def get_gists(self, since=github.GithubObject.NotSet):
"""
:calls: `GET /gists <http://developer.github.com/v3/gists>`_
:param since: datetime.datetime format YYYY-MM-DDTHH:MM:SSZ
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists",
url_parameters
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, str), filter
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_user_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /user/issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, str), filter
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_key(self, id):
"""
:calls: `GET /user/keys/:id <http://developer.github.com/v3/users/keys>`_
:param id: integer
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/keys/" + str(id)
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def get_keys(self):
"""
:calls: `GET /user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
"/user/keys",
None
)
def get_notification(self, id):
"""
:calls: `GET /notifications/threads/:id <http://developer.github.com/v3/activity/notifications>`_
:rtype: :class:`github.Notification.Notification`
"""
assert isinstance(id, str), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/notifications/threads/" + id
)
return github.Notification.Notification(self._requester, headers, data, completed=True)
def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet):
"""
:calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification`
"""
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
# TODO: implement parameter "since"
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
)
def get_organization_events(self, org):
"""
:calls: `GET /users/:user/events/orgs/:org <http://developer.github.com/v3/activity/events>`_
:param org: :class:`github.Organization.Organization`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
assert isinstance(org, github.Organization.Organization), org
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/users/" + self.login + "/events/orgs/" + org.login,
None
)
def get_orgs(self):
"""
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
"/user/orgs",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, visibility=github.GithubObject.NotSet, affiliation=github.GithubObject.NotSet, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
        :calls: `GET /user/repos <http://developer.github.com/v3/repos>`_
:param visibility: string
:param affiliation: string
:param type: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert visibility is github.GithubObject.NotSet or isinstance(visibility, str), visibility
assert affiliation is github.GithubObject.NotSet or isinstance(affiliation, str), affiliation
assert type is github.GithubObject.NotSet or isinstance(type, str), type
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
url_parameters = dict()
if visibility is not github.GithubObject.NotSet:
url_parameters["visibility"] = visibility
if affiliation is not github.GithubObject.NotSet:
url_parameters["affiliation"] = affiliation
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/repos",
url_parameters
)
def get_starred(self):
"""
:calls: `GET /user/starred <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/starred",
None
)
def get_starred_gists(self):
"""
:calls: `GET /gists/starred <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists/starred",
None
)
def get_subscriptions(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def get_teams(self):
"""
:calls: `GET /user/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
"/user/teams",
None
)
def get_watched(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def has_in_following(self, following):
"""
:calls: `GET /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(following, github.NamedUser.NamedUser), following
status, headers, data = self._requester.requestJson(
"GET",
"/user/following/" + following._identity
)
return status == 204
def has_in_starred(self, starred):
"""
:calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(starred, github.Repository.Repository), starred
status, headers, data = self._requester.requestJson(
"GET",
"/user/starred/" + starred._identity
)
return status == 204
def has_in_subscriptions(self, subscription):
"""
:calls: `GET /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(subscription, github.Repository.Repository), subscription
status, headers, data = self._requester.requestJson(
"GET",
"/user/subscriptions/" + subscription._identity
)
return status == 204
def has_in_watched(self, watched):
"""
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(watched, github.Repository.Repository), watched
status, headers, data = self._requester.requestJson(
"GET",
"/repos/" + watched._identity + "/subscription"
)
return status == 200
    def mark_notifications_as_read(self, last_read_at=None):
        """
        :calls: `PUT /notifications <https://developer.github.com/v3/activity/notifications>`_
        :param last_read_at: datetime (defaults to the current UTC time)
        """
        if last_read_at is None:
            last_read_at = datetime.datetime.utcnow()
        assert isinstance(last_read_at, datetime.datetime)
put_parameters = {
"last_read_at": last_read_at.strftime('%Y-%m-%dT%H:%M:%SZ')
}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/notifications",
input=put_parameters
)
def remove_from_emails(self, *emails):
"""
:calls: `DELETE /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, str) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/emails",
input=post_parameters
)
def remove_from_following(self, following):
"""
:calls: `DELETE /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/following/" + following._identity
)
def remove_from_starred(self, starred):
"""
:calls: `DELETE /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/starred/" + starred._identity
)
def remove_from_subscriptions(self, subscription):
"""
:calls: `DELETE /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/subscriptions/" + subscription._identity
)
def remove_from_watched(self, watched):
"""
:calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/repos/" + watched._identity + "/subscription"
)
def accept_invitation(self, invitation):
"""
:calls: `PATCH /user/repository_invitations/:invitation_id <https://developer.github.com/v3/repos/invitations/>`
:param invitation: :class:`github.Invitation.Invitation` or int
:rtype: None
"""
assert isinstance(invitation, github.Invitation.Invitation) or isinstance(invitation, int)
if isinstance(invitation, github.Invitation.Invitation):
invitation = invitation.id
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user/repository_invitations/" + str(invitation),
input={}
)
def create_migration(self, repos, lock_repositories=github.GithubObject.NotSet, exclude_attachments=github.GithubObject.NotSet):
"""
:calls: `POST /user/migrations`_
:param repos: list or tuple of str
:param lock_repositories: bool
:param exclude_attachments: bool
:rtype: :class:`github.Migration.Migration`
"""
assert isinstance(repos, (list, tuple)), repos
assert all(isinstance(repo, str) for repo in repos), repos
assert lock_repositories is github.GithubObject.NotSet or isinstance(lock_repositories, bool), lock_repositories
assert exclude_attachments is github.GithubObject.NotSet or isinstance(exclude_attachments, bool), exclude_attachments
post_parameters = {
"repositories": repos
}
if lock_repositories is not github.GithubObject.NotSet:
post_parameters["lock_repositories"] = lock_repositories
if exclude_attachments is not github.GithubObject.NotSet:
post_parameters["exclude_attachments"] = exclude_attachments
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/migrations",
input=post_parameters,
headers={
"Accept": Consts.mediaTypeMigrationPreview
}
)
return github.Migration.Migration(self._requester, headers, data, completed=True)
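    # Usage sketch added for illustration (the repository names are hypothetical):
    #   migration = user.create_migration(["repo-one", "repo-two"], lock_repositories=True)
    # The Accept header above opts the request in to the migrations preview media type.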
def get_migrations(self):
"""
:calls: `GET /user/migrations`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Migration.Migration`
"""
return github.PaginatedList.PaginatedList(
github.Migration.Migration,
self._requester,
"/user/migrations",
None,
headers={
"Accept": Consts.mediaTypeMigrationPreview
}
)
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._site_admin = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(attributes["organizations_url"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(attributes["received_events_url"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "site_admin" in attributes: # pragma no branch
self._site_admin = self._makeBoolAttribute(attributes["site_admin"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(attributes["subscriptions_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
|
py | 1a4b204cd4c0936c02ef671851f86c1376ffb0f5 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
layers = tf.keras.layers
def resnetv1_bottleneck(bottom, filters, strides, kernel_size=3, conv_shortcut=False, name=None, conv_trainable=True, bn_trainable=True):
    if conv_shortcut:
shortcut = layers.Conv2D(4*filters, 1, strides, 'same', name=name+'_0_conv', trainable=conv_trainable)(bottom)
shortcut = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_0_bn', trainable=bn_trainable)(shortcut)
else:
shortcut = bottom
conv = layers.Conv2D(filters, 1, strides, 'same', name=name+'_1_conv', trainable=conv_trainable)(bottom)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name=name+'_1_relu')(conv)
conv = layers.Conv2D(filters, kernel_size, 1, 'same', name=name+'_2_conv', trainable=conv_trainable)(conv)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_2_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name=name+'_2_relu')(conv)
conv = layers.Conv2D(4*filters, 1, 1, 'same', name=name+'_3_conv', trainable=conv_trainable)(conv)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_3_bn', trainable=bn_trainable)(conv)
add = layers.Add(name=name+'_add')([shortcut, conv])
relu = layers.Activation('relu', name=name+'_out')(add)
return relu
def stack_resnetv1_bottleneck(bottom, filters, num_blocks, strides, kernel_size=3, name=None, conv_trainable=True, bn_trainable=True):
block = resnetv1_bottleneck(bottom, filters, strides, kernel_size, conv_shortcut=True, name=name+'_block1', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
for i in range(2, num_blocks+1):
block = resnetv1_bottleneck(block, filters, 1, kernel_size, name=name+'_block'+str(i), conv_trainable=conv_trainable, bn_trainable=bn_trainable)
return block
def resnetv2_bottleneck(bottom, filters, strides, kernel_size=3, conv_shortcut=False, name=None, conv_trainable=True, bn_trainable=True):
preact = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_preact_bn', trainable=bn_trainable)(bottom)
preact = layers.Activation('relu', name=name+'_preact_relu')(preact)
    if conv_shortcut:
shortcut = layers.Conv2D(4*filters, 1, strides, 'same', name=name+'_0_conv', trainable=conv_trainable)(preact)
else:
shortcut = bottom
conv = layers.Conv2D(filters, 1, strides, 'same', use_bias=False, name=name+'_1_conv', trainable=conv_trainable)(preact)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name=name+'_1_relu')(conv)
conv = layers.Conv2D(filters, kernel_size, 1, 'same', use_bias=False, name=name+'_2_conv', trainable=conv_trainable)(conv)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_2_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name=name+'_2_relu')(conv)
conv = layers.Conv2D(4*filters, 1, 1, 'same', name=name+'_3_conv', trainable=conv_trainable)(conv)
conv = layers.Add(name=name+'_out')([shortcut, conv])
return conv
def stack_resnetv2_bottleneck(bottom, filters, num_blocks, stride, name=None, conv_trainable=True, bn_trainable=True):
block = resnetv2_bottleneck(bottom, filters, stride, conv_shortcut=True, name=name+'_block1', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
for i in range(2, num_blocks+1):
block = resnetv2_bottleneck(block, filters, 1, name=name+'_block'+str(i), conv_trainable=conv_trainable, bn_trainable=bn_trainable)
return block
def resnext_bottleneck(bottom, filters, strides, kernel_size=3, groups=32, conv_shortcut=False, name=None, conv_trainable=True, bn_trainable=True):
assert filters % groups == 0
if conv_shortcut:
shortcut = layers.Conv2D((64//groups)*filters, 1, strides, 'same', use_bias=False, name=name+'_0_conv', trainable=conv_trainable)(bottom)
shortcut = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_0_bn', trainable=bn_trainable)(shortcut)
else:
shortcut = bottom
conv = layers.Conv2D(filters, 1, strides, 'same', use_bias=False, name=name+'_1_conv', trainable=conv_trainable)(bottom)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name=name+'_1_relu')(conv)
c = filters // groups
dwconv = layers.DepthwiseConv2D(kernel_size, 1, 'same', depth_multiplier=c, use_bias=False, name=name+'_2_conv', trainable=conv_trainable)(conv)
dwconv_shape = tf.shape(dwconv)
dwconv = tf.reshape(dwconv, [dwconv_shape[0], dwconv_shape[1], dwconv_shape[2], c, filters])
dwconv = tf.reshape(dwconv, [dwconv_shape[0], dwconv_shape[1], dwconv_shape[2], c, groups, c])
dwconv = tf.reduce_sum(dwconv, axis=-1)
dwconv = tf.reshape(dwconv, [dwconv_shape[0], dwconv_shape[1], dwconv_shape[2], filters])
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_2_bn', trainable=bn_trainable)(dwconv)
conv = layers.Activation('relu', name=name+'_2_relu')(conv)
conv = layers.Conv2D((64//groups)*filters, 1, 1, 'same', use_bias=False, name=name+'_3_conv', trainable=conv_trainable)(conv)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name=name+'_3_bn', trainable=bn_trainable)(conv)
add = layers.Add(name=name+'_add')([shortcut, conv])
relu = layers.Activation('relu', name=name+'_out')(add)
return relu
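# Note added for clarity: the DepthwiseConv2D + reshape + reduce_sum sequence above
# emulates a grouped convolution with cardinality `groups`. depth_multiplier=c gives
# every input channel c raw outputs; viewing them as [..., c, groups, c] and summing
# over the last axis merges those outputs group by group, so no native grouped-conv
# op is required.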
def stack_resnext_bottleneck(bottom, filters, num_blocks, stride, kernel_size=3, groups=32, name=None, conv_trainable=True, bn_trainable=True):
    block = resnext_bottleneck(bottom, filters, stride, kernel_size, groups, conv_shortcut=True, name=name+'_block1', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
for i in range(2, num_blocks+1):
        block = resnext_bottleneck(block, filters, 1, kernel_size, groups, name=name+'_block'+str(i), conv_trainable=conv_trainable, bn_trainable=bn_trainable)
return block
def resnetv1_50(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='conv1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='conv1_relu')(conv)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv1_bottleneck(conv, 128, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 256, 6, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv1_50')
if weight is not None:
model.load_weights(weight)
return model
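# Usage sketch added for illustration (the input resolution is an assumption):
#   inputs = tf.keras.Input(shape=(224, 224, 3))
#   backbone = resnetv1_50(inputs, conv_trainable=True, bn_trainable=False)
#   c2, c3, c4, c5 = backbone.outputs  # feature maps at strides 4, 8, 16 and 32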
def resnetv1_101(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='conv1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='conv1_relu')(conv)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv1_bottleneck(conv, 128, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 256, 23, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv1_101')
if weight is not None:
model.load_weights(weight)
return model
def resnetv1_152(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='conv1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='conv1_relu')(conv)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv1_bottleneck(conv, 128, 8, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 256, 36, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv1_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv1_152')
if weight is not None:
model.load_weights(weight)
return model
def resnetv2_50(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv2_bottleneck(conv, 128, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 256, 6, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='post_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='post_relu')(conv)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv2_50')
if weight is not None:
model.load_weights(weight)
return model
def resnetv2_101(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv2_bottleneck(conv, 128, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 256, 23, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='post_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='post_relu')(conv)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv2_101')
if weight is not None:
model.load_weights(weight)
return model
def resnetv2_152(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', trainable=conv_trainable)(input)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 64, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnetv2_bottleneck(conv, 128, 8, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 256, 36, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnetv2_bottleneck(conv, 512, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='post_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='post_relu')(conv)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnetv2_152')
if weight is not None:
model.load_weights(weight)
return model
def resnext_50(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', use_bias=False, trainable=conv_trainable)(input)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='conv1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='conv1_relu')(conv)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 128, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnext_bottleneck(conv, 256, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 512, 6, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 1024, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnext_50')
if weight is not None:
model.load_weights(weight)
return model
def resnext_101(input,conv_trainable=True, bn_trainable=True, weight=None):
"""
:param input: tensor of 'nhwc'
    :param conv_trainable: whether the conv layers in the net are trainable
    :param bn_trainable: whether the bn layers in the net are trainable
    :param weight: if not None, these weights are loaded into the net
    :return: list of feature maps at strides 4, 8, 16 and 32
"""
endpoints = []
conv = layers.Conv2D(64, 7, 2, 'same', name='conv1_conv', use_bias=False, trainable=conv_trainable)(input)
conv = layers.BatchNormalization(3, epsilon=1.001e-5, name='conv1_bn', trainable=bn_trainable)(conv)
conv = layers.Activation('relu', name='conv1_relu')(conv)
conv = layers.MaxPool2D(3, 2, 'same', name='pool1_pool')(conv)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 128, 3, 1, name='conv2', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
conv = stack_resnext_bottleneck(conv, 256, 4, 2, name='conv3', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 512, 23, 2, name='conv4', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
conv = stack_resnext_bottleneck(conv, 1024, 3, 2, name='conv5', conv_trainable=conv_trainable, bn_trainable=bn_trainable)
endpoints.append(conv)
model = tf.keras.Model(inputs=input, outputs=endpoints, name='resnext_101')
if weight is not None:
model.load_weights(weight)
return model
|
py | 1a4b207e0102c8c54ba32295d5e618d6a737ae2a | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Aug 12 09:45:02 2019
# by: The Resource Compiler for PySide2 (Qt v5.12.3)
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtGui, QtCore, QtWidgets
qt_resource_data = "\
\x00\x00\x00\xb7\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x04\x00\x00\x00#\x93>S\
\x00\x00\x00 cHRM\x00\x00z%\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17o\x92_\xc5F\x00\x00\x00R\
IDATx\xdabX\xf5\xe9\xca?\x18\x5c\xfe\x9e\
!\xd3\xff\xc4\x8f\xab\xbf\xaf\xfe\xbe\xfa\xfb\xd0\x97hc\
\x86\xff\x0c\x85k\xf7~\xdc\xfbq\xf3\x87\xcc\xbc\xff\x0c\
\x0c\xff\x19\x18\x98s\xce\xce\xbd\x1f9\xff?\xc3\x7f\x06\
\x86\xff\x0c\xff\x19\x14\xdd,\xb6\xfeg\xf8\xcf\xf0\x9f\x01\
0\x00j_,gt\xda\xec\xfb\x00\x00\x00\x00IE\
ND\xaeB`\x82\
\x00\x00\x00\xb9\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x06\x00\x00\x00\x89\x9a\xf6\xd8\
\x00\x00\x00\x04gAMA\x00\x00\xe0\xfcaP-\x96\
\x00\x00\x00pIDAT\x18Wc\xf8\xff\xff?\x1c\
[XXlUTTt\x83\xf1\x99\x18\xa0 **\
j~jj\xaa\x96\xb7\xb7w;###3H\x8c\
\x05Ddee\xe5\x01%\x03\xb9\xb8\xb8\x18\x15\x14\x14\
\x84XYYW\x01\x85\x83\x19\xa3\xa3\xa3\x8d\xd3\xd3\xd3\
\x0f\x0a\x0b\x0b\xb3\x83\x14\x82\xc0\xe7\xcf\x9f\xff.\x5c\xb8\
0\x9cq\xf9\xf2\xe5\xefuuu\xf9\xa1\xe2pp\xed\
\xda\xb5/\x00\xbdl*\x96St\x81\x19\x00\x00\x00\x00\
IEND\xaeB`\x82\
\x00\x00\x01B\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00 cHRM\x00\x00z%\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17o\x92_\xc5F\x00\x00\x00\xdd\
IDATx\xda\x5c\x8e\xb1N\x84@\x18\x84g\xef\
L,\xc8\xd9,\x0dXP\x1b\x0b\xc3\xfa$w\xbd\x0d\
\x85O@\x0b\xbb\xcb;\xd0hAr\xc5\xd2(O\x02\
\xcf\xb1\x97@a\xd4\xc2\xc4b,\xbcM\xd0I\xfe\xbf\
\xf82\xff?#H\xc2Z;\x00\x80\xd6\xfa\x80\xb3\xac\
\xb5\x03I\x18c\x0e[!\xc4\x90\xe7\xf9>I\x92\x9b\
\xbe\xef\xef\xca\xb2|\xf5\xde\xbf\x04\xe6\x9c\xbb\xbd \xf9\
\x19\xae\x95R\xfb,\xcb\xbe\xa5\x94\x01\x81\xe4\x9b8\xbf\
<*\xa5\x1e\xf0O\xe38>7M\xf3(H\x02\x00\
\xba\xae{\x97R\xee\x82aY\x96\x8f\xa2(\xae\x00`\
\x03\x00\xc6\x98\xe3\xda\x00\x00q\x1c\xef\xb4\xd6O\x00\xb0\
\x05\xf0'j\x9egDQ\x04\x00H\xd3\xf4\xde9w\
\xbd!\xf9\xb5\xeapj\xdb\xf6r\x9a\xa6\xd3\xaa\xf8\xef\
\xaa\xeb\xdaWU\xe5I\x22\xcc\x9a\xfd\x0c\x00$\xabn\
\xfa\x96!\xfc\xb8\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x00\xa5\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x06\x00\x00\x00\x06\x08\x06\x00\x00\x00\xe0\xcc\xefH\
\x00\x00\x00 cHRM\x00\x00z%\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17o\x92_\xc5F\x00\x00\x00@\
IDATx\xda\x5c\x8c1\x11\x000\x08\xc4B-\
\x03\xfc+aE\x02\x1a\xe8T\xaem\xc6\xcf}\xc4\
\xcc\x1a \x22\x84\x8b\x05\x90\x99\xa8j\xdfB\xba{\xc6\
\xaa\x92G\x1c\xdc}\xb2\x8b\x8f\x93}\x1e\xc0d\xf7\x00\
\xf5\x9f\x1d\xd3\x02\x88\xef\xaf\x00\x00\x00\x00IEND\
\xaeB`\x82\
"
qt_resource_name = "\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00d\
\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\
\x00\x0c\
\x06\xe6\xe6g\
\x00u\
\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\
\x00\x0c\
\x04V#g\
\x00c\
\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00.\x00p\x00n\x00g\
\x00\x0a\
\x0b-\x87\xc7\
\x00h\
\x00a\x00n\x00d\x00l\x00e\x00.\x00p\x00n\x00g\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\
\x00\x00\x00@\x00\x00\x00\x00\x00\x01\x00\x00\x01x\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\xbb\
\x00\x00\x00^\x00\x00\x00\x00\x00\x01\x00\x00\x02\xbe\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
py | 1a4b20efa6a22abe1e9a35eb008341421b09f338 | import unittest
from ultrasonic.driver import UltrasonicDriver
class UltrasonicSensorTest(unittest.TestCase):
def test_parse_data(self):
test_data = "SensorA: 34\nSensorB: 0\nSensorC: 0\nSensorA: 40\nSensorD: 0"
parsed_data = []
for line in test_data.split("\n"):
parsed_data.append(UltrasonicDriver.parse_data(line))
self.assertIn(("A", 0.34), parsed_data)
if __name__ == '__main__':
unittest.main() |
py | 1a4b2153c81d4814c1a72e613f4b48b077d4ea28 | import os
import re
from poetry.semver import Version
from poetry.version.requirements import Requirement
from .dependency import Dependency
from .dependency_package import DependencyPackage
from .directory_dependency import DirectoryDependency
from .file_dependency import FileDependency
from .locker import Locker
from .package import Package
from .package_collection import PackageCollection
from .project_package import ProjectPackage
from .utils.link import Link
from .utils.utils import convert_markers
from .utils.utils import group_markers
from .utils.utils import is_archive_file
from .utils.utils import is_installable_dir
from .utils.utils import is_url
from .utils.utils import path_to_url
from .utils.utils import strip_extras
from .vcs_dependency import VCSDependency
def dependency_from_pep_508(name):
# Removing comments
parts = name.split("#", 1)
name = parts[0].strip()
if len(parts) > 1:
rest = parts[1]
if ";" in rest:
name += ";" + rest.split(";", 1)[1]
req = Requirement(name)
if req.marker:
markers = convert_markers(req.marker)
else:
markers = {}
name = req.name
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
else:
p, extras = strip_extras(path)
if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")):
if not is_installable_dir(p):
raise ValueError(
"Directory {!r} is not installable. File 'setup.py' "
"not found.".format(name)
)
link = Link(path_to_url(p))
elif is_archive_file(p):
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == "file" and re.search(r"\.\./", link.url):
link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
m = re.match(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))", link.filename)
if not m:
raise ValueError("Invalid wheel name: {}".format(link.filename))
name = m.group("name")
version = m.group("ver")
dep = Dependency(name, version)
else:
name = link.egg_fragment
if link.scheme == "git":
dep = VCSDependency(name, "git", link.url_without_fragment)
else:
dep = Dependency(name, "*")
else:
if req.pretty_constraint:
constraint = req.constraint
else:
constraint = "*"
dep = Dependency(name, constraint)
if "extra" in markers:
# If we have extras, the dependency is optional
dep.deactivate()
for or_ in markers["extra"]:
for _, extra in or_:
dep.in_extras.append(extra)
if "python_version" in markers:
ors = []
for or_ in markers["python_version"]:
ands = []
for op, version in or_:
# Expand python version
if op == "==":
version = "~" + version
op = ""
elif op == "!=":
version += ".*"
elif op in ("<=", ">"):
parsed_version = Version.parse(version)
if parsed_version.precision == 1:
if op == "<=":
op = "<"
version = parsed_version.next_major.text
elif op == ">":
op = ">="
version = parsed_version.next_major.text
elif parsed_version.precision == 2:
if op == "<=":
op = "<"
version = parsed_version.next_minor.text
elif op == ">":
op = ">="
version = parsed_version.next_minor.text
elif op in ("in", "not in"):
versions = []
for v in re.split("[ ,]+", version):
split = v.split(".")
if len(split) in [1, 2]:
split.append("*")
op_ = "" if op == "in" else "!="
else:
op_ = "==" if op == "in" else "!="
versions.append(op_ + ".".join(split))
glue = " || " if op == "in" else ", "
if versions:
ands.append(glue.join(versions))
continue
ands.append("{}{}".format(op, version))
ors.append(" ".join(ands))
dep.python_versions = " || ".join(ors)
if req.marker:
dep.marker = req.marker
# Extras
for extra in req.extras:
dep.extras.append(extra)
return dep
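# Usage sketch added for illustration (the requirement string is hypothetical):
#   dep = dependency_from_pep_508('requests[security]>=2.20; python_version >= "3.6"')
# would produce a Dependency named "requests" with constraint ">=2.20", the extra
# "security" recorded, and the python_version marker folded into dep.python_versions.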
|
py | 1a4b2332e01c34edb48deb036b93f04f162ea00f | """
- intial setup:
- ask for home dir (default to
```
from pathlib import Path
HOME_DIR = str(Path.home())
```
)
- ask for output dir for gh_releases
- commands
- add
- remove
- update
"""
from util import getConfigPath
if __name__ == "__main__":
if not getConfigPath().exists():
from commands.initial_setup import init_setup
init_setup()
else:
from commands import cli
cli()
|
py | 1a4b248cf2e0cabf69f7c48dd56aab4659fabffb | """
Message delivery
Various interfaces to messaging services. Currently:
- ``pushover`` - a platform for sending and receiving push notifications
is supported.
AUTHORS:
- Martin Albrecht (2012) - initial implementation
"""
import http.client as httplib
from urllib.parse import urlencode
from ssl import SSLContext
pushover_defaults = {"token": "Eql67F14ohOZJ0AtEBJJU7FiLAk8wK"}
def pushover(message, **kwds):
"""
Send a push notification with ``message`` to ``user`` using https://pushover.net/.
Pushover is a platform for sending and receiving push notifications. On the server side, it
provides an HTTP API for queueing messages to deliver to devices. On the device side, iOS and
Android clients receive those push notifications, show them to the user, and store them for
offline viewing.
An account on https://pushover.net is required and the Pushover app must be installed on your
phone for this function to be able to deliver messages to you.
INPUT:
- ``message`` - your message
- ``user`` - the user key (not e-mail address) of your user (or you), viewable when logged
into the Pushover dashboard. (default: ``None``)
- ``device`` - your user's device identifier to send the message directly to that device,
rather than all of the user's devices (default: ``None``)
- ``title`` - your message's title, otherwise uses your app's name (default: ``None``)
- ``url`` - a supplementary URL to show with your message (default: ``None``)
- ``url_title`` - a title for your supplementary URL (default: ``None``)
- ``priority`` - set to 1 to display as high-priority and bypass quiet hours, or -1 to always
send as a quiet notification (default: ``0``)
- ``timestamp`` - set to a unix timestamp to have your message show with a particular time,
rather than now (default: ``None``)
- ``sound`` - set to the name of one of the sounds supported by device clients to override the
user's default sound choice (default: ``None``)
- ``token`` - your application's API token (default: Sage's default App token)
EXAMPLES::
sage: import sage.misc.messaging
sage: sage.misc.messaging.pushover("Hi, how are you?", user="XXX") # not tested
To set default values populate ``pushover_defaults``::
sage: sage.misc.messaging.pushover_defaults["user"] = "USER_TOKEN"
sage: sage.misc.messaging.pushover("Hi, how are you?") # not tested
.. note::
You may want to populate ``sage.misc.messaging.pushover_defaults`` with default values such
as the default user in ``$HOME/.sage/init.sage``.
"""
request = {"message": message}
request.update(pushover_defaults)
request.update(kwds)
conn = httplib.HTTPSConnection("api.pushover.net:443", context=SSLContext())
conn.request("POST", "/1/messages.json",
urlencode(request),
{"Content-type": "application/x-www-form-urlencoded"})
return conn.getresponse().status == 200
|
py | 1a4b24d1ddde2048f46015eb0572e83359cca733 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='GradespeedScraper',
version='0.1-dev',
description='Scrapes Gradespeed',
author='Davis Robertson',
author_email='[email protected]',
license='MIT',
url='https://github.com/epicdavi/GradespeedScraper/',
      install_requires=['mechanize>=0.2.5', 'beautifulsoup4>=4.3,<4.4'],
) |
py | 1a4b272d0cfc420b75357f12bb6515126cf0ec7a | """
@file
@brief Helpers to run examples created with function
@see fn export2tf2onnx.
"""
import collections
import inspect
import numpy
from onnx.numpy_helper import from_array
from onnx.helper import (
make_node, make_graph, make_model, set_model_props, make_tensor)
from onnx import AttributeProto
from ..onnx2py_helper import guess_dtype, guess_proto_dtype
from ..onnx_tools import ensure_topological_order
_make_name_id = 0
def make_name(name):
"Creates a unique name."
global _make_name_id # pylint: disable=W0603
name = "%s_%d" % (name, _make_name_id)
_make_name_id += 1
return name
def make_sure(cond, msg, *args):
"Raises an exception if cond is not verified."
if not cond:
raise RuntimeError(msg % tuple(args))
def map_onnx_to_numpy_type(onnx_dtype):
"Converts ONNX type into numpy type."
return guess_dtype(onnx_dtype)
class tf_op:
"""
Decorator to register any new converter.
:param name: type of the operator to rewrite
:param domain: domain
"""
_OPSETS = collections.OrderedDict()
def __init__(self, name, domain='', **kwargs):
if not isinstance(name, list):
name = [name]
self.names = name
self.domain = domain
self.kwargs = kwargs
def __call__(self, func):
for ke, va in inspect.getmembers(func, inspect.ismethod):
if ke.startswith("version_"):
version = int(ke.replace("version_", ""))
self._register_handler(
va, version, self.names, self.domain, self.kwargs)
return func
def _register_handler(self, func, version, names, domain, kwargs):
opset = tf_op._OPSETS.get(domain)
if not opset:
opset = []
tf_op._OPSETS[domain] = opset
while version >= len(opset):
opset.append({})
opset_dict = opset[version]
for name in names:
opset_dict[name] = (func, kwargs)
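# Registration sketch added for illustration (the converter class is hypothetical):
#
#   @tf_op("Range")
#   class ConvertRange:
#       @classmethod
#       def version_7(cls, ctx, node, target_opset=None, **kwargs):
#           # rewrite `node` in place through ctx.make_node / ctx.make_const
#           pass
#
# Each `version_N` classmethod is stored in tf_op._OPSETS[domain][N][op_type];
# Tf2OnnxConvert.run below looks handlers up in that table when rewriting a graph.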
class Tf2OnnxConvert:
"""
Applies the converter on an ONNX graph.
:param onnx_model: ONNX graph
:param tf_op: class which register
:param verbose: verbosity
:param target_opset: targetted opsets
"""
def __init__(self, onnx_model, _tf_op=None, verbose=None,
target_opset=None):
self._onnx_model = onnx_model
self._tf_op = _tf_op or tf_op
self.verbose = verbose
if isinstance(target_opset, int):
self.target_opsets = {'': target_opset}
elif isinstance(target_opset, dict):
self.target_opsets = target_opset
elif target_opset is None:
opsets = {}
for oimp in onnx_model.opset_import:
if oimp.domain == '':
opsets[oimp.domain] = oimp.version
opset = oimp.version
else:
opsets[oimp.domain] = opset
self.target_opsets = opsets
else:
raise ValueError( # pragma: no cover
"Unexepected value for target_opset=%r." % target_opset)
self._names = {}
for node in onnx_model.graph.node:
self._names[node.name] = node
for init in onnx_model.graph.initializer:
self._names[init.name] = init
# _forbidden_new_names contains current names and deleted names.
self._forbidden_new_names = set(self._names)
if '' in self.target_opsets:
self.opset = self.target_opsets['']
if not hasattr(self, 'opset'):
raise RuntimeError( # pragma: no cover
"Attribute opset is missing, target_opset=%r." % target_opset)
def get_node_by_name(self, name):
"""
Retrieves a node by its name.
:param name: node name
:return: node name
"""
if name not in self._names:
raise RuntimeError(
"Unable to find node name %r among %r." % (
name, ", ".join(sorted(self._names))))
return self._names[name]
def _add_node_name(self, obj):
"""
Registers an object in in the graph by its name.
:param name: node or initializer
"""
if obj.name in self._forbidden_new_names:
raise RuntimeError(
"Name %r is already registered." % obj.name)
self._names[obj.name] = obj
self._forbidden_new_names.add(obj.name)
def make_node(self, op_type, inputs, attr=None, outputs=None,
name=None, domain='', output_count=1):
"""
Adds a node to the list of nodes.
:param op_type: operator type
:param inputs: list of strings
:param attr: dictionary of attributes
:param outputs: None or list of strings
:param output_count: used if outputs is None to guess
the number of outputs of this node
:param name: name of the node
:param domain: domain
:return: created node
"""
if self.verbose:
print("[Tf2OnnxConvert.make_node] op_type=%r inputs=%r" % (
op_type, inputs))
if attr is None:
attr = {}
if name is None:
name = make_name(op_type)
if name in self._names:
raise RuntimeError(
"Node name %r already exists in %r." % (
name, ", ".join(sorted(self._names))))
if outputs is None:
outputs = [(name + ":" + str(i)) for i in range(output_count)]
output_count = len(outputs)
raw_attr = {}
onnx_attrs = []
for a, v in attr.items():
if isinstance(v, AttributeProto):
onnx_attrs.append(v)
else:
raw_attr[a] = v
onnx_node = make_node(
op_type, inputs, outputs, name=name, domain=domain, **raw_attr)
self._add_node_name(onnx_node)
return onnx_node
def make_const(self, name, np_val, skip_conversion=False, raw=True):
"""
Make a new constants in the graph.
:param name: const node name, must be unique.
:param np_val: value of type numpy ndarray.
:param skip_conversion:
bool, indicate whether this created node would be mapped
during conversion
:param raw: whether to store data at field of raw_data or the
specific field according to its dtype
:return: create initializer
"""
if name in self._names:
raise RuntimeError(
"Initializer name %r already exists in %r." % (
name, ", ".join(sorted(self._names))))
np_val_flat = np_val.flatten()
        # numpy.object_ (rather than the removed numpy.object alias) keeps this valid on recent NumPy
        is_bytes = (np_val.dtype == numpy.object_ and len(np_val_flat) > 0 and
                    isinstance(np_val_flat[0], bytes))
if raw and not is_bytes:
onnx_tensor = from_array(np_val, name)
else:
onnx_tensor = make_tensor(
name, guess_proto_dtype(np_val.dtype),
np_val.shape, np_val_flat, raw=False)
self._add_node_name(onnx_tensor)
return onnx_tensor
def get_dtype(self, input_name):
"""
Returns the type of one node or None if unknown.
:param input_name: result name
:return: numpy dtype
"""
inputs = self._onnx_model.graph.input
names = [_.name for _ in inputs]
if input_name not in names:
return None # pragma: no cover
ind = names.index(input_name)
return inputs[ind].type.tensor_type.elem_type
def replace_all_inputs(self, old_name, new_name):
"""
Every taking *old_name* as inputs will take *new_name* instead.
Looks in the output as well but in that case, it creates an identity
node to avoid changing an output name.
:param old_name: name to replace
:param new_name: new name
:return: list of impacted nodes
"""
res = []
for node in self._names.values():
if not hasattr(node, 'input'):
continue
if old_name not in node.input:
continue
            # node.input holds plain result names (strings), so compare them directly
            new_inputs = [new_name if i == old_name else i
                          for i in node.input]
node.input[:] = new_inputs[:]
res.append(node)
if self.verbose:
print("[Tf2OnnxConvert.replace_all_inputs] replace %r by %r in node %r" % (
old_name, new_name, node.name))
for o in self._onnx_model.graph.output:
if o.name != old_name:
continue
n = self.make_node("Identity", [new_name], outputs=[old_name],
name=make_name("IdOutputReplaced"))
res.append(n)
if self.verbose:
print("[Tf2OnnxConvert.replace_all_inputs] add id node from %r to %r "
"with node %r." % (
old_name, new_name, n.name)) # pylint: disable=E1101
return res
def remove_node(self, name):
"""
Removes a node name from the list.
"""
if name not in self._names:
raise RuntimeError(
"Unable to delete name %r because it does not exists." % name)
del self._names[name]
if self.verbose:
print("[Tf2OnnxConvert.remove_node] delete name %r" % name)
def get_shape(self, input_name):
"""
Returns the type of one node or None if unknown.
:param input_name: result name
:return: numpy dtype
"""
inputs = self._onnx_model.graph.input
names = [_.name for _ in inputs]
if input_name not in names:
return None # pragma: no cover
ind = names.index(input_name)
dims = inputs[ind].type.tensor_type.shape.dim
return tuple(dims)
def run(self):
"""
Calls the registered converters on the graph
held by this instance. Returns the new onnx graph.
:return: ONNX graph
"""
if len(self._tf_op._OPSETS) == 0:
raise RuntimeError( # pragma: no cover
"No converter was registered.")
if self.verbose:
print("[Tf2OnnxConvert.run]")
done = {}
modif = 1
while modif > 0:
modif = 0
# The converter may alter the current list of nodes, we freeze it.
current_values = list(self._names.values())
for node in current_values:
if not hasattr(node, 'domain'):
# initializer
continue
if done.get(node.name, False):
continue
domain = node.domain
if domain not in self._tf_op._OPSETS:
continue
# look for a converter
rews = self._tf_op._OPSETS[domain]
target = min(self.target_opsets[domain], len(rews))
conv = None
for i in range(len(rews) - 1, -1, -1):
if node.op_type in rews[i]:
conv = rews[i][node.op_type]
break
if conv is None:
continue
# applies the converter
if self.verbose:
print("[Tf2OnnxConvert.run] convert node type=%r opset=%r name=%r"
"" % (node.op_type, target, node.name))
fct, kwargs = conv
fct(self, node, target_opset=target, **kwargs)
modif += 1
return self.make_model()
def make_model(self):
"""
Produces the new ONNX graph with the updated sets of nodes.
"""
inputs = self._onnx_model.graph.input
outputs = self._onnx_model.graph.output
inits = [init[1] for init in sorted(self._names.items())
if not hasattr(init[1], 'domain')]
nodes = [node[1] for node in sorted(self._names.items())
if hasattr(node[1], 'domain')]
nodes = ensure_topological_order(inputs, inits, nodes)
if self.verbose:
print(
"[Tf2OnnxConvert.make_node] %d nodes %d inputs %d "
"outputs %d initializers"
"" % (len(nodes), len(inputs), len(outputs), len(inits)))
graph = make_graph(nodes, self._onnx_model.graph.name,
inputs, outputs, inits)
onnx_model = make_model(graph)
onnx_model.ir_version = self._onnx_model.ir_version
onnx_model.producer_name = self._onnx_model.producer_name + "-mlprodict"
onnx_model.producer_version = self._onnx_model.producer_version
onnx_model.domain = self._onnx_model.domain
onnx_model.model_version = self._onnx_model.model_version
onnx_model.doc_string = self._onnx_model.doc_string
metadata = {p.key: p.value for p in self._onnx_model.metadata_props}
set_model_props(onnx_model, metadata)
# opsets
del onnx_model.opset_import[:] # pylint: disable=E1101
for dom, value in self.target_opsets.items():
op_set = onnx_model.opset_import.add() # pylint: disable=E1101
op_set.domain = dom
op_set.version = value
return onnx_model
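# End-to-end sketch added for illustration (`model_onnx` stands for a hypothetical
# ModelProto containing the TF-style nodes to rewrite):
#   converter = Tf2OnnxConvert(model_onnx, verbose=1, target_opset=14)
#   rewritten = converter.run()  # returns a new ONNX ModelProto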
class GraphBuilder:
"""
Helpers to build graph.
    :param graph: graph the new nodes and constants are added to
"""
def __init__(self, graph):
self._g = graph
@property
def graph(self):
"Returns the graph."
return self._g
def make_slice(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False):
"""
slice changes its schema at opset 10: it treats some attributes as dynamic input
so this function has to process inputs according to graph's opset version
to get "inputs" and "attr" to feed "make_node"
kwargs: key could be ["data", "starts", "ends", "axes", "steps", "outputs"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 10:
# "data" is string
# "starts", "ends" and "axes" are attributes, and "axes" is optional.
data = kwargs.pop("data")
starts = self._convert_to_attribute(kwargs.pop("starts"))
ends = self._convert_to_attribute(kwargs.pop("ends"))
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"starts": starts, "ends": ends, "axes": axes}
inputs = [data]
else:
# slice-10 has 3 required inputs "data", "starts", "ends"l
# and 2 optional inputs "axes", "steps"
# input sequence should be "data", "starts", "ends", "axes", "steps"
attr = {}
data = kwargs.pop("data")
starts = self._convert_to_input(kwargs.pop(
"starts"), "const_starts", dtype=numpy.int64)
ends = self._convert_to_input(kwargs.pop(
"ends"), "const_ends", dtype=numpy.int64)
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
steps = self._convert_to_input(kwargs.pop(
"steps", None), "const_steps", is_optional=True, dtype=numpy.int64)
inputs = [data, starts.name, ends.name, axes.name, steps.name]
# pro-process inputs and attr
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
# remove tailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
if self.graph.opset >= 10:
dtype = self.graph.get_dtype(inputs[1])
for input_data in inputs[1:]:
if input_data != "":
make_sure(dtype == self.graph.get_dtype(
input_data), "dtype should be same")
node = self.graph.make_node(op_type="Slice", inputs=inputs, attr=attr, name=name,
outputs=outputs, shapes=shapes, dtypes=dtypes)
if return_node:
return node
raise NotImplementedError("return_node must be True")
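    # Call sketch added for illustration (tensor names and bounds are hypothetical):
    #   gb = GraphBuilder(graph)
    #   node = gb.make_slice({"data": "X", "starts": [0], "ends": [3], "axes": [0]},
    #                        return_node=True)
    # Before opset 10 the bounds become Slice attributes; from opset 10 onwards they
    # are materialised as constants via make_const and wired in as extra inputs.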
def make_squeeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
"""
Squeeze changes its schema at opset 13: it treats axes as a dynamic input
kwargs: key could be ["data", "axes"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 13:
data = kwargs.pop("data")
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"axes": axes}
inputs = [data]
else:
data = kwargs.pop("data")
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
attr = {}
inputs = [data, axes.name]
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
# remove tailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
node = self.graph.make_node(op_type="Squeeze", inputs=inputs, attr=attr, name=name,
outputs=outputs)
if return_node:
return node
raise NotImplementedError("return_node must be True")
def make_unsqueeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
"""
Unsqueeze changes its schema at opset 13: it treats axes as a dynamic input
kwargs: key could be ["data", "axes"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 13:
data = kwargs.pop("data")
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"axes": axes}
inputs = [data]
else:
data = kwargs.pop("data")
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
attr = {}
inputs = [data, axes.name]
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
# remove tailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
node = self.graph.make_node(op_type="Unsqueeze", inputs=inputs, attr=attr, name=name,
outputs=outputs)
if return_node:
return node
raise NotImplementedError("return_node must be True")
def _convert_to_input(self, tensor, const_name, is_optional=False, dtype=None):
"""in ONNX, input shold come from node, so it must be a string"""
if is_optional and tensor is None:
return None
make_sure(tensor is not None,
"input is required so it couldn't be None")
res = tensor
if isinstance(tensor, list):
res = self.graph.make_const(
make_name(const_name), numpy.array(tensor, dtype))
return res
def _convert_to_attribute(self, tensor, is_optional=False):
if is_optional and tensor is None:
return None
make_sure(tensor is not None,
"input is required so it couldn't be None")
res = tensor
if isinstance(tensor, str):
const_node = self.graph.get_node_by_output(tensor)
res = const_node.get_tensor_value(as_list=True)
make_sure(isinstance(res, list),
"input is an attr, so a list is needed")
return res
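# --- Hedged usage sketch (not part of the original module) ---------------------
# Illustrates how the squeeze/unsqueeze helpers above absorb the opset-13 schema
# change: below opset 13 "axes" stays a node attribute, from opset 13 onwards it
# is materialised as an int64 const input by _convert_to_input. The `builder`
# argument (an instance of the surrounding class wrapping a tf2onnx Graph) and
# the tensor name are assumptions for illustration only; note the methods above
# require return_node=True.
def _example_squeeze_then_unsqueeze(builder, tensor_name):
    squeezed = builder.make_squeeze({"data": tensor_name, "axes": [0]}, return_node=True)
    return builder.make_unsqueeze({"data": squeezed.output[0], "axes": [1]}, return_node=True)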
|
py | 1a4b27322d4f9290feec43bbe6e91b42aff857cf | from lbry.testcase import CommandTestCase
class AddressManagement(CommandTestCase):
async def test_address_list(self):
addresses = await self.out(self.daemon.jsonrpc_address_list())
self.assertEqual(27, len(addresses))
single = await self.out(self.daemon.jsonrpc_address_list(addresses[11]['address']))
self.assertEqual(1, len(single))
self.assertEqual(single[0], addresses[11])
|
py | 1a4b282b49de9a38d9c3f3b091630a0e74fa7af6 | #!/usr/bin/env python
"""
ZetCode wxPython tutorial
In this example, we create a wx.ListBox widget.
author: Jan Bodnar
website: www.zetcode.com
last modified: July 2020
"""
import wx
class Example(wx.Frame):
def __init__(self, *args, **kw):
super(Example, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
panel = wx.Panel(self)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listbox = wx.ListBox(panel)
hbox.Add(self.listbox, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
btnPanel = wx.Panel(panel)
vbox = wx.BoxSizer(wx.VERTICAL)
newBtn = wx.Button(btnPanel, wx.ID_ANY, 'New', size=(90, 30))
renBtn = wx.Button(btnPanel, wx.ID_ANY, 'Rename', size=(90, 30))
delBtn = wx.Button(btnPanel, wx.ID_ANY, 'Delete', size=(90, 30))
clrBtn = wx.Button(btnPanel, wx.ID_ANY, 'Clear', size=(90, 30))
self.Bind(wx.EVT_BUTTON, self.NewItem, id=newBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnRename, id=renBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnDelete, id=delBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnClear, id=clrBtn.GetId())
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
vbox.Add((-1, 20))
vbox.Add(newBtn)
vbox.Add(renBtn, 0, wx.TOP, 5)
vbox.Add(delBtn, 0, wx.TOP, 5)
vbox.Add(clrBtn, 0, wx.TOP, 5)
btnPanel.SetSizer(vbox)
hbox.Add(btnPanel, 0.6, wx.EXPAND | wx.RIGHT, 20)
panel.SetSizer(hbox)
self.SetTitle('wx.ListBox')
self.Centre()
def NewItem(self, event):
text = wx.GetTextFromUser('Enter a new item', 'Insert dialog')
if text != '':
self.listbox.Append(text)
def OnRename(self, event):
sel = self.listbox.GetSelection()
text = self.listbox.GetString(sel)
renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
if renamed != '':
self.listbox.Delete(sel)
item_id = self.listbox.Insert(renamed, sel)
self.listbox.SetSelection(item_id)
def OnDelete(self, event):
sel = self.listbox.GetSelection()
if sel != -1:
self.listbox.Delete(sel)
def OnClear(self, event):
self.listbox.Clear()
def main():
app = wx.App()
ex = Example(None)
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main() |
py | 1a4b28e673fa5dadcd8265b55cd7e25dc40fad14 | import sys, time
import time
from collections import namedtuple
import mxnet as mx
import numpy as np
def evaluate_model(cnn_model, batch_size,
max_grad_norm, learning_rate,
epoch, x_train, y_train, x_dev,
y_dev):
'''
    Train cnn_model with back-propagation and report dev-set accuracy after each epoch.
'''
optimizer='rmsprop'
    print('optimizer', optimizer)
    print('maximum gradient', max_grad_norm)
    print('learning rate (step size)', learning_rate)
    print('epochs to train for', epoch)
# create optimizer
opt = mx.optimizer.create(optimizer)
opt.lr = learning_rate
updater = mx.optimizer.get_updater(opt)
# create logging output
logs = sys.stderr
# For each training epoch
for iteration in range(epoch):
tic = time.time()
num_correct = 0
num_total = 0
# Over each batch of training data
for begin in range(0, x_train.shape[0], batch_size):
batchX = x_train[begin:begin+batch_size]
batchY = y_train[begin:begin+batch_size]
if batchX.shape[0] != batch_size:
continue
cnn_model.data[:] = batchX
cnn_model.label[:] = batchY
# forward
cnn_model.cnn_exec.forward(is_train=True)
# backward
cnn_model.cnn_exec.backward()
# eval on training data
num_correct += sum(batchY == np.argmax(cnn_model.cnn_exec.outputs[0].asnumpy(), axis=1))
num_total += len(batchY)
# update weights
norm = 0
for idx, weight, grad, name in cnn_model.param_blocks:
grad /= batch_size
l2_norm = mx.nd.norm(grad).asscalar()
norm += l2_norm * l2_norm
norm = np.sqrt(norm)
for idx, weight, grad, name in cnn_model.param_blocks:
if norm > max_grad_norm:
grad *= (max_grad_norm / norm)
updater(idx, grad, weight)
# reset gradient to zero
grad[:] = 0.0
# Decay learning rate for this epoch to ensure we are not "overshooting" optima
if iteration % 50 == 0 and iteration > 0:
opt.lr *= 0.5
            print('reset learning rate to %g' % opt.lr, file=logs)
# End of training loop for this epoch
toc = time.time()
train_time = toc - tic
train_acc = num_correct * 100 / float(num_total)
# Evaluate model after this epoch on dev (test) set
num_correct = 0
num_total = 0
# For each test batch
for begin in range(0, x_dev.shape[0], batch_size):
batchX = x_dev[begin:begin+batch_size]
batchY = y_dev[begin:begin+batch_size]
if batchX.shape[0] != batch_size:
continue
cnn_model.data[:] = batchX
cnn_model.cnn_exec.forward(is_train=False)
num_correct += sum(batchY == np.argmax(cnn_model.cnn_exec.outputs[0].asnumpy(), axis=1))
num_total += len(batchY)
dev_acc = num_correct * 100 / float(num_total)
        print('Iter [%d] Train: Time: %.3fs, Training Accuracy: %.3f '
              '--- Dev Accuracy thus far: %.3f' % (iteration, train_time, train_acc, dev_acc), file=logs)
return dev_acc
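# Hedged usage sketch (illustrative only): evaluate_model expects a pre-built
# executor wrapper exposing .data and .label NDArrays, .cnn_exec and .param_blocks,
# plus numpy training/dev arrays. The hyper-parameter values below are placeholders,
# not values taken from the original project.
def _example_training_run(cnn_model, x_train, y_train, x_dev, y_dev):
    return evaluate_model(cnn_model, batch_size=50, max_grad_norm=5.0,
                          learning_rate=0.0005, epoch=10,
                          x_train=x_train, y_train=y_train,
                          x_dev=x_dev, y_dev=y_dev)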
|
py | 1a4b2904cfeeed6ffbccda8d3feaab2a932aba32 | from abc import (
ABC,
abstractmethod
)
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
import asyncio
from enum import (
auto,
Enum,
)
import logging
from multiprocessing import (
Process
)
from typing import (
Any,
Dict,
NamedTuple,
)
from lahja import (
BaseEvent,
)
from trinity.config import (
TrinityConfig
)
from trinity.endpoint import (
TrinityEventBusEndpoint,
)
from trinity.extensibility.events import (
PluginStartedEvent,
)
from trinity.extensibility.exceptions import (
InvalidPluginStatus,
)
from trinity._utils.mp import (
ctx,
)
from trinity._utils.logging import (
setup_log_levels,
setup_queue_logging,
)
from trinity._utils.os import (
friendly_filename_or_url,
)
class PluginStatus(Enum):
NOT_READY = auto()
READY = auto()
STARTED = auto()
STOPPED = auto()
INVALID_START_STATUS = (PluginStatus.NOT_READY, PluginStatus.STARTED,)
class TrinityBootInfo(NamedTuple):
args: Namespace
trinity_config: TrinityConfig
boot_kwargs: Dict[str, Any] = None
class BasePlugin(ABC):
_status: PluginStatus = PluginStatus.NOT_READY
def __init__(self, boot_info: TrinityBootInfo) -> None:
self.boot_info = boot_info
@property
@abstractmethod
def event_bus(self) -> TrinityEventBusEndpoint:
pass
@property
@abstractmethod
def name(self) -> str:
"""
Describe the name of the plugin.
"""
pass
@property
def normalized_name(self) -> str:
"""
The normalized (computer readable) name of the plugin
"""
return friendly_filename_or_url(self.name)
@classmethod
def get_logger(cls) -> logging.Logger:
return logging.getLogger(f'trinity.extensibility.plugin(#{cls.__name__})')
@property
def logger(self) -> logging.Logger:
return self.get_logger()
@property
def running(self) -> bool:
"""
Return ``True`` if the ``status`` is ``PluginStatus.STARTED``, otherwise return ``False``.
"""
return self._status is PluginStatus.STARTED
@property
def status(self) -> PluginStatus:
"""
Return the current :class:`~trinity.extensibility.plugin.PluginStatus` of the plugin.
"""
return self._status
def ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
"""
Set the ``status`` to ``PluginStatus.READY`` and delegate to
:meth:`~trinity.extensibility.plugin.BasePlugin.on_ready`
"""
self._status = PluginStatus.READY
self.on_ready(manager_eventbus)
def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
"""
Notify the plugin that it is ready to bootstrap itself.
The ``manager_eventbus`` refers to the instance of the
:class:`~lahja.endpoint.Endpoint` that the
:class:`~trinity.extensibility.plugin_manager.PluginManager` uses which may or may not
be the same :class:`~lahja.endpoint.Endpoint` as the plugin uses depending on the type
of the plugin. The plugin should use this :class:`~lahja.endpoint.Endpoint` instance to
listen for events *before* the plugin has started.
"""
pass
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
"""
Give the plugin a chance to amend the Trinity CLI argument parser. This hook is called
before :meth:`~trinity.extensibility.plugin.BasePlugin.on_ready`
"""
pass
def start(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.plugin.BasePlugin.do_start` and set ``running``
to ``True``. Broadcast a :class:`~trinity.extensibility.events.PluginStartedEvent` on the
event bus and hence allow other plugins to act accordingly.
"""
if self._status in INVALID_START_STATUS:
raise InvalidPluginStatus(
f"Can not start plugin when the plugin status is {self.status}"
)
self._status = PluginStatus.STARTED
self.do_start()
self.event_bus.broadcast_nowait(
PluginStartedEvent(type(self))
)
self.logger.info("Plugin started: %s", self.name)
def do_start(self) -> None:
"""
Perform the actual plugin start routine. In the case of a `BaseIsolatedPlugin` this method
will be called in a separate process.
This method should usually be overwritten by subclasses with the exception of plugins that
set ``func`` on the ``ArgumentParser`` to redefine the entire host program.
"""
pass
class BaseAsyncStopPlugin(BasePlugin):
"""
    A :class:`~trinity.extensibility.plugin.BaseAsyncStopPlugin` unwinds asynchronously, hence
needs to be awaited.
"""
def __init__(self,
boot_info: TrinityBootInfo,
event_bus: TrinityEventBusEndpoint) -> None:
super().__init__(boot_info)
self._event_bus = event_bus
@property
def event_bus(self) -> TrinityEventBusEndpoint:
return self._event_bus
async def do_stop(self) -> None:
"""
Asynchronously stop the plugin. Should be overwritten by subclasses.
"""
pass
async def stop(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.plugin.BaseAsyncStopPlugin.do_stop` causing the
plugin to stop asynchronously and setting ``running`` to ``False``.
"""
await self.do_stop()
self._status = PluginStatus.STOPPED
class BaseMainProcessPlugin(BasePlugin):
"""
A :class:`~trinity.extensibility.plugin.BaseMainProcessPlugin` overtakes the whole main process
    early, before any of the subsystems have started. In that sense it redefines the whole meaning of the
``trinity`` command.
"""
@property
def event_bus(self) -> TrinityEventBusEndpoint:
raise NotImplementedError('BaseMainProcessPlugins do not have event busses')
class BaseIsolatedPlugin(BasePlugin):
"""
A :class:`~trinity.extensibility.plugin.BaseIsolatedPlugin` runs in an isolated process and
hence provides security and flexibility by not making assumptions about its internal
operations.
Such plugins are free to use non-blocking asyncio as well as synchronous calls. When an
    isolated plugin is stopped, it first receives a SIGINT, followed soon after by a SIGTERM.
It is up to the plugin to handle these signals accordingly.
"""
_process: Process = None
_event_bus: TrinityEventBusEndpoint = None
@property
def process(self) -> Process:
"""
Return the ``Process`` created by the isolated plugin.
"""
return self._process
def start(self) -> None:
"""
Prepare the plugin to get started and eventually call ``do_start`` in a separate process.
"""
self._status = PluginStatus.STARTED
self._process = ctx.Process(
target=self._spawn_start,
)
self._process.start()
self.logger.info("Plugin started: %s (pid=%d)", self.name, self._process.pid)
@abstractmethod
def _spawn_start(self) -> None:
pass
def stop(self) -> None:
"""
        Set the ``status`` to ``STOPPED`` but rely on the
:class:`~trinity.extensibility.plugin_manager.PluginManager` to tear down the process. This
allows isolated plugins to be taken down concurrently without depending on a running
event loop.
"""
self._status = PluginStatus.STOPPED
def _setup_logging(self) -> None:
log_queue = self.boot_info.boot_kwargs['log_queue']
level = self.boot_info.boot_kwargs.get('log_level', logging.INFO)
setup_queue_logging(log_queue, level)
if self.boot_info.args.log_levels:
setup_log_levels(self.boot_info.args.log_levels)
class DebugPlugin(BaseAsyncStopPlugin):
"""
This is a dummy plugin useful for demonstration and debugging purposes
"""
@property
def name(self) -> str:
return "Debug Plugin"
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
arg_parser.add_argument("--debug-plugin", type=bool, required=False)
def handle_event(self, activation_event: BaseEvent) -> None:
self.logger.info("Debug plugin: handle_event called: %s", activation_event)
def do_start(self) -> None:
self.logger.info("Debug plugin: start called")
asyncio.ensure_future(self.count_forever())
async def count_forever(self) -> None:
i = 0
while True:
self.logger.info(i)
i += 1
await asyncio.sleep(1)
async def do_stop(self) -> None:
self.logger.info("Debug plugin: stop called")
|
py | 1a4b2974f24d5d729f45112823a5666d9687cf49 | from datetime import datetime as dt
from common.logger import get_logger
from orchestrator.config import ORDER_EXPIRATION_THRESHOLD_IN_MINUTES
from orchestrator.order_status import OrderStatus
logger = get_logger(__name__)
class TransactionHistoryDAO:
def __init__(self, repo):
self.__repo = repo
def insert_transaction_history(self, obj_transaction_history):
transaction_history = obj_transaction_history.get_transaction_history()
query_response = self.__repo.execute(
"INSERT INTO transaction_history (username, order_id, order_type, status, payment_id, payment_method, "
"raw_payment_data, transaction_hash, row_created, row_updated)"
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
"ON DUPLICATE KEY UPDATE payment_id = %s, payment_method = %s, raw_payment_data = %s, transaction_hash = %s, row_updated = %s",
[
transaction_history["username"],
transaction_history["order_id"],
transaction_history["order_type"],
transaction_history["status"],
transaction_history["payment_id"],
transaction_history["payment_method"],
transaction_history["raw_payment_data"],
transaction_history["transaction_hash"],
dt.utcnow(),
dt.utcnow(),
transaction_history["payment_id"],
transaction_history["payment_method"],
transaction_history["raw_payment_data"],
transaction_history["transaction_hash"],
dt.utcnow()
]
)
if query_response[0] == 1:
return True
return False
def get_order_id_for_expired_transaction(self):
params = [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,
OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES]
order_id_raw_data = self.__repo.execute(
"SELECT order_id FROM transaction_history WHERE status IN (%s, %s, %s) AND "
"TIMESTAMPDIFF(MINUTE, row_created, NOW()) > %s ",
[OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,
OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES])
list_of_order_id = [rec["order_id"] for rec in order_id_raw_data]
return list_of_order_id
def update_transaction_status(self, list_of_order_id, status):
if len(list_of_order_id) == 0:
return "No order id found"
temp_holder = ("%s, " * len(list_of_order_id))[:-2]
params = [status] + list_of_order_id + [OrderStatus.PAYMENT_INITIATED.value,
OrderStatus.PAYMENT_INITIATION_FAILED.value,
OrderStatus.PAYMENT_EXECUTION_FAILED.value]
update_transaction_status_response = self.__repo.execute(
"UPDATE transaction_history SET status = %s WHERE order_id IN (" + temp_holder + ") AND status IN (%s, %s, %s)",
params)
logger.info(f"update_transaction_status: {update_transaction_status_response}")
return update_transaction_status_response
def get_transaction_details_for_given_order_id(self, order_id):
transaction_data = self.__repo.execute(
"SELECT username, order_id, order_type, status, payment_id, payment_type, payment_method, raw_payment_data, "
"transaction_hash FROM transaction_history WHERE order_id = %s", [order_id])
if len(transaction_data) == 0:
raise Exception("Order Id does not exist.")
return transaction_data[0]
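# Hedged usage sketch (illustrative only): the DAO wraps a repository object that
# exposes execute(query, params). The repository instance and the replacement
# status value below are placeholders, not part of the original module.
def _example_expire_stale_orders(repo, expired_status):
    dao = TransactionHistoryDAO(repo)
    stale_order_ids = dao.get_order_id_for_expired_transaction()
    return dao.update_transaction_status(stale_order_ids, expired_status)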
|
py | 1a4b299a335dffd04973301324200f878cfc5ee8 | """
This file offers the methods to automatically retrieve the graph Marinobacter salinus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MarinobacterSalinus(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Marinobacter salinus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Marinobacter salinus graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MarinobacterSalinus",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
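# Hedged usage example: retrieving the graph with its default settings. On first
# use the edge list is downloaded and preprocessed; afterwards the cached copy in
# cache_path is reused, as described in the docstring above.
def _example_retrieval():
    return MarinobacterSalinus(directed=False, verbose=2)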
|
py | 1a4b2a34f75d2c4c3d181d418c9949f0ee4c39e9 | import abc
import numpy as np
import math
import random
import itertools as it
from hklearn_genetic.board_conflicts import conflict
from deap import tools, gp
class ProblemInterface(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'evaluate') and
callable(subclass.evaluate) and
hasattr(subclass, 'stop_criteria') and
callable(subclass.stop_criteria) and
hasattr(subclass, 'populate') and
callable(subclass.populate) and
hasattr(subclass, 'decode') and
callable(subclass.decode) and
hasattr(subclass, 'crossover') and
callable(subclass.crossover) and
hasattr(subclass, 'mutate') and
callable(subclass.mutate))
@ProblemInterface.register
class IProblem:
"""Evalua las soluciones potenciales del problema"""
def evaluate(self, X):
pass
"""Regresa si la población ha llegado al criterio de paro"""
def stop_criteria(self, X_eval):
pass
"""Crea una poblacion inicial de posibles soluciones"""
def populate(self, n_individuals):
pass
"""Pasa a la población del genotipo al fenotipo"""
def decode(self, X_encoded):
pass
"""Efectúa la cruza con los elementos de la población"""
def crossover(self, X, pc, elitism):
pass
"""Efectúa la mutación con los elementos de la población"""
def mutate(self, X, pm, elitism):
pass
class BaseProblem(IProblem):
def get_crossover_probs(self, n_cross):
return np.random.rand(1 , n_cross)[0,:]
def get_crossover_points(self, length):
return np.random.randint(0, length)
@abc.abstractmethod
def a_eval(self, X_decoded):
pass
def evaluate(self, X):
decoded_rep = self.decode(X)
X_eval = self.a_eval(decoded_rep, X)
return X_eval
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = X.shape[0] // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * X.shape[0])
n_cross = (X.shape[0] - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
cross_point = self.get_crossover_points(X.shape[1] - 1)
son1 = X[2*i + elitism_num,:].copy()
son2 = X[2*i + 1 + elitism_num, :].copy()
son1[cross_point : X.shape[1]] = X[2*i + 1 + elitism_num, cross_point : X.shape[1]].copy()
son2[cross_point : X.shape[1]] = X[2*i + elitism_num, cross_point : X.shape[1]].copy()
X[2*i + elitism_num,:] = son1
X[2*i + 1 + elitism_num,:] = son2
return X
class _BaseGeneticProgrammingProblem(BaseProblem):
def __init__(self, mutation_type = "Branch"):
self.avg_lengths = []
self.mutation_type = mutation_type
def populate(self, n_individuals):
return tools.initRepeat(list, lambda: gp.genHalfAndHalf(self.pset, min_=1, max_=2), n_individuals)
def decode(self, X_encoded):
X_decoded = []
length_sum = 0
for x_i in X_encoded:
tree = gp.PrimitiveTree(x_i)
length_sum += len(tree)
X_decoded += [gp.compile(tree, self.pset)]
self.avg_lengths += [length_sum/len(X_decoded)]
return X_decoded
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = len(X) // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * len(X))
n_cross = (len(X) - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
parent1 = gp.PrimitiveTree(X[2*i + elitism_num])
parent2 = gp.PrimitiveTree(X[2*i + 1 + elitism_num])
offspring = gp.cxOnePoint(parent1, parent2)
if offspring[0].height < self.height_limit:
X[2*i + elitism_num] = offspring[0]
else:
r = random.uniform(0, 1)
X[2*i + elitism_num] = X[2*i + 1 + elitism_num].copy() if r >= 0.5 else X[2*i + elitism_num]
if offspring[1].height < self.height_limit:
X[2*i + 1 + elitism_num] = offspring[1]
else:
r = random.uniform(0, 1)
X[2*i + 1 + elitism_num] = X[2*i + elitism_num].copy() if r >= 0.5 else X[2*i + 1 + elitism_num]
return X
def mutate(self, X, pm, elitism):
if pm > 0:
mutate_m = np.random.uniform(size = (len(X), 1))
mutate_m = mutate_m <= pm
func = lambda pset, type_ : gp.genFull(pset, min_=0, max_=2)
if not elitism:
for i, m in enumerate(mutate_m):
#if m <= 1./len(X[i]):
if m:
if self.mutation_type == "Branch":
offspring = gp.mutUniform(gp.PrimitiveTree(X[i]), func, self.pset)
elif self.mutation_type == "Node":
offspring = gp.mutNodeReplacement(gp.PrimitiveTree(X[i]), self.pset)
if offspring[0].height <= self.height_limit:
X[i] = offspring[0]
else:
elitism_num = math.floor(elitism * len(X))
for i in range(elitism_num, len(X)):
#if mutate_m[i] <= 1./len(X[i]):
if mutate_m[i]:
if self.mutation_type == "Branch":
offspring = gp.mutUniform(gp.PrimitiveTree(X[i]), func, self.pset)
elif self.mutation_type == "Node":
offspring = gp.mutNodeReplacement(gp.PrimitiveTree(X[i]), self.pset)
if offspring[0].height <= self.height_limit:
X[i] = offspring[0]
return X
class SymbolicRegressionProblem(_BaseGeneticProgrammingProblem):
def __init__(self, bounds, pset, real_values, height_limit, stop_thresh = 0.2, mutation_type = "Branch"):
super().__init__(mutation_type)
self.bounds = bounds
self.height_limit = height_limit
self.pset = pset
self.real_values = real_values
self.stop_thresh = stop_thresh
param_values = []
for param_bound in self.bounds:
param_values += [list(np.linspace(param_bound[0], param_bound[1], num = len(real_values)))]
self.points = list(it.product(*param_values))
def a_eval(self, X_decoded, X_encoded):
m = len(self.points)
X_fitness = []
for j, func in enumerate(X_decoded):
try:
s = 0
for i in range(m):
s += (func(*self.points[i]) - self.real_values[i])**2
X_fitness += [- (1./m)*s]
except Exception as e:
print(e)
x_encoded = X_encoded[j]
print(gp.PrimitiveTree(x_encoded))
return np.array(list(zip(X_fitness, list(range(len(X_fitness))))), dtype = [('fitness', float),('index', int)])
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= - self.stop_thresh)[0])
class BitParityCheck(_BaseGeneticProgrammingProblem):
def __init__(self, pset, real_values, height_limit, mutation_type = "Branch"):
super().__init__(mutation_type)
self.height_limit = height_limit
self.pset = pset
self.real_values = real_values
self.points = list(map(list, it.product([False, True], repeat=int(math.log2(len(self.real_values))))))
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= 0)[0])
def a_eval(self, X_decoded, X_encoded):
m = len(self.points)
X_fitness = []
for j, func in enumerate(X_decoded):
try:
X_fitness += [-sum(func(*in_) == out for in_, out in zip(self.points, self.real_values))]
except Exception as e:
print(e)
print(gp.PrimitiveTree(X_encoded[j]))
return X_fitness
class NeutralityProblem(_BaseGeneticProgrammingProblem):
def __init__(self, pset, T, height_limit, terminals, mutation_type = "Branch"):
super().__init__(mutation_type)
self.height_limit = height_limit
self.pset = pset
self.T = T
self.str_terminals = [str(t) for t in terminals]
for t in terminals:
self.pset.addTerminal(t)
self.gene_counts = {t : [] for t in self.str_terminals}
def stop_criteria(self, X_eval):
return []
def a_eval(self, X_decoded, X_encoded):
X_fitness = []
for j, x_i in enumerate(X_decoded):
try:
X_fitness += [-abs(self.T - x_i)]
except Exception as e:
print(e)
print(gp.PrimitiveTree(X_encoded[j]))
for gene in self.gene_counts.keys():
self.gene_counts[gene]+=[0]
for x in X_encoded:
x_tree = gp.PrimitiveTree(x)
x_tree_str = str(x_tree)
for s in x_tree_str:
if s in self.str_terminals:
self.gene_counts[s][-1] += 1
return X_fitness
class _BaseBinaryProblem(BaseProblem):
def __init__(self, thresh, bounds, n_dim = 2, n_prec = 4):
self.bounds = bounds
self.n_dim = n_dim
self.gene_length = math.ceil(math.log2((self.bounds[1] - self.bounds[0])*10**n_prec))
self.thresh = thresh
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.randint(2, size = (n_individuals, self.gene_length*self.n_dim))
def decode(self, X_encoded):
decoded_rep = np.zeros((X_encoded.shape[0], self.n_dim))
for i in range(self.n_dim):
decoded_rep[:,i] = (X_encoded[:, i*self.gene_length : (i + 1)*self.gene_length]@(2**np.arange(X_encoded[:, i*self.gene_length : (i + 1)*self.gene_length].shape[1], dtype = np.float64)[::-1][:, np.newaxis])).T
return self.bounds[0] + decoded_rep*(self.bounds[1] - self.bounds[0])/(2**self.gene_length - 1)
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
mutate_m = self.get_mutation((X.shape[0], X.shape[1]))
mutate_m = mutate_m <= pm
X_bit = X == 1
if not elitism:
X = np.logical_xor(X_bit, mutate_m)
else:
elitism_num = math.floor(elitism * X.shape[0])
X[elitism_num : X.shape[0], :] = np.logical_xor(X_bit, mutate_m)[elitism_num : X.shape[0], :]
X = X.astype(int)
return X
class _BaseIntegerProblem(BaseProblem):
def __init__(self, thresh, n_dim = 2):
self.n_dim = n_dim
self.thresh = thresh
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.randint(self.n_dim, size = (n_individuals, self.n_dim))
def decode(self, X_encoded):
return X_encoded
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
mutate_m = self.get_mutation((X.shape[0], 1))
mutate_m = mutate_m <= pm
if not elitism:
for i, m in enumerate(mutate_m):
if m:
indices = np.random.permutation(X.shape[1])[0 : 2]
X[i,indices[0]], X[i, indices[1]] = X[i, indices[1]], X[i, indices[0]]
else:
elitism_num = math.floor(elitism * X.shape[0])
for i in range(elitism_num, X.shape[0]):
if mutate_m[i]:
indices = np.random.permutation(X.shape[1])[0 : 2]
X[i,indices[0]], X[i, indices[1]] = X[i, indices[1]], X[i, indices[0]]
return X
class _BaseRealProblem(BaseProblem):
def __init__(self, thresh, bounds, rang_param = 0.1, n_dim = 2):
self.n_dim = n_dim
self.thresh = thresh
self.bounds = bounds
self.rang_param = rang_param
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.uniform(self.bounds[0], self.bounds[1] + 0.1, size = (n_individuals, self.n_dim))
def decode(self, X_encoded):
return X_encoded
def get_crossover_points(self, length):
return np.random.uniform(low = -.25 , high = 1.25, size = length)
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = X.shape[0] // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * X.shape[0])
n_cross = (X.shape[0] - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
alphas = self.get_crossover_points(X.shape[1])
X[2*i + elitism_num,:] += alphas * (X[2*i + 1 + elitism_num, :] - X[2*i + elitism_num,:])
X[2*i + 1 + elitism_num,:] += alphas * (X[2*i + elitism_num,:] - X[2*i + 1 + elitism_num, :])
X[2*i + elitism_num,:] = np.clip(X[2*i + elitism_num,:], self.bounds[0], self.bounds[1])
X[2*i + 1 + elitism_num,:] = np.clip(X[2*i + 1 + elitism_num,:], self.bounds[0], self.bounds[1])
return X
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
if not elitism:
elitism = 0
rang = (self.bounds[1] - self.bounds[0])*self.rang_param
mutate_m = self.get_mutation((X.shape[0], X.shape[1]))
mutate_plus_minus = self.get_mutation((X.shape[0], X.shape[1]))
mutate_m[mutate_m <= pm] = 1.
mutate_m[mutate_m < 1.] = 0.
mutate_plus_minus[mutate_plus_minus <= .5] = 1.0
mutate_plus_minus[mutate_plus_minus > .5] = -1.0
elitism_num = math.floor(elitism * X.shape[0])
for i in range(elitism_num, X.shape[0]):
mutate_delta = self.get_mutation((X.shape[1], X.shape[1]))
mutate_delta[mutate_delta <= 1./self.n_dim] = 1.
mutate_delta[mutate_delta < 1.] = 0.
deltas = (mutate_delta @ (2**-np.arange(self.n_dim, dtype = np.float64)[:, np.newaxis])).T
X[i, :] = X[i, :] + mutate_m[i, :] * mutate_plus_minus[i, :] * rang * deltas
X[i, :] = np.clip(X[i, :], self.bounds[0], self.bounds[1])
return X
class BaseNQueen(BaseProblem):
def a_eval(self, X_decoded):
X_fitness = np.zeros(X_decoded.shape[0])
for i, x in enumerate(X_decoded):
X_fitness[i] = -conflict(x)
#print(X_fitness)
return np.array(list(zip(X_fitness, list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class IntegerNQueen(_BaseIntegerProblem, BaseNQueen):
def __init__(self, n_dim = 2):
super().__init__(0, n_dim = n_dim)
class RealNQueen(_BaseRealProblem, BaseNQueen):
def __init__(self, n_dim = 2):
super().__init__(0, (0, 5.), n_dim = n_dim)
def decode(self, X_encoded):
X_decoded = np.zeros(X_encoded.shape, dtype=np.int64)
for i, x in enumerate(X_encoded):
indexed = np.array(list(zip(x, list(range(X_decoded.shape[1])))), dtype = [('real_rep', float),('index', int)])
indexed = np.sort(indexed, order=["real_rep"])
X_decoded[i, :] = indexed["index"]
return X_decoded
class BinaryNQueen(_BaseBinaryProblem, BaseNQueen):
def __init__(self, n_dim = 2, n_prec = 4):
super().__init__(0, (0.01, n_dim), n_dim = n_dim, n_prec=n_prec)
def decode(self, X_encoded):
return np.ceil(super().decode(X_encoded)).astype(int) - 1
class BaseRastrigin(BaseProblem):
def __init__(self):
self.rank = 100.
def a_eval(self, X_decoded):
return np.array(list(zip(self.rank - (10.*self.n_dim + np.sum(X_decoded**2 - 10.*np.cos(2.*np.pi*X_decoded), axis = 1)), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseBeale(BaseProblem):
def __init__(self):
self.rank = 150000.
def a_eval(self, X_decoded):
first_term = (1.5 - X_decoded[:, 0] + X_decoded[:, 0]*X_decoded[:, 1])**2
second_term = (2.25 - X_decoded[:, 0] + X_decoded[:, 0]*(X_decoded[:, 1]**2))**2
third_term = (2.625 - X_decoded[:, 0] + X_decoded[:, 0]*(X_decoded[:, 1]**3))**2
return np.array(list(zip(self.rank - (first_term + second_term + third_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseHimmelblau(BaseProblem):
def __init__(self):
self.rank = 2200.
def a_eval(self, X_decoded):
first_term = (X_decoded[:, 0]**2 + X_decoded[:, 1] - 11.)**2
second_term = (X_decoded[:, 0] + X_decoded[:, 1]**2 - 7.)**2
return np.array(list(zip(self.rank - (first_term + second_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseEggholder(BaseProblem):
def __init__(self):
self.rank = 1200.
def a_eval(self, X_decoded):
first_term = - (X_decoded[:, 1] + 47)*np.sin(np.sqrt(np.abs(X_decoded[:, 0]/2. + (X_decoded[:, 1] + 47))))
second_term = - X_decoded[:, 0]*np.sin(np.sqrt(np.abs(X_decoded[:, 0] - (X_decoded[:, 1] + 47))))
return np.array(list(zip(self.rank - (first_term + second_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BinaryRastrigin(_BaseBinaryProblem, BaseRastrigin):
def __init__(self, n_dim = 2, n_prec = 4):
super().__init__(99.99, (-5.12, 5.12), n_dim=n_dim, n_prec=n_prec)
BaseRastrigin.__init__(self)
class BinaryBeale(_BaseBinaryProblem, BaseBeale):
def __init__(self, n_prec = 4):
super().__init__(149999.99, (-4.5, 4.5), n_dim=2, n_prec=n_prec)
BaseBeale.__init__(self)
class BinaryHimmelblau(_BaseBinaryProblem, BaseHimmelblau):
def __init__(self, n_prec = 4):
super().__init__(2199.99, (-5., 5.), n_dim=2, n_prec=n_prec)
BaseHimmelblau.__init__(self)
class BinaryEggholder(_BaseBinaryProblem, BaseEggholder):
def __init__(self, n_prec = 4):
super().__init__(2157., (-512., 512.), n_dim=2, n_prec=n_prec)
BaseEggholder.__init__(self)
class RealRastrigin(_BaseRealProblem, BaseRastrigin):
def __init__(self, rang_param = .0001, n_dim = 2):
super().__init__(99.99, (-5.12, 5.12), rang_param, n_dim=n_dim)
BaseRastrigin.__init__(self)
class RealBeale(_BaseRealProblem, BaseBeale):
def __init__(self, rang_param = .0001):
super().__init__(149999.99, (-4.5, 4.5), rang_param, n_dim=2)
BaseBeale.__init__(self)
class RealHimmelblau(_BaseRealProblem, BaseHimmelblau):
def __init__(self, rang_param = .001):
super().__init__(2199.99, (-5., 5.), rang_param, n_dim=2)
BaseHimmelblau.__init__(self)
class RealEggholder(_BaseRealProblem, BaseEggholder):
def __init__(self, rang_param = .001):
super().__init__(2157., (-512., 512.), rang_param, n_dim=2)
BaseEggholder.__init__(self)
class RealRastriginPSO(_BaseRealProblem):
def __init__(self, n_dim = 2):
super().__init__(99.99, (-5.12, 5.12), n_dim=n_dim)
class RealBealePSO(_BaseRealProblem):
def __init__(self):
super().__init__(149999.99, (-4.5, 4.5), n_dim=2)
class RealHimmelblauPSO(_BaseRealProblem):
def __init__(self):
super().__init__(2199.99, (-5., 5.), n_dim=2)
class RealEggholderPSO(_BaseRealProblem):
def __init__(self):
super().__init__(2157., (-512., 512.), n_dim=2) |
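# Hedged usage sketch (illustrative only): one round of the populate / score /
# crossover / mutate cycle on the binary Rastrigin problem. Population size and
# rates are placeholders; a_eval is called directly here because
# BaseProblem.evaluate passes the encoded population as a second argument, which
# the fitness functions of the non-GP problems above do not accept.
def _example_rastrigin_round(population_size=20):
    problem = BinaryRastrigin(n_dim=2, n_prec=4)
    population = problem.populate(population_size)
    scored = problem.a_eval(problem.decode(population))  # structured array ('fitness', 'index')
    population = problem.crossover(population, pc=0.9, elitism=0.1)
    population = problem.mutate(population, pm=0.01, elitism=0.1)
    return population, scored, problem.stop_criteria(scored["fitness"])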
py | 1a4b2c6136103c7b2cf7cc9c354dbe22de6da61a | import re
import subprocess
import pygit2
tag_ref = re.compile('^refs/tags/')
committer = pygit2.Signature('Git Worker', '[email protected]')
def git_show(path, commitish, obj):
repo = pygit2.Repository(path)
commit, _ = repo.resolve_refish(commitish)
data = (commit.tree / obj).read_raw().decode()
return data
def delete_tag(path, tag):
repo = pygit2.Repository(path)
repo.references.delete(f'refs/tags/{tag}')
def git_tag(repo):
return [repo.references[r] for r in repo.references if tag_ref.match(r)]
def git_commit(repo, file_paths, author=None, message="[OpenNeuro] Recorded changes", parents=None):
"""Commit array of paths at HEAD."""
# Refresh index with git-annex specific handling
annex_command = ["git-annex", "add"] + file_paths
subprocess.run(annex_command, check=True, cwd=repo.workdir)
repo.index.add_all(file_paths)
repo.index.write()
return git_commit_index(repo, author, message, parents)
def git_commit_index(repo, author=None, message="[OpenNeuro] Recorded changes", parents=None):
"""Commit any existing index changes."""
if not author:
author = committer
if parents is None:
parent_commits = [repo.head.target.hex]
else:
parent_commits = parents
tree = repo.index.write_tree()
commit = repo.create_commit(
'refs/heads/master', author, committer, message, tree, parent_commits)
repo.head.set_target(commit)
return commit
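# Hedged usage sketch (illustrative only): read a file from a commit, then record
# a change through git-annex and plain git. The dataset path, ref and file names
# are placeholders, not values from the original project.
def _example_round_trip(dataset_path):
    description = git_show(dataset_path, "HEAD", "dataset_description.json")
    repo = pygit2.Repository(dataset_path)
    new_commit = git_commit(repo, ["CHANGES"], message="[OpenNeuro] Example change")
    return description, new_commit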
|
py | 1a4b2dd5ec4c14cc270b8fea09eb81c72805e907 | import pdb
import pickle
import pandas as pd
import os
import numpy as np
import sys
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
from config_u import base
project_base_path = base
current_path = "scripts/cpmg/automated_metabolite_quantification/"
sys.path.insert(1, os.path.join(project_base_path, current_path))
from data_utils import split_to_kfold, spectrum2ppm, spectrum_peak_unit_quantification
# load fully quantified samples
datapath_base = os.path.join(project_base_path, "data/raw_data_cpmg/")
with open(os.path.join(datapath_base, "fully_quantified_samples_spectra"), "rb") as f:
c_spectra = pickle.load(f)
with open(os.path.join(datapath_base, "fully_quantified_samples_quantification"), "rb") as f:
c_quantification = pickle.load(f)
with open(os.path.join(project_base_path, "data/raw_data_cpmg/metabolite_names"), "rb") as f:
metabolite_names = pickle.load(f)
c_statistics = pd.read_pickle(os.path.join(datapath_base, "fully_quantified_samples_statistics"))
# find samples with invalid pathologic classification (i.e. "*")
index = c_statistics.index
condition = c_statistics["Pathologic Classification"] == "*"
invalid_pc_idx = index[condition].tolist()
statistics = c_statistics.iloc[invalid_pc_idx, :].reset_index(drop=True)
spectra = c_spectra[invalid_pc_idx, :]
quant = c_quantification[invalid_pc_idx, :]
# scale CPMG spectra with respect to reference Acetate and sample mass
mass = np.array(statistics["Mass"].tolist()).astype(float)
mass_factor = np.repeat(mass.reshape(-1,1), spectra.shape[1], axis=1)
normalized_spectra = np.divide(spectra, mass_factor)
scaled_spectra = normalized_spectra * spectrum_peak_unit_quantification
# calculate ppm spectra
ppm_spectra = spectrum2ppm(scaled_spectra)
# rename variables to be accessed from other scripts
fq_i_ppm_spectra = ppm_spectra
fq_i_spectra = scaled_spectra
fq_i_statistics = statistics
fq_i_quant = quant |
py | 1a4b2f7ee7520f9f4b2f79c841cc939c3d55b0ea | import os
from pywps import Process
from pywps import LiteralInput
from pywps import ComplexOutput
from pywps import FORMATS, Format
from pywps import configuration
from pywps.app.Common import Metadata
# from c4cds.regridder import Regridder, REGIONAL
from c4cds.subsetter import Subsetter
from c4cds.plotter import Plotter
from c4cds.search import Search
from c4cds.ncdump import ncdump
from c4cds import util
CORDEX_DOMAIN_MAP = {
'Egypt': 'AFR-44i',
'UK': 'EUR-44i',
'France': 'EUR-44i',
'Germany': 'EUR-44i',
}
class CordexSubsetter(Process):
def __init__(self):
inputs = [
LiteralInput('country', 'Country',
abstract='Choose a Country like UK.',
data_type='string',
allowed_values=['UK', 'France', 'Germany', 'Egypt'],
default='UK'),
LiteralInput('model', 'Model',
abstract='Choose a model like MOHC-HadRM3P.',
data_type='string',
allowed_values=['MOHC-HadRM3P'],
default='MOHC-HadRM3P'),
LiteralInput('experiment', 'Experiment',
abstract='Choose an experiment like evaluation.',
data_type='string',
allowed_values=['evaluation'],
default='evaluation'),
LiteralInput('variable', 'Variable',
abstract='Choose a variable like tas.',
data_type='string',
allowed_values=['tas', 'tasmax', 'tasmin'],
default='tas'),
LiteralInput('year', 'Match year', data_type='integer',
abstract='File should match this year.',
allowed_values=[1990, 2000, 2010],
default="1990"),
]
outputs = [
ComplexOutput('output', 'Subsetted Dataset',
abstract='Subsetted Dataset.',
as_reference=True,
supported_formats=[FORMATS.NETCDF]),
ComplexOutput('ncdump', 'Metadata',
abstract='ncdump of subsetted Dataset.',
as_reference=True,
supported_formats=[FORMATS.TEXT]),
ComplexOutput('preview', 'Preview',
abstract='Preview of subsetted Dataset.',
as_reference=True,
supported_formats=[Format('image/png')]),
]
super(CordexSubsetter, self).__init__(
self._handler,
identifier='cordex_subsetter',
version='1.0',
title='CORDEX Subsetter',
abstract='CORDEX Subsetter working on the Copernicus C3S CORDEX archive. '
'The selected CORDEX file is subsetted by the bounding-box of a Country '
'using the CDO "sellonlatbox" operator.',
metadata=[
Metadata('CP4CDS Portal', 'https://cp4cds.github.io/'),
Metadata('Documentation',
'https://c4cds-wps.readthedocs.io/en/latest/processes.html#cordex_subsetter',
role=util.WPS_ROLE_DOC),
Metadata('Media',
'https://c4cds-wps.readthedocs.io/en/latest/_static/media/cordex_subsetter_thumbnail.png',
role=util.WPS_ROLE_MEDIA),
],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
search = Search(configuration.get_config_value("data", "cordex_archive_root"))
nc_file = search.search_cordex(
model=request.inputs['model'][0].data,
experiment=request.inputs['experiment'][0].data,
variable=request.inputs['variable'][0].data,
domain=CORDEX_DOMAIN_MAP[request.inputs['country'][0].data],
start_year=request.inputs['year'][0].data,
end_year=request.inputs['year'][0].data,
)
if not nc_file:
raise Exception("Could not find CORDEX file.")
response.update_status('search done.', 10)
# regridding
# regridder = Regridder(
# archive_base=configuration.get_config_value("data", "cordex_archive_root"),
# output_dir=os.path.join(self.workdir, 'out_regrid')
# )
# regridded_file = regridder.regrid(input_file=nc_file, domain_type=REGIONAL)
# response.update_status('regridding done.', 60)
# subset by country
subsetter = Subsetter(
output_dir=os.path.join(self.workdir, 'out_subset')
)
subsetted_file = subsetter.subset_by_country(
nc_file,
country=request.inputs['country'][0].data)
response.outputs['output'].file = subsetted_file
response.update_status('subsetting done.', 70)
# plot preview
title = "{} {} {} {} {}".format(
request.inputs['country'][0].data,
request.inputs['model'][0].data,
request.inputs['experiment'][0].data,
request.inputs['variable'][0].data,
request.inputs['year'][0].data,
)
plotter = Plotter(
output_dir=os.path.join(self.workdir, 'out_plot')
)
preview_file = plotter.plot_preview(subsetted_file, title)
response.outputs['preview'].file = preview_file
response.update_status('plot done.', 80)
# run ncdump
with open(os.path.join(self.workdir, "nc_dump.txt"), 'w') as fp:
response.outputs['ncdump'].file = fp.name
fp.writelines(ncdump(subsetted_file))
response.update_status('ncdump done.', 90)
# done
response.update_status("done.", 100)
return response
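# Hedged deployment sketch (illustrative only): WPS processes like the one above
# are normally registered with a PyWPS Service behind a WSGI server; the wiring
# below is an assumption and not part of the original module.
def _example_service():
    from pywps import Service
    return Service(processes=[CordexSubsetter()])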
|
py | 1a4b301c186cbbc8974ebc338325fbbbe1ba3a83 | import argparse
import logging
import time
import ast
import sys
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from tf_pose.lifting.prob_model import Prob3dPose
from tf_pose.lifting.draw import plot_pose
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation run')
parser.add_argument('--image', type=str, default='./images/p1.jpg')
parser.add_argument('--model', type=str, default='cmu',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. '
'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
args = parser.parse_args()
w, h = model_wh(args.resize)
if w == 0 or h == 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
# estimate human poses from a single image !
image = common.read_imgfile(args.image, None, None)
# image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
if image is None:
logger.error('Image can not be read, path=%s' % args.image)
sys.exit(-1)
t = time.time()
humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
elapsed = time.time() - t
logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
# cv2.imshow('tf-pose-estimation result', image)
# cv2.waitKey()
import matplotlib.pyplot as plt
fig = plt.figure()
a = fig.add_subplot(2, 2, 1)
a.set_title('Result')
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
bgimg = cv2.resize(bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]), interpolation=cv2.INTER_AREA)
# show network output
a = fig.add_subplot(2, 2, 2)
plt.imshow(bgimg, alpha=0.5)
tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
tmp2 = e.pafMat.transpose((2, 0, 1))
tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)
a = fig.add_subplot(2, 2, 3)
a.set_title('Vectormap-x')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
a = fig.add_subplot(2, 2, 4)
a.set_title('Vectormap-y')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
plt.show()
#import sys
    #sys.exit(0)
logger.info('3d lifting initialization.')
poseLifting = Prob3dPose('./tf_pose/lifting/models/prob_model_params.mat')
image_h, image_w = image.shape[:2]
standard_w = 640
standard_h = 480
pose_2d_mpiis = []
visibilities = []
for human in humans:
pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
pose_2d_mpiis.append([(int(x * standard_w + 0.5), int(y * standard_h + 0.5)) for x, y in pose_2d_mpii])
visibilities.append(visibility)
pose_2d_mpiis = np.array(pose_2d_mpiis)
visibilities = np.array(visibilities)
transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)
for i, single_3d in enumerate(pose_3d):
plot_pose(single_3d)
plt.show()
pass |
py | 1a4b303f959f5337c93643da14f25d5434239866 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import constants
def check_subnet_ip(cidr, ip_address, port_owner=''):
"""Validate that the IP address is on the subnet."""
ip = netaddr.IPAddress(ip_address)
net = netaddr.IPNetwork(cidr)
# Check that the IP is valid on subnet. In IPv4 this cannot be the
# network or the broadcast address
if net.version == constants.IP_VERSION_6:
# NOTE(njohnston): In some cases the code cannot know the owner of the
        # port. In these cases port_owner should be an empty string, and we pass
# it through here.
return ((port_owner in (constants.ROUTER_PORT_OWNERS + ('', )) or
ip != net.network) and
ip in net)
else:
return ip != net.network and ip != net.broadcast and ip in net
def check_gateway_invalid_in_subnet(cidr, gateway):
"""Check whether the gw IP address is invalid on the subnet."""
ip = netaddr.IPAddress(gateway)
net = netaddr.IPNetwork(cidr)
    # Check whether the gateway IP is invalid on the subnet.
# If gateway is in the subnet, it cannot be the
# 'network' or the 'broadcast address (only in IPv4)'.
# If gateway is out of subnet, there is no way to
# check since we don't have gateway's subnet cidr.
return (ip in net and
(net.version == constants.IP_VERSION_4 and
ip in (net.network, net[-1])))
def generate_pools(cidr, gateway_ip):
"""Create IP allocation pools for a specified subnet
The Neutron API defines a subnet's allocation pools as a list of
IPRange objects for defining the pool range.
"""
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(cidr)
ip_version = net.version
first = netaddr.IPAddress(net.first, ip_version)
last = netaddr.IPAddress(net.last, ip_version)
if first == last:
# handle single address subnet case
return [netaddr.IPRange(first, last)]
first_ip = first + 1
# last address is broadcast in v4
last_ip = last - (ip_version == 4)
if first_ip >= last_ip:
# /31 lands here
return []
ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
if gateway_ip:
ipset.remove(netaddr.IPAddress(gateway_ip, ip_version))
return list(ipset.iter_ipranges())
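# Hedged usage example: carve allocation pools out of a /24 while excluding the
# gateway, and validate fixed IPs against the same subnet. The addresses below
# are illustrative only.
def _example_ipv4_subnet():
    pools = generate_pools("192.168.1.0/24", "192.168.1.1")
    # -> [IPRange('192.168.1.2', '192.168.1.254')]
    usable = check_subnet_ip("192.168.1.0/24", "192.168.1.50")       # True
    broadcast = check_subnet_ip("192.168.1.0/24", "192.168.1.255")   # False
    return pools, usable, broadcast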
|
py | 1a4b3048bc3bd3dad34953a803474740b100745a | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sedona.core.SpatialRDD import CircleRDD
from sedona.core.enums import GridType, IndexType
from sedona.core.formatMapper import WktReader
from sedona.core.spatialOperator.join_params import JoinParams
from sedona.core.spatialOperator.join_query_raw import JoinQueryRaw
from sedona.core.spatialOperator.range_query_raw import RangeQueryRaw
from tests.test_base import TestBase
import os
from tests.tools import tests_resource
from shapely.wkt import loads
bank_csv_path = os.path.join(tests_resource, "small/points.csv")
areas_csv_path = os.path.join(tests_resource, "small/areas.csv")
class TestOmitPythonJvmSerdeToRDD(TestBase):
expected_pois_within_areas_ids = [['4', '4'], ['1', '6'], ['2', '1'], ['3', '3'], ['3', '7']]
def test_spatial_join_to_spatial_rdd(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
areas_polygon_rdd = WktReader.readToGeometryRDD(self.sc, areas_csv_path, 1, False, False)
poi_point_rdd.analyze()
areas_polygon_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
areas_polygon_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.spatialJoin(poi_point_rdd, areas_polygon_rdd, JoinParams())
sedona_rdd = jvm_sedona_rdd.to_rdd().collect()
assert sedona_rdd.__len__() == 5
def test_distance_join_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
circle_rdd = CircleRDD(poi_point_rdd, 2.0)
circle_rdd.analyze()
poi_point_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
circle_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.DistanceJoinQueryFlat(poi_point_rdd, circle_rdd, False, True)
assert jvm_sedona_rdd.to_rdd().collect().__len__() == 10
def test_spatial_join_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
areas_polygon_rdd = WktReader.readToGeometryRDD(self.sc, areas_csv_path, 1, False, False)
poi_point_rdd.analyze()
areas_polygon_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
areas_polygon_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.SpatialJoinQueryFlat(
poi_point_rdd, areas_polygon_rdd, False, True)
assert jvm_sedona_rdd.to_rdd().collect().__len__() == 5
def test_range_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
poi_point_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
poi_point_rdd.buildIndex(IndexType.QUADTREE, False)
result = RangeQueryRaw.SpatialRangeQuery(
poi_point_rdd, loads("POLYGON((0 0, 0 20, 20 20, 20 0, 0 0))"), True, True
)
rdd = result.to_rdd()
assert rdd.collect().__len__() == 4
|
py | 1a4b309f80b4d78887bda8de3f1527ae055ac5f6 | import torch
import torch.nn as nn
import sys
sys.path.insert(0, '../../../../..')
import libs_layers
class Model(torch.nn.Module):
def __init__(self, input_shape, outputs_count, hidden_count = 512):
super(Model, self).__init__()
self.device = "cpu"
self.layers = [
nn.Linear(input_shape[0], hidden_count),
nn.ReLU(),
libs_layers.NoisyLinearFull(hidden_count, hidden_count//2),
nn.ReLU(),
libs_layers.NoisyLinearFull(hidden_count//2, outputs_count),
nn.Tanh()
]
torch.nn.init.xavier_uniform_(self.layers[0].weight)
torch.nn.init.xavier_uniform_(self.layers[2].weight)
torch.nn.init.uniform_(self.layers[4].weight, -0.3, 0.3)
self.model = nn.Sequential(*self.layers)
self.model.to(self.device)
print("model_actor")
print(self.model)
print("\n\n")
def forward(self, state):
return self.model(state)
def save(self, path):
torch.save(self.model.state_dict(), path + "trained/model_actor.pt")
def load(self, path):
self.model.load_state_dict(torch.load(path + "trained/model_actor.pt", map_location = self.device))
self.model.eval()
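# Hedged usage example: the actor maps a flat observation to actions squashed
# into [-1, 1] by the final Tanh. The observation and action sizes below are
# placeholders, not values from the original project.
def _example_forward():
    model = Model(input_shape=(24,), outputs_count=4)
    state = torch.randn((1, 24))
    return model(state)   # shape (1, 4), values in [-1, 1]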
|
py | 1a4b31f1738cd8decc9061e9050ddaf9c3c0d91a | from labels import LabelsPlugin
from electrum.plugins import hook
class Plugin(LabelsPlugin):
@hook
def load_wallet(self, wallet, window):
self.window = window
self.start_wallet(wallet)
def on_pulled(self, wallet):
self.print_error('on pulled')
self.window._trigger_update_history()
|
py | 1a4b320e34bbb170d966eb85ee277e53716f5f5b | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 CEA
# Pierre Raybaut
# Licensed under the terms of the CECILL License
# (see guidata/__init__.py for details)
"""
All guidata DataItem objects demo
A DataSet object is a set of parameters of various types (integer, float,
boolean, string, etc.) which may be edited in a dialog box thanks to the
'edit' method. Parameters are defined by assigning DataItem objects to a
DataSet class definition: each parameter type has its own DataItem class
(IntItem for integers, FloatItem for floats, StringItem for strings, etc.)
"""
from __future__ import print_function
SHOW = True # Show test in GUI-based test launcher
import tempfile, atexit, shutil, datetime, numpy as np
from guidata.dataset.datatypes import DataSet, BeginGroup, EndGroup
from guidata.dataset.dataitems import (FloatItem, IntItem, BoolItem, ChoiceItem,
MultipleChoiceItem, ImageChoiceItem, FilesOpenItem,
StringItem, TextItem, ColorItem, FileSaveItem,
FileOpenItem, DirectoryItem, FloatArrayItem,
DateItem, DateTimeItem)
# Creating temporary files and registering cleanup functions
TEMPDIR = tempfile.mkdtemp(prefix="test_")
atexit.register(shutil.rmtree, TEMPDIR)
FILE_ETA = tempfile.NamedTemporaryFile(suffix=".eta", dir=TEMPDIR)
atexit.register(FILE_ETA.close)
FILE_CSV = tempfile.NamedTemporaryFile(suffix=".csv", dir=TEMPDIR)
atexit.register(FILE_CSV.close)
class TestParameters(DataSet):
"""
DataSet test
The following text is the DataSet 'comment': <br>Plain text or
<b>rich text<sup>2</sup></b> are both supported,
as well as special characters (α, β, γ, δ, ...)
"""
dir = DirectoryItem("Directory", TEMPDIR)
fname = FileOpenItem("Open file", ("csv", "eta"), FILE_CSV.name)
fnames = FilesOpenItem("Open files", "csv", FILE_CSV.name)
fname_s = FileSaveItem("Save file", "eta", FILE_ETA.name)
string = StringItem("String")
text = TextItem("Text")
float_slider = FloatItem("Float (with slider)",
default=0.5, min=0, max=1, step=0.01, slider=True)
integer = IntItem("Integer", default=5, min=3, max=16, slider=True
).set_pos(col=1)
dtime = DateTimeItem("Date/time", default=datetime.datetime(2010, 10, 10))
date = DateItem("Date", default=datetime.date(2010, 10, 10)).set_pos(col=1)
bool1 = BoolItem("Boolean option without label")
bool2 = BoolItem("Boolean option with label", "Label")
_bg = BeginGroup("A sub group")
color = ColorItem("Color", default="red")
choice = ChoiceItem("Single choice 1",
[('16', "first choice"), ('32', "second choice"),
('64', "third choice")])
mchoice2 = ImageChoiceItem("Single choice 2",
[("rect", "first choice", "gif.png" ),
("ell", "second choice", "txt.png" ),
("qcq", "third choice", "file.png" )]
)
_eg = EndGroup("A sub group")
floatarray = FloatArrayItem("Float array", default=np.ones( (50, 5), float),
format=" %.2e ").set_pos(col=1)
mchoice3 = MultipleChoiceItem("MC type 1",
[ str(i) for i in range(12)]
).horizontal(4)
mchoice1 = MultipleChoiceItem("MC type 2",
["first choice", "second choice",
"third choice"]).vertical(1).set_pos(col=1)
if __name__ == "__main__":
# Create QApplication
import guidata
_app = guidata.qapplication()
e = TestParameters()
e.floatarray[:, 0] = np.linspace( -5, 5, 50)
print(e)
if e.edit():
print(e)
e.view() |
py | 1a4b3288f13e95b037d8ec4988f07395f199a3ba | from django.contrib import admin
from django.urls import path, include
from .views import *
urlpatterns = [
path('staff/all', StaffListView.as_view()),
path('staff/<int:pk>', StaffRetrieveView.as_view()),
path('staff/update/<int:pk>', StaffUpdateView.as_view()),
path('staff/new', StaffCreateView.as_view()),
path('staff/delete/<int:pk>', StaffRetrieveView.as_view()),
path('room/all', RoomListView.as_view()),
path('room/<int:pk>', RoomRetrieveView.as_view()),
path('room/update/<int:pk>', RoomUpdateView.as_view()),
path('room/new', RoomCreateView.as_view()),
path('room/delete/<int:pk>', RoomRetrieveView.as_view()),
path('guest/all', GuestListView.as_view()),
path('guest/<int:pk>', GuestRetrieveView.as_view()),
path('guest/update/<int:pk>', GuestUpdateView.as_view()),
path('guest/new', GuestCreateView.as_view()),
path('guest/delete/<int:pk>', GuestRetrieveView.as_view()),
path('schedule/all', ScheduleListView.as_view()),
path('schedule/<int:pk>', ScheduleRetrieveView.as_view()),
path('schedule/update/<int:pk>', ScheduleUpdateView.as_view()),
path('schedule/new', ScheduleCreateView.as_view()),
path('schedule/delete/<int:pk>', ScheduleRetrieveView.as_view()),
] |
py | 1a4b32ba5ec74a9c126bbe4a07517ac28c48573d | import logging
import os
from quasimodo.parts_of_facts import PartsOfFacts
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.assertion_fusion.trainer import Trainer
from quasimodo.parameters_reader import ParametersReader
save_weights = True
parameters_reader = ParametersReader()
annotations_file = parameters_reader.get_parameter("annotations-file") or "data/training_active_learning.tsv"
save_file = parameters_reader.get_parameter("weights-file") or os.path.dirname(__file__) + "/../temp/weights.tsv"
def _save_weights(parts_of_facts):
annotations = get_annotated_data()
header = parts_of_facts.get_header()
header.append("label")
save = ["\t".join(header)]
for fact in parts_of_facts.get_all_facts():
row = parts_of_facts.get_fact_row(fact)
row.append(annotations.get((fact.get_subject().get(),
fact.get_predicate().get(),
fact.get_object().get(),
str(int(fact.is_negative()))),
-1))
row = [str(x) for x in row]
save.append("\t".join(row))
with open(save_file, "w") as f:
for element in save:
f.write(element + "\n")
class LinearCombinationWeightedSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Linear Combination Per Module Submodule"
def process(self, input_interface):
logging.info("Start linear combining per module submodule")
logging.info("Grouping facts")
parts_of_facts = PartsOfFacts.from_generated_facts(input_interface.get_generated_facts())
if save_weights:
logging.info("Saving weights facts")
_save_weights(parts_of_facts)
logging.info("Training the model...")
trainer = Trainer(save_file)
trainer.train()
logging.info("Generating new facts")
new_generated_facts = []
for fact in parts_of_facts.get_all_facts():
new_generated_facts.append(parts_of_facts.get_generated_fact_with_score_from_classifier(fact, trainer))
new_generated_facts = sorted(new_generated_facts,
key=lambda x: -sum([score[0] for score in x.get_score().scores]))
return input_interface.replace_generated_facts(new_generated_facts)
def get_annotated_data():
annotations = dict()
with open(annotations_file) as f:
for line in f:
line = line.strip().split("\t")
annotations[(line[0], line[1], line[2], line[3])] = line[4]
return annotations
|
py | 1a4b3330fd6bb82e01a1c00233cf85dfc2ccfcb1 | import sys, os
import numpy as np
import time
import gym
import tensorflow as tf
from spinup.utils.logx import EpochLogger
from common_utils import *
from core import *
# configure gpu use and supress tensorflow warnings
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
tf_config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
"""
def sac(env_fn, logger_kwargs=dict(), network_params=dict(), rl_params=dict()):
# env params
thresh = rl_params['thresh']
# control params
seed = rl_params['seed']
epochs = rl_params['epochs']
steps_per_epoch = rl_params['steps_per_epoch']
replay_size = rl_params['replay_size']
batch_size = rl_params['batch_size']
start_steps = rl_params['start_steps']
max_ep_len = rl_params['max_ep_len']
save_freq = rl_params['save_freq']
render = rl_params['render']
# rl params
gamma = rl_params['gamma']
polyak = rl_params['polyak']
lr = rl_params['lr']
grad_clip_val = rl_params['grad_clip_val']
# entropy params
alpha = rl_params['alpha']
target_entropy = rl_params['target_entropy']
logger = EpochLogger(**logger_kwargs)
if save_freq is not None:
logger.save_config(locals())
train_env, test_env = env_fn(), env_fn()
obs = train_env.observation_space
act = train_env.action_space
tf.set_random_seed(seed)
np.random.seed(seed)
train_env.seed(seed)
train_env.action_space.np_random.seed(seed)
test_env.seed(seed)
test_env.action_space.np_random.seed(seed)
# get the size after resize
obs_dim = network_params['input_dims']
act_dim = act.shape[0]
# init a state buffer for storing last m states
train_state_buffer = StateBuffer(m=obs_dim[2])
test_state_buffer = StateBuffer(m=obs_dim[2])
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
mu, pi, logp_pi, q1_a, q2_a = build_models(x_ph, a_ph, act, act_dim, network_params)
with tf.variable_scope('main', reuse=True):
# compose q with pi, for pi-learning
_, _, _, q1_pi, q2_pi = build_models(x_ph, pi, act, act_dim, network_params)
# get actions and log probs of actions for next states, for Q-learning
_, pi_next, logp_pi_next, _, _ = build_models(x2_ph, a_ph, act, act_dim, network_params)
# Target value network
with tf.variable_scope('target'):
_, _, _, q1_pi_targ, q2_pi_targ = build_models(x2_ph, pi_next, act, act_dim, network_params)
# alpha Params
if target_entropy == 'auto':
target_entropy = tf.cast(-act_dim, tf.float32)
else:
target_entropy = tf.cast(target_entropy, tf.float32)
log_alpha = tf.get_variable('log_alpha', dtype=tf.float32, initializer=0.0)
if alpha == 'auto': # auto tune alpha
alpha = tf.exp(log_alpha)
else: # fixed alpha
alpha = tf.get_variable('alpha', dtype=tf.float32, initializer=alpha)
# Count variables
var_counts = tuple(count_vars(scope) for scope in ['log_alpha',
'main/pi',
'main/q1',
'main/q2',
'main'])
print("""\nNumber of other parameters:
alpha: %d,
pi: %d,
q1: %d,
q2: %d,
total: %d\n"""%var_counts)
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi, q2_pi)
min_q_pi_targ = tf.minimum(q1_pi_targ, q2_pi_targ)
# Targets for Q and V regression
q_backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*(min_q_pi_targ - alpha*logp_pi_next))
# critic losses
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1_a)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2_a)**2)
value_loss = q1_loss + q2_loss
# Soft actor losses
pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi)
# alpha loss for temperature parameter
alpha_backup = tf.stop_gradient(logp_pi + target_entropy)
alpha_loss = -tf.reduce_mean(log_alpha * alpha_backup)
# Policy train op
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
if grad_clip_val is not None:
gvs = pi_optimizer.compute_gradients(pi_loss, var_list=get_vars('main/pi'))
capped_gvs = [(ClipIfNotNone(grad, grad_clip_val), var) for grad, var in gvs]
train_pi_op = pi_optimizer.apply_gradients(capped_gvs)
else:
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
with tf.control_dependencies([train_pi_op]):
if grad_clip_val is not None:
gvs = value_optimizer.compute_gradients(value_loss, var_list=get_vars('main/q'))
capped_gvs = [(ClipIfNotNone(grad, grad_clip_val), var) for grad, var in gvs]
train_value_op = value_optimizer.apply_gradients(capped_gvs)
else:
train_value_op = value_optimizer.minimize(value_loss, var_list=get_vars('main/q'))
alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
with tf.control_dependencies([train_value_op]):
train_alpha_op = alpha_optimizer.minimize(alpha_loss, var_list=get_vars('log_alpha'))
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# All ops to call during one training step
step_ops = [pi_loss, q1_loss, q2_loss, q1_a, q2_a, logp_pi, target_entropy, alpha_loss, alpha,
train_pi_op, train_value_op, train_alpha_op, target_update]
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session(config=tf_config)
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
if save_freq is not None:
logger.setup_tf_saver(sess, inputs={'x_ph': x_ph, 'a_ph': a_ph},
outputs={'mu': mu, 'pi': pi, 'q1_a': q1_a, 'q2_a': q2_a})
def get_action(state, deterministic=False):
state = state.astype('float32') / 255.
act_op = mu if deterministic else pi
return sess.run(act_op, feed_dict={x_ph: [state]})[0]
def reset(env, state_buffer):
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
o = process_image_observation(o, obs_dim, thresh)
state = state_buffer.init_state(init_obs=o)
return o, r, d, ep_ret, ep_len, state
def test_agent(n=10, render=True):
for j in range(n):
o, r, d, ep_ret, ep_len, test_state = reset(test_env, test_state_buffer)
if render: test_env.render()
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(test_state, True))
o = process_image_observation(o, obs_dim, thresh)
test_state = test_state_buffer.append_state(o)
ep_ret += r
ep_len += 1
if render: test_env.render()
if render: test_env.close()
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len, state = reset(train_env, train_state_buffer)
total_steps = steps_per_epoch * epochs
save_iter = 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = get_action(state)
else:
a = train_env.action_space.sample()
# Step the env
o2, r, d, _ = train_env.step(a)
o2 = process_image_observation(o2, obs_dim, thresh)
next_state = train_state_buffer.append_state(o2)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(state, a, r, next_state, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
state = next_state
if d or (ep_len == max_ep_len):
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
"""
for j in range(ep_len):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done'],
}
outs = sess.run(step_ops, feed_dict)
logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],
Q1Vals=outs[3], Q2Vals=outs[4], LogPi=outs[5], TargEntropy=outs[6],
LossAlpha=outs[7], Alpha=outs[8])
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len, state = reset(train_env, train_state_buffer)
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if save_freq is not None:
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': train_env}, itr=save_iter)
save_iter+=1
# Test the performance of the deterministic version of the agent.
test_agent(n=2, render=render)
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('LogPi', average_only=True)
logger.log_tabular('TargEntropy', average_only=True)
logger.log_tabular('Alpha', average_only=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('LossAlpha', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
from spinup.utils.run_utils import setup_logger_kwargs
network_params = {
'input_dims':[96,96,4],
'conv_filters':(16, 32),
'kernel_width':(8,4),
'strides':(4,2),
'pooling':'none',
'pooling_width':2,
'pooling_strides':1,
'dense_units':(512,),
'hidden_activation':'relu',
'output_activation':'linear',
'batch_norm':False,
'dropout':0.0
}
rl_params = {
# env params
'env_name':'CarRacing-v0',
'thresh':False,
# control params
'seed':int(0),
'epochs':int(50),
'steps_per_epoch':5000,
'replay_size':int(1e5),
'batch_size':64,
'start_steps':4000,
'max_ep_len':1000,
'save_freq':5,
'render':True,
# rl params
'gamma':0.99,
'polyak':0.995,
'lr':0.001,
'grad_clip_val':None,
# entropy params
'alpha': 'auto', # fixed or auto balance
'target_entropy':'auto', # fixed or auto define with act_dim
}
saved_model_dir = '../../saved_models'
logger_kwargs = setup_logger_kwargs(exp_name='sac_cont_image_' + rl_params['env_name'], seed=rl_params['seed'], data_dir=saved_model_dir, datestamp=False)
env = gym.make(rl_params['env_name'])
sac(lambda:env, logger_kwargs=logger_kwargs,
network_params=network_params, rl_params=rl_params)
|
py | 1a4b335787454d91fe1f98cb839c89a2cc78594e | #!/usr/bin/env python
"""
_Template_
Template class for all Step Template implementations to inherit and implement
the API
"""
import os
from WMCore.WMSpec.WMStep import WMStepHelper
from WMCore.WMSpec.ConfigSectionTree import nodeName
class CoreHelper(WMStepHelper):
"""
_CoreHelper_
Helper API for core settings
"""
def stepName(self):
"""
_stepName_
Get the name of the step
"""
return nodeName(self.data)
def addEnvironmentVariable(self, varname, setting):
"""
_addEnvironmentVariable_
add a key = value style setting to the environment for this
step
"""
setattr(self.data.environment.variables, varname, setting)
return
def addEnvironmentPath(self, pathname, setting):
"""
_addEnvironmentPath_
add a key = value1:value2:value3 environment setting to this step
"""
if getattr(self.data.environment.paths, pathname, None) == None:
setattr(self.data.environment.paths, pathname, [])
pathentry = getattr(self.data.environment.paths, pathname)
pathentry.append(setting)
return
def environment(self):
"""
_environment_
Get the environment settings for this step
"""
return self.data.environment
def addDirectory(self, dirName):
"""
_addDirectory_
Add a subdirectory structure to the template that will be built by
the builder
"""
split = dirName.split("/")
split = [ x for x in split if x.strip() != "" ]
dirs = getattr(self.data.build.directories, self.stepName())
for subdir in split:
exists = getattr(dirs, subdir, None)
if exists == None:
dirs.section_(subdir)
dirs = getattr(dirs, subdir)
return dirs
def addFile(self, fileName, newLocation = None):
"""
_addFile_
Add a file to the job at build time. This file must be
a local filesystem file available at fileName.
An optional location within the step can be specified which
may include a path structure that gets translated into calls
to addDirectory
"""
dirs = getattr(self.data.build.directories, self.stepName())
if newLocation != None:
filename = os.path.basename(newLocation)
dirname = os.path.dirname(newLocation)
dirs = self.addDirectory(dirname)
setattr(dirs, filename, { "Source" : fileName, "Target" : filename})
else:
filename = os.path.basename(fileName)
setattr(dirs, filename, {"Target" : filename, "Source" : fileName })
return
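    # Illustrative example (an assumption, not taken from the original docs): given a
    # helper instance for this step, the calls below would register a local file to be
    # copied into a "cfg/python" sub-tree of the step's build area, creating the
    # intermediate directories via addDirectory.
    #
    #     helper.addFile("/tmp/pset.py", newLocation="cfg/python/pset.py")
    #     helper.addDirectory("logs/archive")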
def directoryStructure(self):
"""
_directoryStructure_
Util to retrieve the directory structure
"""
return self.data.build.directories
class Template:
"""
_Template_
Base interface definition for any WMStep Template
"""
def __init__(self):
pass
def __call__(self, step):
"""
_operator(step)_
Install the template on the step instance provided
"""
self.coreInstall(step)
self.install(step)
def coreInstall(self, step):
"""
_coreInstall_
Install attributes common to all steps
"""
# Environment settings to pass to the step
step.section_("environment")
step.environment.section_("variables")
step.environment.section_("paths")
# Directory structure and files to be included in the job
# beyond those that would be added by a Step Specific builder
# Step Specific subclasses can simply append to these to get files
# and dirs into the job
step.section_("build")
step.build.section_("directories")
step.build.directories.section_(nodeName(step))
def install(self, step):
"""
_install_
Override this method to install the required attributes
in the step Instance provided
"""
msg = "WMSpec.Steps.Template.install method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg)
def helper(self, step):
"""
_helper_
Wrap the step instance in a helper class tailored to this particular
step type
"""
msg = "WMSpec.Steps.Template.helper method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg)
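# Illustrative sketch (an assumption, not code from WMCore): a concrete template
# subclasses Template and overrides install() and helper(), e.g.
#
#     class EchoTemplate(Template):
#         def install(self, step):
#             step.section_("application")
#             step.application.command = "echo hello"
#
#         def helper(self, step):
#             return CoreHelper(step)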
|
py | 1a4b340280d823bd2a33bfac0889edfce12298de | from typing import List
from cynergy import container
class Example(object):
pass
class Example1(object):
pass
class Example2(object):
pass
class Example3(object):
pass
class Example4(object):
pass
class Main(object):
def __init__(self, examples: List[Example], examples1: List[Example1]):
self.examples1 = examples1
self.examples = examples
class Main2(object):
def __init__(self, examples: List[Example]):
self.examples = examples
def test_register_multiple():
container.register_many(Example, [Example1, Example2])
instance = container.get(List[Example])
assert type(instance) is list
assert len(instance) == 2
assert type(instance[0]) is Example1
assert type(instance[1]) is Example2
def test_multiple_list_arguments():
container._clear_all()
container.register_many(Example, [Example2, Example3])
container.register_many(Example1, [Example3, Example4])
instance = container.get(Main)
assert type(instance) is Main
assert len(instance.examples) == 2
assert len(instance.examples1) == 2
assert type(instance.examples[0]) is Example2
assert type(instance.examples[1]) is Example3
assert type(instance.examples1[0]) is Example3
assert type(instance.examples1[1]) is Example4
class MainWrapper(object):
def __init__(self, main: Main):
self.main = main
def test_multiple_list_arguments_with_wrap():
container._clear_all()
container.register_many(Example, [Example2, Example3])
container.register_many(Example1, [Example3, Example4])
instance = container.get(MainWrapper)
assert type(instance) is MainWrapper
assert len(instance.main.examples) == 2
assert len(instance.main.examples1) == 2
assert type(instance.main.examples[0]) is Example2
assert type(instance.main.examples[1]) is Example3
assert type(instance.main.examples1[0]) is Example3
assert type(instance.main.examples1[1]) is Example4
def test_register_multiple_when_one_instance_is_already_registered():
container._clear_all()
ex1 = Example2()
container.register(Example1, ex1)
container.register_many(Example, [Example1, Example3])
instance = container.get(Main2)
assert type(instance) is Main2
assert len(instance.examples) == 2
assert instance.examples[0] == ex1
assert type(instance.examples[1]) is Example3
|
py | 1a4b34894044272ceb52139557e8efab3c9b2aa9 | # coding: utf-8
"""
Feedback Submissions
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.objects.feedback_submissions.api_client import ApiClient
from hubspot.crm.objects.feedback_submissions.exceptions import ApiTypeError, ApiValueError # noqa: F401
class AssociationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_all(self, feedback_submission_id, to_object_type, **kwargs): # noqa: E501
"""List associations of a feedback submission by type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all(feedback_submission_id, to_object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str feedback_submission_id: (required)
:param str to_object_type: (required)
:param str after: The paging cursor token of the last successfully read resource will be returned as the `paging.next.after` JSON property of a paged response containing more results.
:param int limit: The maximum number of results to display per page.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CollectionResponseAssociatedIdForwardPaging
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_all_with_http_info(feedback_submission_id, to_object_type, **kwargs) # noqa: E501
def get_all_with_http_info(self, feedback_submission_id, to_object_type, **kwargs): # noqa: E501
"""List associations of a feedback submission by type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_with_http_info(feedback_submission_id, to_object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str feedback_submission_id: (required)
:param str to_object_type: (required)
:param str after: The paging cursor token of the last successfully read resource will be returned as the `paging.next.after` JSON property of a paged response containing more results.
:param int limit: The maximum number of results to display per page.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CollectionResponseAssociatedIdForwardPaging, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["feedback_submission_id", "to_object_type", "after", "limit"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method get_all" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'feedback_submission_id' is set
if self.api_client.client_side_validation and ("feedback_submission_id" not in local_var_params or local_var_params["feedback_submission_id"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `feedback_submission_id` when calling `get_all`") # noqa: E501
# verify the required parameter 'to_object_type' is set
if self.api_client.client_side_validation and ("to_object_type" not in local_var_params or local_var_params["to_object_type"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `to_object_type` when calling `get_all`") # noqa: E501
collection_formats = {}
path_params = {}
if "feedback_submission_id" in local_var_params:
path_params["feedbackSubmissionId"] = local_var_params["feedback_submission_id"] # noqa: E501
if "to_object_type" in local_var_params:
path_params["toObjectType"] = local_var_params["to_object_type"] # noqa: E501
query_params = []
if "after" in local_var_params and local_var_params["after"] is not None: # noqa: E501
query_params.append(("after", local_var_params["after"])) # noqa: E501
if "limit" in local_var_params and local_var_params["limit"] is not None: # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"]) # noqa: E501
# Authentication setting
auth_settings = ["hapikey"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/objects/feedback_submissions/{feedbackSubmissionId}/associations/{toObjectType}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CollectionResponseAssociatedIdForwardPaging", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
|
py | 1a4b35060fe1a85d1ab99897188727bd4c7c7d46 | """
Usage:
# From tensorflow/models/
# Create train data:
python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=data/train.record
# Create test data:
python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=data/test.record
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
def class_text_to_int(row_label):
if row_label == 'ship':
return 1
else:
        return None
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
path = os.path.join(os.getcwd(), 'images')
examples = pd.read_csv(FLAGS.csv_input)
grouped = split(examples, 'filename')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
output_path = os.path.join(os.getcwd(), FLAGS.output_path)
print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
tf.app.run()
|
py | 1a4b3560585e9045ec536b70de51ea7d38b4491b | import enum
from ipaddress import IPv4Address
import yaml
from CybORG import CybORG
from CybORG.Emulator.AWS import AWSConfig
def enum_representer(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', f'{str(data.name)}')
def ipv4_representer(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', f'{str(data)}')
yaml.add_multi_representer(enum.Enum, enum_representer)
yaml.add_representer(IPv4Address, ipv4_representer)
scenario = '/home/max/PycharmProjects/Autonomous-Cyber-Ops/CybORG/Shared/Scenarios/SingleHostScenario.yaml'
image = "Velociraptor_Server"
sm = {'Hosts': {'Test_Host': {'image': image}}}
cyborg = CybORG(scenario, environment='aws', env_config={
"config": AWSConfig.load_and_setup_logger(test=True),
"create_tunnel": False
})
#This checks that the returned true state contains all of the required information,
#prints it, and dumps the data into the output file.
try:
info_required = {'Test_Host': {'User_info': 'All',
'System_info': 'All',
'Processes': 'All',
'Files': ['/root', '/bin', '/sbin', '/etc', '/home', '/usr/sbin/', '/usr/bin/']}}
true_state = cyborg.get_true_state(info_required)
true_state.data.pop('success')
assert true_state.data != {}
for key, data in true_state.data.items():
if "Interface" in data:
data.pop("Interface")
if 'Processes' in data:
for proc in data['Processes']:
if 'Known Process' in proc:
proc.pop('Known Process')
if 'Known Path' in proc:
proc.pop('Known Path')
if 'System info' in data and 'Hostname' in data['System info']:
data['System info'].pop('Hostname')
if 'User Info' in data:
for user in data['User Info']:
if 'Groups' in user:
for group in user['Groups']:
if 'Builtin Group' in group:
group.pop('Builtin Group')
print(true_state)
with open(f'{image}_image.yaml', 'w') as outfile:
yaml.dump(true_state.data, outfile, default_flow_style=False)
finally:
cyborg.shutdown(teardown=True)
|
py | 1a4b35639ee9c442b7fcf9d0460c93bf0225f075 | # MIT License
#
# Copyright (c) 2018 Michal Czyz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python3
import logging
import ply.yacc as yacc
import EvalLexer
# As defined in yacc.py
# p[0] === self.slice[n].value
# There is no type getter
# So use self.slice[n].type
parsed_digits = []
all_digits = []
rule_a = []
rule_b = []
class MadeUpClass:
def __init__(self, data):
self.data = data
def __str__(self):
return str(self.data)
def __repr__(self):
return '__str__:MadeUpClass.data = ' + str(self)
def p_error(p):
if not p:
print("SYNTAX ERROR AT EOF.")
def main(grammar_num):
if grammar_num == 0:
def p_A(t):
'''A : B'''
def p_B(t):
'''B : symbol'''
elif grammar_num == 1:
def p_A(t):
'''A : B'''
def p_B(t):
'''B : digit'''
elif grammar_num == 2:
def p_A(p):
'''A : B
| A B'''
# To append B in both alts
if len(p) == 2:
p[0] = p[1]
all_digits.append(p[1])
elif len(p) == 3:
p[0] = p[1]
all_digits.append(p[1])
p[0] = p[2]
all_digits.append(p[2])
else:
pass
def p_B(p):
'''B : digit'''
p[0] = p[1] # To propagate value
parsed_digits.append(p[0])
def p_C(p):
'''C : symbol
| empty'''
p[0] = p[1]
        # This makes the argument optional, but then it is always optional, which is not ideal.
def p_empty(p):
'''empty : '''
pass
elif grammar_num == 3:
def p_A(p):
''' A : B
| A B'''
# To append B in both alts
if len(p) == 2:
p[0] = p[1]
rule_a.append(p[1])
elif len(p) == 3:
p[0] = p[1],p[2]
#p[0] = p[2]
#rule_a.append( (p[1],p[2]) )
def p_B(p):
''' B : id
| number'''
data = (p.slice[1].value, p.slice[1].type)
made_up_object = MadeUpClass(data)
print(made_up_object)
#p[0] = (p[1],'B rule hit',p)
p[0] = made_up_object
print('This is a very special print, look at it!' + str(p.slice[0].value))
print('This is a very special print, look at it!' + str(p.slice[1].value))
print('This is a very special print, look at it!' + str(p.slice[1].type))
rule_b.append(p[0])
else:
def p_A(t):
'''A : B'''
def p_B(t):
'''B : symbol'''
tokens = EvalLexer.tokens
eval_parser = yacc.yacc()
return eval_parser
def parse(data, debug=0):
eval_parser.error = 0
t = eval_parser.parsedebug(data, debug=debug)
if eval_parser.error:
return None
return t
|
py | 1a4b36b9afdd47c972f2a64d89b433b1bfc54636 | import hashlib
import requests
from datetime import datetime, timedelta
from .filter import McDailyFilter
class McDailyAccount:
def __init__(self):
""" User info """
self.username = '' # Username
self.password = '' # Password
self.access_token = '' # Token
self.param_string = '' # username + password
self.card_no = '' # Card no
""" System info """
self.str1 = datetime.strftime(datetime.now(), '%Y/%m/%d %H:%M:%S') # Device Time
self.str2 = '2.2.0' # App Version
self.str3 = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S') # Call time
self.model_id = 'Pixel XL' # Model ID
self.os_version = '9' # Android OS Version
self.platform = 'Android' # platform
self.device_uuid = 'device_uuid' # Device Uuid
self.order_no = self.device_uuid + self.str3 # Order No
""" Request json data """
self.json = {
"access_token" : self.access_token,
"source_info" : {
"app_version" : self.str2,
"device_time" : self.str1,
"device_uuid" : self.device_uuid,
"model_id" : self.model_id,
"os_version" : self.os_version,
"platform" : self.platform,
}
}
def login(self, username, password):
self.username = username
self.password = password
self.param_string = username + password
""" Mask = md5('Mc' + order_no + platform + os_version + model_id + device_uuid + str1 + str2 + param_string + 'Donalds') """
data = 'Mc%s%s%s%s%s%s%s%sDonalds' % (
self.order_no,
self.platform,
self.os_version,
self.model_id,
self.device_uuid,
self.str1,
self.str2,
self.param_string
)
hash = hashlib.md5()
hash.update(data.encode('utf-8'))
json = {
"account" : self.username,
"password" : self.password,
"OrderNo" : self.order_no,
"mask" : hash.hexdigest(),
"source_info" : {
"app_version" : self.str2,
"device_time" : self.str1,
"device_uuid" : self.device_uuid,
"model_id" : self.model_id,
"os_version" : self.os_version,
"Platform" : self.platform,
}
}
response = requests.post('https://api.mcddaily.com.tw/login_by_mobile', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
self.set_token(response.json()['results']['member_info']['access_token'])
return response
def set_token(self, access_token):
self.access_token = access_token
self.json['access_token'] = access_token
def get_card_query(self, card_no):
self.card_no = card_no
""" Mask = md5('Mc' + order_no + access_token + card_no + callTime + 'Donalds') """
data = 'Mc%s%s%s%sDonalds' % (
self.order_no,
self.access_token,
self.card_no,
self.str3,
)
hash = hashlib.md5()
hash.update(data.encode('utf-8'))
json = {
"OrderNo" : self.order_no,
"access_token" : self.access_token,
"callTime" : self.str3,
"cardNo" : self.card_no,
"mask" : mask.hexdigest(),
}
respones = requests.post('https://api.mcddaily.com.tw/queryBonus', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
return respones
def lottery_get_item(self):
respones = requests.post('https://api1.mcddailyapp.com/lottery/get_item', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def coupon_get_list(self):
respones = requests.post('https://api1.mcddailyapp.com/coupon/get_list', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def sticker_get_list(self):
respones = requests.post('https://api1.mcddailyapp.com/sticker/get_list', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def sticker_redeem(self):
sticker_list = self.sticker_get_list()
if len(sticker_list) < 6:
return 'Just %d stickers' % len(sticker_list)
sticker_id_list = []
for i in range(6):
sticker_id_list.append(sticker_list[i].sticker_id)
json = self.json
json['sticker_ids'] = sticker_id_list
respones = requests.post('https://api1.mcddailyapp.com/sticker/redeem', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
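# Usage sketch (illustrative; the credentials below are placeholders, not real values):
#
#     account = McDailyAccount()
#     account.login("0912345678", "password")
#     coupons = account.coupon_get_list()
#     result = account.sticker_redeem()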
|
py | 1a4b37cdea06f3fc420974f2732f22edd4f03771 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from main import app
class TestStaticPages(unittest.TestCase):
def test_homepage(self):
response = app.test_client().get('/')
assert response.status_code == 200
assert b"Data Commons aggregates data" in response.data
assert not b"Sustainability Data Commons" in response.data
def test_homepage_i18n(self):
response = app.test_client().get('/?hl=es')
assert response.status_code == 200
# TODO: add i18n
assert b"Data Commons aggregates data" in response.data
def test_about(self):
response = app.test_client().get('/about')
assert response.status_code == 200
assert b"About Data Commons" in response.data
def test_faq(self):
response = app.test_client().get('/faq')
assert response.status_code == 200
assert b"Frequently Asked Questions" in response.data
def test_disclaimers(self):
response = app.test_client().get('/disclaimers')
assert response.status_code == 200
assert b"Disclaimers" in response.data
def test_datasets(self):
response = app.test_client().get('/datasets')
assert response.status_code == 200
assert b"Datasets" in response.data
def test_feedback(self):
response = app.test_client().get('/feedback')
assert response.status_code == 200
assert b"We would love to get your feedback!" in response.data
@patch('routes.static.list_blobs')
def test_special_announcement(self, mock_list_blobs):
mock_list_blobs.side_effect = (lambda bucket, max_blobs: [])
response = app.test_client().get('/special_announcement')
assert response.status_code == 200
assert b"COVID-19 Special Announcements" in response.data
def test_special_announcement_faq(self):
response = app.test_client().get('/special_announcement/faq')
assert response.status_code == 200
assert b"COVID-19 Data Feed FAQ" in response.data
|
py | 1a4b38388c8e48fcb1a305e49714dfcf10cb335d | #Calculadora
''' Dados dos numeros generar operaciones basicas'''
#INPUTS
n1=5
n2=6
suma=0
resta=0
multiplicacion=0
division=0
#PROCESS
suma=n1+n2
resta=n2-n1
multiplicacion=n1*n2
division=n1/n2
#OUTPUT
print ("El resultado de la suma es: ", suma)
print ("El resultado de la resta es: ", resta)
print ("El resultado de la multiplicacion es: ", multiplicacion)
print ("El resultado de la división es: ", division)
|
py | 1a4b39f804e69a9424316a97e3557d1ec68d65e0 | import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="violin", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
py | 1a4b3b17bbedf1078cb97f043e286abe7c57d9f1 | #!/usr/bin/python3
import binascii
import json
import logging
import re
import sys
from collections import defaultdict
MASK_MAGIC_REGEX = re.compile(r'[*?!@$]')
def to_unixnano(timestamp):
return int(timestamp) * (10**9)
# include/atheme/channels.h
CMODE_FLAG_TO_MODE = {
0x001: 'i', # CMODE_INVITE
0x010: 'n', # CMODE_NOEXT
0x080: 's', # CMODE_SEC
0x100: 't', # CMODE_TOPIC
}
# attempt to interpret certfp as a hex-encoded SHA-256 fingerprint
def validate_certfp(certfp):
try:
dec = binascii.unhexlify(certfp)
except:
return False
return len(dec) == 32
def convert(infile):
out = {
'version': 1,
'source': 'atheme',
'users': defaultdict(dict),
'channels': defaultdict(dict),
}
group_to_founders = defaultdict(list)
channel_to_founder = defaultdict(lambda: (None, None))
while True:
line = infile.readline()
if not line:
break
line = line.rstrip(b'\r\n')
try:
line = line.decode('utf-8')
except UnicodeDecodeError:
line = line.decode('utf-8', 'replace')
logging.warning("line contained invalid utf8 data " + line)
parts = line.split(' ')
category = parts[0]
if category == 'GACL':
# Note: all group definitions precede channel access entries (token CA) by design, so it
# should be safe to read this in using one pass.
groupname = parts[1]
user = parts[2]
flags = parts[3]
if 'F' in flags:
group_to_founders[groupname].append(user)
elif category == 'MU':
# user account
# MU AAAAAAAAB shivaram $1$hcspif$nCm4r3S14Me9ifsOPGuJT. [email protected] 1600134392 1600467343 +sC default
name = parts[2]
user = {'name': name, 'hash': parts[3], 'email': parts[4], 'registeredAt': to_unixnano(parts[5])}
out['users'][name].update(user)
pass
elif category == 'MN':
# grouped nick
# MN shivaram slingamn 1600218831 1600467343
username, groupednick = parts[1], parts[2]
if username != groupednick:
user = out['users'][username]
user.setdefault('additionalnicks', []).append(groupednick)
elif category == 'MDU':
if parts[2] == 'private:usercloak':
username = parts[1]
out['users'][username]['vhost'] = parts[3]
elif category == 'MCFP':
username, certfp = parts[1], parts[2]
if validate_certfp(certfp):
user = out['users'][username]
user.setdefault('certfps', []).append(certfp.lower())
elif category == 'MC':
# channel registration
# MC #mychannel 1600134478 1600467343 +v 272 0 0
# MC #NEWCHANNELTEST 1602270889 1602270974 +vg 1 0 0 jaeger4
chname = parts[1]
chdata = out['channels'][chname]
# XXX just give everyone +nt, regardless of lock status; they can fix it later
chdata.update({'name': chname, 'registeredAt': to_unixnano(parts[2])})
if parts[8] != '':
chdata['key'] = parts[8]
modes = {'n', 't'}
mlock_on, mlock_off = int(parts[5]), int(parts[6])
for flag, mode in CMODE_FLAG_TO_MODE.items():
if flag & mlock_on != 0:
modes.add(mode)
elif flag & mlock_off != 0 and mode in modes:
modes.remove(mode)
chdata['modes'] = ''.join(sorted(modes))
chdata['limit'] = int(parts[7])
elif category == 'MDC':
# auxiliary data for a channel registration
# MDC #mychannel private:topic:setter s
# MDC #mychannel private:topic:text hi again
# MDC #mychannel private:topic:ts 1600135864
chname = parts[1]
category = parts[2]
if category == 'private:topic:text':
out['channels'][chname]['topic'] = line.split(maxsplit=3)[3]
elif category == 'private:topic:setter':
out['channels'][chname]['topicSetBy'] = parts[3]
elif category == 'private:topic:ts':
out['channels'][chname]['topicSetAt'] = to_unixnano(parts[3])
elif category == 'private:mlockext':
# the channel forward mode is +L on insp/unreal, +f on charybdis
# charybdis has a +L ("large banlist") taking no argument
# and unreal has a +f ("flood limit") taking two colon-delimited numbers,
# so check for an argument that starts with a #
if parts[3].startswith('L#') or parts[3].startswith('f#'):
out['channels'][chname]['forward'] = parts[3][1:]
elif category == 'CA':
# channel access lists
# CA #mychannel shivaram +AFORafhioqrstv 1600134478 shivaram
chname, username, flags, set_at = parts[1], parts[2], parts[3], int(parts[4])
chname = parts[1]
chdata = out['channels'][chname]
flags = parts[3]
set_at = int(parts[4])
if 'amode' not in chdata:
chdata['amode'] = {}
# see libathemecore/flags.c: +o is op, +O is autoop, etc.
if 'F' in flags:
# If the username starts with "!", it's actually a GroupServ group.
if username.startswith('!'):
group_founders = group_to_founders.get(username)
if not group_founders:
# skip this and warn about it later
continue
# attempt to promote the first group founder to channel founder
username = group_founders[0]
# but everyone gets the +q flag
for founder in group_founders:
chdata['amode'][founder] = 'q'
# there can only be one founder
preexisting_founder, preexisting_set_at = channel_to_founder[chname]
if preexisting_founder is None or set_at < preexisting_set_at:
chdata['founder'] = username
channel_to_founder[chname] = (username, set_at)
# but multiple people can receive the 'q' amode
chdata['amode'][username] = 'q'
continue
if MASK_MAGIC_REGEX.search(username):
# ignore groups, masks, etc. for any field other than founder
continue
# record the first appearing successor, if necessary
if 'S' in flags:
if not chdata.get('successor'):
chdata['successor'] = username
# finally, handle amodes
if 'q' in flags:
chdata['amode'][username] = 'q'
elif 'a' in flags:
chdata['amode'][username] = 'a'
elif 'o' in flags or 'O' in flags:
chdata['amode'][username] = 'o'
elif 'h' in flags or 'H' in flags:
chdata['amode'][username] = 'h'
elif 'v' in flags or 'V' in flags:
chdata['amode'][username] = 'v'
else:
pass
# do some basic integrity checks
def validate_user(name):
if not name:
return False
return bool(out['users'].get(name))
invalid_channels = []
for chname, chdata in out['channels'].items():
if not validate_user(chdata.get('founder')):
if validate_user(chdata.get('successor')):
chdata['founder'] = chdata['successor']
else:
invalid_channels.append(chname)
for chname in invalid_channels:
logging.warning("Unable to find a valid founder for channel %s, discarding it", chname)
del out['channels'][chname]
return out
def main():
if len(sys.argv) != 3:
raise Exception("Usage: atheme2json.py atheme_db output.json")
with open(sys.argv[1], 'rb') as infile:
output = convert(infile)
with open(sys.argv[2], 'w') as outfile:
json.dump(output, outfile)
if __name__ == '__main__':
logging.basicConfig()
sys.exit(main())
|
py | 1a4b3d0da1aa8c3f545f11ad9bb252e2202541c4 | #
# Python Macro Language for Dragon NaturallySpeaking
# (c) Copyright 1999 by Joel Gould
# Portions (c) Copyright 1999 by Dragon Systems, Inc.
#
# _mouse.py
# Sample macro file which implements mouse and keyboard movement modes
# similar to DragonDictate for Windows
#
# April 1, 2000
# Updates from Jonathan Epstein
# - cancel arrow movement when the active window changes
# - add support for tray icon during arrow movement
#
# In the grammar we map some keywords into pixel counts according to the
# following dictionary. These numbers can be safely changed within reason.
amountDict = {
'little':3, # as in 'move a little left'
'lot':10 } # as in 'move left a lot'
# For caret movement, this represents the default speed in milliseconds
# between arrow keys
defaultMoveSpeed = 250
# For caret movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
moveRateChange = 2.0
# For mouse movement, this represents the default speed in milliseconds
# between pixel movements and the default number of pixels per move. We
# do not want the update rate to be less than 50 milliseconds so if it
# gets faster than that, we adjust the mouse pixels instead.
defaultMouseSpeed = 100
defaultMousePixels = 1
# For mouse movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
mouseRateChange = 3.0
############################################################################
#
# Here are some of our instance variables
#
# self.haveCallback set when the timer callback in installed
# self.curMode 1 for caret movement, 2 for mouse movement, or None
# self.curSpeed current movement speed (milliseconds for timer)
# self.curPixels for mouse movement, pixels per move
# self.lastClock time of last timer callback or 0
# self.curDirection direction of movement as string
#
import string # for atoi
import time # for clock
import natlink
from natlinkutils import *
class ThisGrammar(GrammarBase):
# when we unload the grammar, we must make sure we clear the timer
# callback so we keep a variable which is set when we currently own
# the timer callback
def __init__(self):
self.haveCallback = 0
self.curMode = None
self.iconState = 0
GrammarBase.__init__(self)
def unload(self):
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
GrammarBase.unload(self)
# This is our grammar. The rule 'start' is what is normally active. The
# rules 'nowMoving' and 'nowMousing' are used when we are in caret or
# mouse movement mode.
gramDefn = """
# this is the rule which is normally active
<start> exported = <startMoving> | <startMousing> |
<nudgeMouse> | <mouseButton>;
# this rule is active when we are moving the caret
<nowMoving> exported =
[ move ] ( {direction} | [much] faster | [much] slower ) |
stop [ moving ];
# this rule is active when we are moving the mouse
<nowMousing> exported =
[ move ] ( {direction} | faster | slower ) |
stop [ moving ] | <mouseButton> | <mouseButton>;
# here are the subrules which deal with caret movement
<startMoving> = move {direction} | start moving {direction};
# here are the subrules which deal with mouse movement
<startMousing> = [ start moving ] mouse {direction};
<nudgeMouse> =
nudge mouse {direction} |
[ move ] mouse {direction} ( a little | a lot | {count} pixels ) |
[ move ] mouse ( a little | a lot | {count} pixels ) {direction};
<mouseButton> =
[ mouse ] [ left | middle | right ] [ single | double ] click;
"""
# These are the lists which we use in our grammar. The directions and
# counts are implemented as lists to make parsing easier (words from
# lists are referenced as part of the rule which includes the list).
listDefn = {
'direction' : ['up','down','left','right'],
'count' : ['1','2','3','4','5','6','7','8','9','10','11','12','13',
'14','15','16','17','18','19','20','25','30','35','40','45','50'] }
# Load the grammar, build the direction and count lists and activate the
# main rule ('start')
def initialize(self):
self.load(self.gramDefn)
for listName in self.listDefn.keys():
self.setList(listName,self.listDefn[listName])
self.activateSet(['start'],exclusive=0)
# This subroutine moves the mouse cursor in an indicated direction
# by an indicated number of pixels
def moveMouse(self,direction,count):
xPos,yPos = natlink.getCursorPos()
if direction == 'up': yPos = yPos - count
elif direction == 'down': yPos = yPos + count
elif direction == 'left': xPos = xPos - count
elif direction == 'right': xPos = xPos + count
xSize,ySize = natlink.getScreenSize()
if xPos < 0: xPos = 0
if xPos >= xSize: xPos = xSize - 1
if yPos < 0: yPos = 0
if yPos >= ySize: yPos = ySize - 1
natlink.playEvents([(wm_mousemove,xPos,yPos)])
# This subroutine cancels any active movement mode
def cancelMode(self):
self.curMode = None
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
self.activateSet(['start'],exclusive=0)
natlink.setTrayIcon()
# This function is called on a timer event. If we are in a movement
# mode then we move the mouse or caret by the indicated amount.
#
# The apparent speed for mouse movement is the speed divided by the
# number of pixels per move. We calculate the number of pixels per
# move to ensure that the speed is never faster than 50 milliseconds.
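    # Worked example (illustrative): starting from defaultMouseSpeed=100 and
    # defaultMousePixels=1, one "faster" command computes int(100/3) = 33 ms, which is
    # below the 50 ms floor, so gotResults_nowMousing doubles both values to 66 ms and
    # 2 pixels -- an apparent speed of 33 ms per pixel while the timer still fires no
    # more often than every 66 ms.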
def onTimer(self):
if self.lastClock:
diff = int( (time.clock() - self.lastClock) * 1000 )
self.lastClock = time.clock()
if self.curMode == 1:
moduleInfo = natlink.getCurrentModule()
if natlink.getMicState() == 'on' and moduleInfo == self.moduleInfo:
self.setTrayIcon(1)
# Note: it is often during a playString operation that the
# "stop moving" command occurs
natlink.playString('{'+self.curDirection+'}')
else:
self.cancelMode()
elif self.curMode == 2:
self.moveMouse(self.curDirection,self.curPixels)
# This handles the nudgeMouse rule. We want to extract the direction
# and the count or amount.
def gotResults_nudgeMouse(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
count = findKeyWord(words,self.listDefn['count'])
amount = findKeyWord(words,amountDict.keys())
if count:
count = string.atoi(count)
elif amount:
count = amountDict[amount]
self.moveMouse(direction,count)
# This handles the mouseButton rule. We want to extract the button
# name (if specified) and whether this is a single or double click.
def gotResults_mouseButton(self,words,fullResults):
self.cancelMode()
which = findKeyWord(words,['left','right','middle'])
if not which: which = 'left'
if 'double' in words: count = 2
else: count = 1
buttonClick(which,count)
# This handles the startMoving rule. We only need to extract the
# direction. To turn on cursor movement mode we need to install a
# timer callback (warning: this is global) and set the recognition
# state to be exclusively from the rule <nowMoving>. The cursor only
# moves in the timer callback itself.
def gotResults_startMoving(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 1
self.curDirection = direction
self.setTrayIcon(0)
self.moduleInfo = natlink.getCurrentModule()
self.curSpeed = defaultMoveSpeed
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMoveSpeed)
self.haveCallback = 1
self.activateSet(['nowMoving'],exclusive=1)
# This handles the nowMoving rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMoving(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
self.setTrayIcon(0)
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
speed = int(self.curSpeed / moveRateChange)
if 'much' in words:
speed = int(speed / (moveRateChange*moveRateChange))
if speed < 50: speed = 50
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
elif 'slower' in words:
speed = int(self.curSpeed * moveRateChange)
if 'much' in words:
speed = int(speed * (moveRateChange*moveRateChange))
if speed > 4000: speed = 4000
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
    # This handles the startMousing rule.  We only need to extract the
    # direction.  To turn on mouse movement mode we need to install a
    # timer callback (warning: this is global) and set the recognition
    # state to be exclusively from the rule <nowMousing>.  The mouse only
    # moves in the timer callback itself.
def gotResults_startMousing(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 2
self.curDirection = direction
self.curSpeed = defaultMouseSpeed
self.curPixels = defaultMousePixels
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMouseSpeed)
self.haveCallback = 1
self.activateSet(['nowMousing'],exclusive=1)
# This handles the nowMousing rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMousing(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
speed = int(self.curSpeed / moveRateChange)
pixels = self.curPixels
while speed < 50:
speed = speed * 2
pixels = pixels * 2
if pixels > 10: pixels = 10
self.curSpeed = speed
self.curPixels = pixels
natlink.setTimerCallback(self.onTimer,speed)
elif 'slower' in words:
speed = int(self.curSpeed * moveRateChange)
pixels = self.curPixels
while pixels > defaultMousePixels and speed >= 2*50:
                speed = speed // 2
                pixels = pixels // 2
if speed > 2000: speed = 2000
self.curSpeed = speed
self.curPixels = pixels
natlink.setTimerCallback(self.onTimer,speed)
# This turns on the tray icon depending on the movement direction.
# self.iconState is used to toggle the image to animate the icon.
def setTrayIcon(self,toggleIcon):
iconName = self.curDirection
toolTip = 'moving '+self.curDirection
if not toggleIcon or self.iconState:
self.iconState = 0
else:
self.iconState = 1
iconName = iconName + '2'
natlink.setTrayIcon(iconName,toolTip,self.onTrayIcon)
# This is called if the user clicks on the tray icon. We simply cancel
# movement in all cases.
def onTrayIcon(self,message):
self.cancelMode()
# This is a simple utility subroutine. It takes two lists of words and
# returns the first word it finds which is in both lists. We use this to
# extract special words (like the direction) from recognition results.
def findKeyWord(list1,list2):
for word in list1:
if word in list2:
return word
return None
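# A minimal, illustrative self-check of findKeyWord (added for clarity; it is
# not part of the grammar logic and the word lists below are made up).  It
# shows that the first word common to both lists is returned, and None when
# there is no overlap.
if __name__ == '__main__':
    _example_words = ['move', 'mouse', 'up', 'slowly']
    _example_directions = ['up', 'down', 'left', 'right']
    assert findKeyWord(_example_words, _example_directions) == 'up'
    assert findKeyWord(['no', 'overlap'], _example_directions) is None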
#
# Here is the initialization and termination code. See wordpad.py for more
# comments.
#
thisGrammar = ThisGrammar()
thisGrammar.initialize()
def unload():
global thisGrammar
if thisGrammar: thisGrammar.unload()
thisGrammar = None
|
py | 1a4b3d1b5312bd3abcb9a1d83c4484c2b041038d | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' log.py '''
import logging
import logging.handlers
# Create the logger
Log = logging.getLogger('heron-state')
def configure(level, logfile=None):
""" configure logging """
log_format = "%(asctime)s-%(levelname)s: %(message)s"
date_format = '%a, %d %b %Y %H:%M:%S'
logging.basicConfig(format=log_format, datefmt=date_format)
Log.setLevel(level)
if logfile is not None:
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter(log_format))
Log.addHandler(fh)
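# Illustrative usage only (not part of the module itself): configure the
# shared logger at DEBUG level and emit a message.  Passing a path as the
# logfile argument would additionally write the log to that file.
if __name__ == '__main__':
    configure(logging.DEBUG)
    Log.info("heron-state logger configured")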
|
py | 1a4b3d267e114415fe57216a462d897407c429a5 | from typing import Optional
from pydantic import BaseModel
# Shared properties
class UserBase(BaseModel):
username: Optional[str] = None
# Properties to receive via API on creation
class UserCreate(UserBase):
username: str
password: str
# Properties to receive via API on update
class UserUpdate(UserBase):
password: Optional[str] = None
class UserInDBBase(UserBase):
id: Optional[int] = None
class Config:
orm_mode = True
# Additional properties to return via API
class User(UserInDBBase):
pass
# Additional properties stored in DB
class UserInDB(UserInDBBase):
hashed_password: str
|
py | 1a4b3d955f5c04fe3e8e5455b052102e456b4574 | from django.db import models
from django.utils import timezone
# Create your models here.
class Feedback(models.Model):
    data = models.DateTimeField(blank=True)
    result = models.CharField(max_length=3, null=True)
def store(self):
self.data = timezone.now()
self.save()
class Document(models.Model):
upload = models.FileField() |
py | 1a4b3da83dc6b2ffbdfc2ac063781ac406e95ddd | # Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Module providing coin abstraction.
Anything coin-specific should go in this file and be subclassed where
necessary for appropriate handling.
'''
from collections import namedtuple
import re
import struct
from decimal import Decimal
from hashlib import sha256
from functools import partial
import electrumx.lib.util as util
from electrumx.lib.hash import Base58, hash160, double_sha256, hash_to_hex_str
from electrumx.lib.hash import HASHX_LEN, hex_str_to_hash
from electrumx.lib.script import (_match_ops, Script, ScriptError,
ScriptPubKey, OpCodes)
import electrumx.lib.tx as lib_tx
import electrumx.lib.tx_dash as lib_tx_dash
import electrumx.lib.tx_axe as lib_tx_axe
import electrumx.server.block_processor as block_proc
import electrumx.server.daemon as daemon
from electrumx.server.session import (ElectrumX, DashElectrumX,
SmartCashElectrumX, AuxPoWElectrumX)
Block = namedtuple("Block", "raw header transactions")
class CoinError(Exception):
'''Exception raised for coin-related errors.'''
class Coin(object):
'''Base class of coin hierarchy.'''
REORG_LIMIT = 200
# Not sure if these are coin-specific
RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE = 2016
BASIC_HEADER_SIZE = 80
STATIC_BLOCK_HEADERS = True
SESSIONCLS = ElectrumX
DEFAULT_MAX_SEND = 1000000
DESERIALIZER = lib_tx.Deserializer
DAEMON = daemon.Daemon
BLOCK_PROCESSOR = block_proc.BlockProcessor
HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'timestamp',
'bits', 'nonce')
HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTES = [bytes.fromhex("05")]
XPUB_VERBYTES = bytes('????', 'utf-8')
XPRV_VERBYTES = bytes('????', 'utf-8')
WIF_BYTE = bytes.fromhex("80")
ENCODE_CHECK = Base58.encode_check
DECODE_CHECK = Base58.decode_check
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
GENESIS_ACTIVATION = 100_000_000
# Peer discovery
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
PEERS = []
CRASH_CLIENT_VER = None
BLACKLIST_URL = None
@classmethod
def lookup_coin_class(cls, name, net):
'''Return a coin class given name and network.
Raise an exception if unrecognised.'''
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
for coin in util.subclasses(Coin):
if (coin.NAME.lower() == name.lower() and
coin.NET.lower() == net.lower()):
coin_req_attrs = req_attrs.copy()
missing = [attr for attr in coin_req_attrs
if not hasattr(coin, attr)]
if missing:
raise CoinError('coin {} missing {} attributes'
.format(name, missing))
return coin
raise CoinError('unknown coin {} and network {} combination'
.format(name, net))
@classmethod
def sanitize_url(cls, url):
# Remove surrounding ws and trailing /s
url = url.strip().rstrip('/')
match = cls.RPC_URL_REGEX.match(url)
if not match:
raise CoinError('invalid daemon URL: "{}"'.format(url))
if match.groups()[1] is None:
url += ':{:d}'.format(cls.RPC_PORT)
if not url.startswith('http://') and not url.startswith('https://'):
url = 'http://' + url
return url + '/'
@classmethod
def max_fetch_blocks(cls, height):
if height < 130000:
return 1000
return 100
@classmethod
def genesis_block(cls, block):
'''Check the Genesis block is the right one for this coin.
Return the block less its unspendable coinbase.
'''
header = cls.block_header(block, 0)
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError('genesis block has hash {} expected {}'
.format(header_hex_hash, cls.GENESIS_HASH))
return header + bytes(1)
@classmethod
def hashX_from_script(cls, script):
'''Returns a hashX from a script.'''
return sha256(script).digest()[:HASHX_LEN]
@staticmethod
def lookup_xverbytes(verbytes):
'''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.'''
# Order means BTC testnet will override NMC testnet
for coin in util.subclasses(Coin):
if verbytes == coin.XPUB_VERBYTES:
return True, coin
if verbytes == coin.XPRV_VERBYTES:
return False, coin
raise CoinError('version bytes unrecognised')
@classmethod
def address_to_hashX(cls, address):
'''Return a hashX given a coin address.'''
return cls.hashX_from_script(cls.pay_to_address_script(address))
@classmethod
def P2PKH_address_from_hash160(cls, hash160):
        '''Return a P2PKH address given a public key hash (hash160).'''
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)
@classmethod
def P2PKH_address_from_pubkey(cls, pubkey):
'''Return a coin address given a public key.'''
return cls.P2PKH_address_from_hash160(hash160(pubkey))
@classmethod
def P2SH_address_from_hash160(cls, hash160):
'''Return a coin address given a hash160.'''
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)
@classmethod
def hash160_to_P2PKH_script(cls, hash160):
return ScriptPubKey.P2PKH_script(hash160)
@classmethod
def hash160_to_P2PKH_hashX(cls, hash160):
return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))
@classmethod
def pay_to_address_script(cls, address):
        '''Return an output script that pays to the given address.
        Pass the address (either P2PKH or P2SH) in base58 form.
        '''
raw = cls.DECODE_CHECK(address)
# Require version byte(s) plus hash160.
verbyte = -1
verlen = len(raw) - 20
if verlen > 0:
verbyte, hash160 = raw[:verlen], raw[verlen:]
if verbyte == cls.P2PKH_VERBYTE:
return cls.hash160_to_P2PKH_script(hash160)
if verbyte in cls.P2SH_VERBYTES:
return ScriptPubKey.P2SH_script(hash160)
raise CoinError('invalid address: {}'.format(address))
@classmethod
def privkey_WIF(cls, privkey_bytes, compressed):
'''Return the private key encoded in Wallet Import Format.'''
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
if compressed:
payload.append(0x01)
return cls.ENCODE_CHECK(payload)
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header)
@classmethod
def header_prevhash(cls, header):
'''Given a header return previous hash'''
return header[4:36]
@classmethod
def static_header_offset(cls, height):
'''Given a header height return its offset in the headers file.
If header sizes change at some point, this is the only code
that needs updating.'''
assert cls.STATIC_BLOCK_HEADERS
return height * cls.BASIC_HEADER_SIZE
@classmethod
def static_header_len(cls, height):
'''Given a header height return its length.'''
return (cls.static_header_offset(height + 1)
- cls.static_header_offset(height))
@classmethod
def block_header(cls, block, height):
'''Returns the block header given a block and its height.'''
return block[:cls.static_header_len(height)]
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
header = cls.block_header(raw_block, height)
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
return Block(raw_block, header, txs)
@classmethod
def decimal_value(cls, value):
'''Return the number of standard coin units as a Decimal given a
quantity of smallest units.
For example 1 BTC is returned for 100 million satoshis.
'''
return Decimal(value) / cls.VALUE_PER_COIN
@classmethod
def warn_old_client_on_tx_broadcast(cls, _client_ver):
return False
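# Hedged usage sketch (not part of the upstream class hierarchy): how a
# caller would typically resolve a coin class and use the conversion and
# script helpers above.  The coin/network names and the all-zero hash160 are
# purely illustrative, and the function is never called at import time.
def _coin_usage_example():
    coin = Coin.lookup_coin_class('BitcoinSV', 'mainnet')
    # 100 million atomic units -> Decimal('1') standard coin unit
    one_coin = coin.decimal_value(coin.VALUE_PER_COIN)
    # Build an address from a (dummy) hash160 and round-trip it to a hashX
    address = coin.P2PKH_address_from_hash160(bytes(20))
    script = coin.pay_to_address_script(address)
    return one_coin, coin.hashX_from_script(script)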
class AuxPowMixin(object):
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerAuxPow
SESSIONCLS = AuxPoWElectrumX
TRUNCATED_HEADER_SIZE = 80
# AuxPoW headers are significantly larger, so the DEFAULT_MAX_SEND from
# Bitcoin is insufficient. In Namecoin mainnet, 5 MB wasn't enough to
# sync, while 10 MB worked fine.
DEFAULT_MAX_SEND = 10000000
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
@classmethod
def block_header(cls, block, height):
'''Return the AuxPow block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(cls.BASIC_HEADER_SIZE)
class EquihashMixin(object):
STATIC_BLOCK_HEADERS = False
BASIC_HEADER_SIZE = 140 # Excluding Equihash solution
DESERIALIZER = lib_tx.DeserializerEquihash
HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'reserved',
'timestamp', 'bits', 'nonce')
HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I 32s').unpack_from
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(cls.BASIC_HEADER_SIZE)
class ScryptMixin(object):
DESERIALIZER = lib_tx.DeserializerTxTime
HEADER_HASH = None
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
if cls.HEADER_HASH is None:
import scrypt
cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32)
version, = util.unpack_le_uint32_from(header)
if version > 6:
return super().header_hash(header)
else:
return cls.HEADER_HASH(header)
class KomodoMixin(object):
P2PKH_VERBYTE = bytes.fromhex("3C")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("BC")
GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a'
'63bfa1beae327897f56c5cfb7daaae71')
DESERIALIZER = lib_tx.DeserializerZcash
class BitcoinMixin(object):
SHORTNAME = "BTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
RPC_PORT = 8332
class NameMixin(object):
DATA_PUSH_MULTIPLE = -2
@classmethod
def interpret_name_prefix(cls, script, possible_ops):
"""Interprets a potential name prefix
Checks if the given script has a name prefix. If it has, the
name prefix is split off the actual address script, and its parsed
fields (e.g. the name) returned.
possible_ops must be an array of arrays, defining the structures
of name prefixes to look out for. Each array can consist of
actual opcodes, -1 for ignored data placeholders, -2 for
multiple ignored data placeholders and strings for named placeholders.
        Whenever a data push matches a named placeholder, the corresponding
        value is put into a dictionary with the placeholder name as key, and
        the dictionary of matches is returned."""
try:
ops = Script.get_ops(script)
except ScriptError:
return None, script
name_op_count = None
for pops in possible_ops:
# Start by translating named placeholders to -1 values, and
# keeping track of which op they corresponded to.
template = []
named_index = {}
n = len(pops)
offset = 0
for i, op in enumerate(pops):
if op == cls.DATA_PUSH_MULTIPLE:
# Emercoin stores value in multiple placeholders
# Script structure: https://git.io/fjuRu
added, template = cls._add_data_placeholders_to_template(ops[i:], template)
offset += added - 1 # subtract the "DATA_PUSH_MULTIPLE" opcode
elif type(op) == str:
template.append(-1)
named_index[op] = i + offset
else:
template.append(op)
n += offset
if not _match_ops(ops[:n], template):
continue
name_op_count = n
named_values = {key: ops[named_index[key]] for key in named_index}
break
if name_op_count is None:
return None, script
name_end_pos = cls.find_end_position_of_name(script, name_op_count)
address_script = script[name_end_pos:]
return named_values, address_script
@classmethod
def _add_data_placeholders_to_template(cls, opcodes, template):
num_dp = cls._read_data_placeholders_count(opcodes)
num_2drop = num_dp // 2
num_drop = num_dp % 2
two_drops = [OpCodes.OP_2DROP for _ in range(num_2drop)]
one_drops = [OpCodes.OP_DROP for _ in range(num_drop)]
elements_added = num_dp + num_2drop + num_drop
placeholders = [-1 for _ in range(num_dp)]
drops = two_drops + one_drops
return elements_added, template + placeholders + drops
@classmethod
def _read_data_placeholders_count(cls, opcodes):
data_placeholders = 0
for opcode in opcodes:
if type(opcode) == tuple:
data_placeholders += 1
else:
break
return data_placeholders
@staticmethod
def find_end_position_of_name(script, length):
"""Finds the end position of the name data
Given the number of opcodes in the name prefix (length), returns the
index into the byte array of where the name prefix ends."""
n = 0
for _i in range(length):
# Content of this loop is copied from Script.get_ops's loop
op = script[n]
n += 1
if op <= OpCodes.OP_PUSHDATA4:
# Raw bytes follow
if op < OpCodes.OP_PUSHDATA1:
dlen = op
elif op == OpCodes.OP_PUSHDATA1:
dlen = script[n]
n += 1
elif op == OpCodes.OP_PUSHDATA2:
dlen, = struct.unpack('<H', script[n: n + 2])
n += 2
else:
dlen, = struct.unpack('<I', script[n: n + 4])
n += 4
if n + dlen > len(script):
raise IndexError
n += dlen
return n
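# Hedged illustration (not used anywhere in this module) of the possible_ops
# format documented in interpret_name_prefix above.  The specific prefix
# below is made up: a fixed opcode, a named placeholder whose matched data
# push is returned under "name", an anonymous data push (-1), and the drops
# that consume the pushes so only the address script remains.
def _name_prefix_template_example():
    return [[OpCodes.OP_2, "name", -1, OpCodes.OP_2DROP, OpCodes.OP_DROP]]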
class NameIndexMixin(NameMixin):
"""Shared definitions for coins that have a name index
This class defines common functions and logic for coins that have
a name index in addition to the index by address / script."""
BLOCK_PROCESSOR = block_proc.NameIndexBlockProcessor
@classmethod
def build_name_index_script(cls, name):
"""Returns the script by which names are indexed"""
from electrumx.lib.script import Script
res = bytearray()
res.append(cls.OP_NAME_UPDATE)
res.extend(Script.push_data(name))
res.extend(Script.push_data(bytes([])))
res.append(OpCodes.OP_2DROP)
res.append(OpCodes.OP_DROP)
res.append(OpCodes.OP_RETURN)
return bytes(res)
@classmethod
def split_name_script(cls, script):
named_values, address_script = cls.interpret_name_prefix(script, cls.NAME_OPERATIONS)
if named_values is None or "name" not in named_values:
return None, address_script
name_index_script = cls.build_name_index_script(named_values["name"][1])
return name_index_script, address_script
@classmethod
def hashX_from_script(cls, script):
_, address_script = cls.split_name_script(script)
return super().hashX_from_script(address_script)
@classmethod
def address_from_script(cls, script):
_, address_script = cls.split_name_script(script)
return super().address_from_script(address_script)
@classmethod
def name_hashX_from_script(cls, script):
name_index_script, _ = cls.split_name_script(script)
if name_index_script is None:
return None
return super().hashX_from_script(name_index_script)
class PrimeChainPowMixin(object):
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerPrimecoin
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(cls.BASIC_HEADER_SIZE)
class HOdlcoin(Coin):
NAME = "HOdlcoin"
SHORTNAME = "HODLC"
NET = "mainnet"
BASIC_HEADER_SIZE = 88
P2PKH_VERBYTE = bytes.fromhex("28")
WIF_BYTE = bytes.fromhex("a8")
GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb'
'82c28a9e94e917c94b40538d5658c04b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 258858
TX_COUNT_HEIGHT = 382138
TX_PER_BLOCK = 5
class BitcoinSV(BitcoinMixin, Coin):
NAME = "BitcoinSV"
SHORTNAME = "BSV"
TX_COUNT = 267318795
TX_COUNT_HEIGHT = 557037
TX_PER_BLOCK = 400
PEERS = [
'electrumx.bitcoinsv.io s',
'satoshi.vision.cash s',
'sv.usebsv.com s t',
'sv.jochen-hoenicke.de s t',
'sv.satoshi.io s t',
]
GENESIS_ACTIVATION = 620_538
class BitcoinCash(BitcoinMixin, Coin):
NAME = "BitcoinCashABC" # Some releases later remove the ABC suffix
SHORTNAME = "BCH"
TX_COUNT = 265479628
TX_COUNT_HEIGHT = 556592
TX_PER_BLOCK = 400
PEERS = [
'bch.imaginary.cash s t',
'electroncash.dk s t',
'wallet.satoshiscoffeehouse.com s t',
]
BLOCK_PROCESSOR = block_proc.LTORBlockProcessor
@classmethod
def warn_old_client_on_tx_broadcast(cls, client_ver):
if client_ver < (3, 3, 4):
return ('<br/><br/>'
'Your transaction was successfully broadcast.<br/><br/>'
'However, you are using a VULNERABLE version of Electron Cash.<br/>'
'Download the latest version from this web site ONLY:<br/>'
'https://electroncash.org/'
'<br/><br/>')
return False
class BitcoinSegwit(BitcoinMixin, Coin):
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
MEMPOOL_HISTOGRAM_REFRESH_SECS = 120
TX_COUNT = 318337769
TX_COUNT_HEIGHT = 524213
TX_PER_BLOCK = 1400
CRASH_CLIENT_VER = (3, 2, 3)
BLACKLIST_URL = 'https://electrum.org/blacklist.json'
PEERS = [
'E-X.not.fyi s t',
'electrum.vom-stausee.de s t',
'electrum.hsmiths.com s t',
'helicarrier.bauerj.eu s t',
'hsmiths4fyqlw5xw.onion s t',
'ozahtqwp25chjdjd.onion s t',
'electrum.hodlister.co s',
'electrum3.hodlister.co s',
'btc.usebsv.com s50006',
'fortress.qtornado.com s443 t',
'ecdsa.net s110 t',
'e2.keff.org s t',
'currentlane.lovebitco.in s t',
'electrum.jochen-hoenicke.de s50005 t50003',
'vps5.hsmiths.com s',
]
@classmethod
def warn_old_client_on_tx_broadcast(cls, client_ver):
if client_ver < (3, 3, 3):
return ('<br/><br/>'
'Your transaction was successfully broadcast.<br/><br/>'
'However, you are using a VULNERABLE version of Electrum.<br/>'
'Download the new version from the usual place:<br/>'
'https://electrum.org/'
'<br/><br/>')
return False
class BitcoinGold(EquihashMixin, BitcoinMixin, Coin):
CHUNK_SIZE = 252
NAME = "BitcoinGold"
SHORTNAME = "BTG"
FORK_HEIGHT = 491407
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("17")]
DESERIALIZER = lib_tx.DeserializerEquihashSegWit
TX_COUNT = 265026255
TX_COUNT_HEIGHT = 499923
TX_PER_BLOCK = 50
REORG_LIMIT = 1000
RPC_PORT = 8332
PEERS = [
'electrumx-eu.bitcoingold.org s50002 t50001',
'electrumx-us.bitcoingold.org s50002 t50001'
]
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
height, = util.unpack_le_uint32_from(header, 68)
if height >= cls.FORK_HEIGHT:
return double_sha256(header)
else:
return double_sha256(header[:68] + header[100:112])
class BitcoinGoldTestnet(BitcoinGold):
FORK_HEIGHT = 1
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'testnet'
RPC_PORT = 18332
GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe'
'a2f557b53ec379e78959de3853e6f9f6')
PEERS = [
'test-node1.bitcoingold.org s50002',
'test-node2.bitcoingold.org s50002',
'test-node3.bitcoingold.org s50002'
]
class BitcoinGoldRegtest(BitcoinGold):
FORK_HEIGHT = 2000
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'regtest'
RPC_PORT = 18444
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
PEERS = []
class BitcoinDiamond(BitcoinSegwit, Coin):
NAME = "BitcoinDiamond"
SHORTNAME = "BCD"
TX_VERSION = 12
TX_COUNT = 274277819
TX_COUNT_HEIGHT = 498678
TX_PER_BLOCK = 50
REORG_LIMIT = 1000
PEERS = []
VALUE_PER_COIN = 10000000
DESERIALIZER = lib_tx.DeserializerBitcoinDiamondSegWit
class Emercoin(NameMixin, Coin):
NAME = "Emercoin"
SHORTNAME = "EMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("5c")]
GENESIS_HASH = ('00000000bcccd459d036a588d1008fce'
'8da3754b205736f32ddfd35350e84c2d')
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1700
VALUE_PER_COIN = 1000000
RPC_PORT = 6662
DESERIALIZER = lib_tx.DeserializerEmercoin
PEERS = []
# Name opcodes
OP_NAME_NEW = OpCodes.OP_1
OP_NAME_UPDATE = OpCodes.OP_2
OP_NAME_DELETE = OpCodes.OP_3
# Valid name prefixes.
NAME_NEW_OPS = [OP_NAME_NEW, OpCodes.OP_DROP, "name", "days",
OpCodes.OP_2DROP, NameMixin.DATA_PUSH_MULTIPLE]
NAME_UPDATE_OPS = [OP_NAME_UPDATE, OpCodes.OP_DROP, "name", "days",
OpCodes.OP_2DROP, NameMixin.DATA_PUSH_MULTIPLE]
NAME_DELETE_OPS = [OP_NAME_DELETE, OpCodes.OP_DROP, "name",
OpCodes.OP_DROP]
NAME_OPERATIONS = [
NAME_NEW_OPS,
NAME_UPDATE_OPS,
NAME_DELETE_OPS,
]
@classmethod
def block_header(cls, block, height):
'''Returns the block header given a block and its height.'''
deserializer = cls.DESERIALIZER(block)
if deserializer.is_merged_block():
return deserializer.read_header(cls.BASIC_HEADER_SIZE)
return block[:cls.static_header_len(height)]
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
@classmethod
def hashX_from_script(cls, script):
_, address_script = cls.interpret_name_prefix(script, cls.NAME_OPERATIONS)
return super().hashX_from_script(address_script)
class BitcoinTestnetMixin(object):
SHORTNAME = "XTN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000000000933ea01ad0ee984209779ba'
'aec3ced90fa3f408719526f8d77f4943')
REORG_LIMIT = 8000
TX_COUNT = 12242438
TX_COUNT_HEIGHT = 1035428
TX_PER_BLOCK = 21
RPC_PORT = 18332
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
class BitcoinSVTestnet(BitcoinTestnetMixin, Coin):
'''Bitcoin Testnet for Bitcoin SV daemons.'''
NAME = "BitcoinSV"
PEERS = [
'electrontest.cascharia.com t51001 s51002',
]
GENESIS_ACTIVATION = 1_344_302
class BitcoinSVScalingTestnet(BitcoinSVTestnet):
NET = "scalingtest"
PEERS = [
'stn-server.electrumsv.io t51001 s51002',
]
TX_COUNT = 2015
TX_COUNT_HEIGHT = 5711
TX_PER_BLOCK = 5000
GENESIS_ACTIVATION = 14_896
@classmethod
def max_fetch_blocks(cls, height):
if height <= 10:
return 100
return 3
class BitcoinCashTestnet(BitcoinTestnetMixin, Coin):
'''Bitcoin Testnet for Bitcoin Cash daemons.'''
NAME = "BitcoinCashABC"
PEERS = [
'bch0.kister.net t s',
'testnet.imaginary.cash t50001 s50002',
'blackie.c3-soft.com t60001 s60002',
]
BLOCK_PROCESSOR = block_proc.LTORBlockProcessor
@classmethod
def warn_old_client_on_tx_broadcast(cls, client_ver):
if client_ver < (3, 3, 4):
return ('<br/><br/>'
'Your transaction was successfully broadcast.<br/><br/>'
'However, you are using a VULNERABLE version of Electron Cash.<br/>'
'Download the latest version from this web site ONLY:<br/>'
'https://electroncash.org/'
'<br/><br/>')
return False
class BitcoinSVRegtest(BitcoinSVTestnet):
NET = "regtest"
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
GENESIS_ACTIVATION = 10_000
class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin):
'''Bitcoin Testnet for Core bitcoind >= 0.13.1.'''
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
CRASH_CLIENT_VER = (3, 2, 3)
PEERS = [
'testnet.hsmiths.com t53011 s53012',
'hsmithsxurybd7uh.onion t53011 s53012',
'testnet.qtornado.com s t',
'testnet1.bauerj.eu t50001 s50002',
'tn.not.fyi t55001 s55002',
'bitcoin.cluelessperson.com s t',
]
@classmethod
def warn_old_client_on_tx_broadcast(cls, client_ver):
if client_ver < (3, 3, 3):
return ('<br/><br/>'
'Your transaction was successfully broadcast.<br/><br/>'
'However, you are using a VULNERABLE version of Electrum.<br/>'
'Download the new version from the usual place:<br/>'
'https://electrum.org/'
'<br/><br/>')
return False
class BitcoinSegwitRegtest(BitcoinSegwitTestnet):
NAME = "BitcoinSegwit"
NET = "regtest"
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class BitcoinNolnet(BitcoinCash):
'''Bitcoin Unlimited nolimit testnet.'''
NET = "nolnet"
GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862'
'3bd0f10d8c001304bdfc1a7902ae6d35')
PEERS = []
REORG_LIMIT = 8000
TX_COUNT = 583589
TX_COUNT_HEIGHT = 8617
TX_PER_BLOCK = 50
RPC_PORT = 28332
PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'}
# Source: https://github.com/sumcoinlabs/sumcoin
class Sumcoin(Coin):
NAME = "Sumcoin"
SHORTNAME = "SUM"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b41c")
XPRV_VERBYTES = bytes.fromhex("0488abe6")
P2PKH_VERBYTE = bytes.fromhex("3f")
P2SH_VERBYTES = [bytes.fromhex("c8"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("bf")
GENESIS_HASH = ('37d4696c5072cd012f3b7c651e5ce56a'
'1383577e4edacc2d289ec9b25eebfd5e')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 976394
TX_COUNT_HEIGHT = 659520
TX_PER_BLOCK = 2
REORG_LIMIT = 800
RPC_PORT = 3332
PEER_DEFAULT_PORTS = {'t': '53332', 's': '53333'}
PEERS = []
class Litecoin(Coin):
NAME = "Litecoin"
SHORTNAME = "LTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("30")
P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 8908766
TX_COUNT_HEIGHT = 1105256
TX_PER_BLOCK = 10
RPC_PORT = 9332
REORG_LIMIT = 800
PEERS = [
'ex.lug.gs s444',
'electrum-ltc.bysh.me s t',
'electrum-ltc.ddns.net s t',
'electrum-ltc.wilv.in s t',
'electrum.cryptomachine.com p1000 s t',
'electrum.ltc.xurious.com s t',
'eywr5eubdbbe2laq.onion s50008 t50007',
]
class LitecoinTestnet(Litecoin):
SHORTNAME = "XLT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d'
'88575f59ed816ff5e6a63deb4e3e29a0')
TX_COUNT = 21772
TX_COUNT_HEIGHT = 20800
TX_PER_BLOCK = 2
RPC_PORT = 19332
REORG_LIMIT = 4000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum-ltc.bysh.me s t',
'electrum.ltc.xurious.com s t',
]
class LitecoinRegtest(LitecoinTestnet):
NET = "regtest"
GENESIS_HASH = ('530827f38f93b43ed12af0b3ad25a288'
'dc02ed74d6d7857862df51fc56c416f9')
PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class BitcoinCashRegtest(BitcoinTestnetMixin, Coin):
NAME = "BitcoinCashABC" # Some releases later remove the ABC suffix
NET = "regtest"
PEERS = []
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
BLOCK_PROCESSOR = block_proc.LTORBlockProcessor
class Viacoin(AuxPowMixin, Coin):
NAME = "Viacoin"
SHORTNAME = "VIA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("21")]
WIF_BYTE = bytes.fromhex("c7")
GENESIS_HASH = ('4e9b54001f9976049830128ec0331515'
'eaabe35a70970d79971da1539a400ba1')
TX_COUNT = 113638
TX_COUNT_HEIGHT = 3473674
TX_PER_BLOCK = 30
RPC_PORT = 5222
REORG_LIMIT = 5000
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
PEERS = [
'vialectrum.bitops.me s t',
'server.vialectrum.org s t',
'vialectrum.viacoin.net s t',
'viax1.bitops.me s t',
]
class ViacoinTestnet(Viacoin):
SHORTNAME = "TVI"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("7f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ff")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
RPC_PORT = 25222
REORG_LIMIT = 2500
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'vialectrum.bysh.me s t',
]
class ViacoinTestnetSegWit(ViacoinTestnet):
NET = "testnet-segwit"
DESERIALIZER = lib_tx.DeserializerSegWit
# Source: https://github.com/GravityCoinOfficial/GravityCoin/
class GravityCoin(Coin):
NAME = "GravityCoin"
SHORTNAME = "GXX"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("28")
P2SH_VERBYTES = [bytes.fromhex("0a")]
WIF_BYTE = bytes.fromhex("d2")
GENESIS_HASH = ('322bad477efb4b33fa4b1f0b2861eaf543c61068da9898a95062fdb02ada486f')
TX_COUNT = 446050
TX_COUNT_HEIGHT = 547346
TX_PER_BLOCK = 2
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
RPC_PORT = 29200
REORG_LIMIT = 5000
PEERS = []
# Source: https://github.com/BitcoinZeroOfficial/bitcoinzero
class Bitcoinzero(Coin):
NAME = "Bitcoinzero"
SHORTNAME = "BZX"
TX_COUNT = 43798
TX_COUNT_HEIGHT = 44
TX_PER_BLOCK = 576
NET = "mainnet"
GENESIS_HASH = '322bad477efb4b33fa4b1f0b2861eaf543c61068da9898a95062fdb02ada486f'
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("4b")
P2SH_VERBYTES = [bytes.fromhex("22")]
WIF_BYTE = bytes.fromhex("d2")
RPC_PORT = 29202
REORG_LIMIT = 5000
PEERS = []
class Unitus(Coin):
NAME = "Unitus"
SHORTNAME = "UIS"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("44")
P2SH_VERBYTES = [bytes.fromhex("0A")]
WIF_BYTE = bytes.fromhex("84")
GENESIS_HASH = ('d8a2b2439d013a59f3bfc626a33487a3'
'd7d27e42a3c9e0b81af814cd8e592f31')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 3484561
TX_COUNT_HEIGHT = 1697605
TX_PER_BLOCK = 3
RPC_PORT = 50604
REORG_LIMIT = 2000
PEERS = [
'electrumx.unituscurrency.com s t',
]
# Source: namecoin.org
class Namecoin(NameIndexMixin, AuxPowMixin, Coin):
NAME = "Namecoin"
SHORTNAME = "NMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("d7dd6370")
XPRV_VERBYTES = bytes.fromhex("d7dc6e31")
P2PKH_VERBYTE = bytes.fromhex("34")
P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("e4")
GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e'
'807c155b0da735e6483dfba2f0a9c770')
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
TX_COUNT = 4415768
TX_COUNT_HEIGHT = 329065
TX_PER_BLOCK = 10
RPC_PORT = 8336
PEERS = [
'electrum-nmc.le-space.de s50002',
'ex.lug.gs s446',
'luggscoqbymhvnkp.onion t82',
'nmc.bitcoins.sk s50002',
'ulrichard.ch s50006 t50005',
]
BLOCK_PROCESSOR = block_proc.NameIndexBlockProcessor
# Name opcodes
OP_NAME_NEW = OpCodes.OP_1
OP_NAME_FIRSTUPDATE = OpCodes.OP_2
OP_NAME_UPDATE = OpCodes.OP_3
# Valid name prefixes.
NAME_NEW_OPS = [OP_NAME_NEW, -1, OpCodes.OP_2DROP]
NAME_FIRSTUPDATE_OPS = [OP_NAME_FIRSTUPDATE, "name", -1, -1,
OpCodes.OP_2DROP, OpCodes.OP_2DROP]
NAME_UPDATE_OPS = [OP_NAME_UPDATE, "name", -1, OpCodes.OP_2DROP,
OpCodes.OP_DROP]
NAME_OPERATIONS = [
NAME_NEW_OPS,
NAME_FIRSTUPDATE_OPS,
NAME_UPDATE_OPS,
]
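# Hedged sketch (illustrative only; "d/example" is an invented name): how the
# NameIndexMixin machinery above derives the synthetic script under which a
# Namecoin name is indexed.
def _namecoin_name_index_example():
    return Namecoin.build_name_index_script(b'd/example')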
class NamecoinTestnet(Namecoin):
NAME = "Namecoin"
SHORTNAME = "XNM"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
class NamecoinRegtest(NamecoinTestnet):
NAME = "Namecoin"
NET = "regtest"
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class Dogecoin(AuxPowMixin, Coin):
NAME = "Dogecoin"
SHORTNAME = "DOGE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02facafd")
XPRV_VERBYTES = bytes.fromhex("02fac398")
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82'
'1aa1d6ef92e7c9902eb318182c355691')
TX_COUNT = 27583427
TX_COUNT_HEIGHT = 1604979
TX_PER_BLOCK = 20
REORG_LIMIT = 2000
class DogecoinTestnet(Dogecoin):
NAME = "Dogecoin"
SHORTNAME = "XDT"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("71")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("f1")
GENESIS_HASH = ('bb0a78264637406b6360aad926284d54'
'4d7049f45189db5664f3c4d07350559e')
# Source: https://github.com/motioncrypto/motion
class Motion(Coin):
NAME = "Motion"
SHORTNAME = "XMN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('000001e9dc60dd2618e91f7b90141349'
'22c374496b61c1a272519b1c39979d78')
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("12")]
TX_COUNT_HEIGHT = 54353
TX_COUNT = 92701
TX_PER_BLOCK = 4
RPC_PORT = 3385
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x16r_hash
return x16r_hash.getPoWHash(header)
# Source: https://github.com/dashpay/dash
class Dash(Coin):
NAME = "Dash"
SHORTNAME = "DASH"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02fe52cc")
XPRV_VERBYTES = bytes.fromhex("02fe52f8")
GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637'
'9c733355108f107a430458cdf3407ab6')
P2PKH_VERBYTE = bytes.fromhex("4c")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("cc")
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
TX_PER_BLOCK = 4
RPC_PORT = 9998
PEERS = [
'electrum.dash.org s t',
'electrum.masternode.io s t',
'electrum-drk.club s t',
'dashcrypto.space s t',
'electrum.dash.siampm.com s t',
'wl4sfwq2hwxnodof.onion s t',
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
DESERIALIZER = lib_tx_dash.DeserializerDash
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class DashTestnet(Dash):
SHORTNAME = "tDASH"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a805837")
XPRV_VERBYTES = bytes.fromhex("3a8061a0")
GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483'
'7288a481e5c005f6563d91623bf8bc2c')
P2PKH_VERBYTE = bytes.fromhex("8c")
P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("ef")
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
TX_PER_BLOCK = 1
RPC_PORT = 19998
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum.dash.siampm.com s t',
'dasht.random.re s54002 t54001',
]
class DashRegtest(DashTestnet):
NET = "regtest"
GENESIS_HASH = ('000008ca1832a4baf228eb1553c03d3a'
'2c8e02399550dd6ea8d65cec3ef23d2e')
PEERS = []
TX_COUNT_HEIGHT = 1
TX_COUNT = 1
class Argentum(AuxPowMixin, Coin):
NAME = "Argentum"
SHORTNAME = "ARG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8'
'e007e5abffd6855de52ad59df7bb0bb2')
TX_COUNT = 2263089
TX_COUNT_HEIGHT = 2050260
TX_PER_BLOCK = 2000
RPC_PORT = 13581
class ArgentumTestnet(Argentum):
SHORTNAME = "XRG"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
REORG_LIMIT = 2000
class DigiByte(Coin):
NAME = "DigiByte"
SHORTNAME = "DGB"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1E")
GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f'
'e016d6fcb6dfad3a64c98dcc6e1e8496')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1046018
TX_COUNT_HEIGHT = 1435000
TX_PER_BLOCK = 1000
RPC_PORT = 12022
class DigiByteTestnet(DigiByte):
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728'
'e2a444af34c447dbd0916fa3430a68c2')
RPC_PORT = 15022
REORG_LIMIT = 2000
class FairCoin(Coin):
NAME = "FairCoin"
SHORTNAME = "FAIR"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("5f")
P2SH_VERBYTES = [bytes.fromhex("24")]
WIF_BYTE = bytes.fromhex("df")
GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578'
'1825a9407a5215dd7eda723373a0a1d7')
BASIC_HEADER_SIZE = 108
HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root',
'payload_hash', 'timestamp', 'creatorId')
HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I').unpack_from
TX_COUNT = 505
TX_COUNT_HEIGHT = 470
TX_PER_BLOCK = 1
RPC_PORT = 40405
PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'}
PEERS = [
'electrum.faircoin.world s',
'electrumfair.punto0.org s',
]
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
class Zcash(EquihashMixin, Coin):
NAME = "Zcash"
SHORTNAME = "ZEC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d'
'd06b4a8a5c453883c000b031973dce08')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8232
REORG_LIMIT = 800
class ZcashTestnet(Zcash):
SHORTNAME = "TAZ"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("1D25")
P2SH_VERBYTES = [bytes.fromhex("1CBA")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('05a60a92d99d85997cce3b87616c089f'
'6124d7342af37106edc76126334a2c38')
TX_COUNT = 242312
TX_COUNT_HEIGHT = 321685
TX_PER_BLOCK = 2
RPC_PORT = 18232
class SnowGem(EquihashMixin, Coin):
NAME = "SnowGem"
SHORTNAME = "XSG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1C28")
P2SH_VERBYTES = [bytes.fromhex("1C2D")]
GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009'
'4740524311a131de40e7f705e4c29a5b')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 1680878
TX_COUNT_HEIGHT = 627250
TX_PER_BLOCK = 2
RPC_PORT = 16112
REORG_LIMIT = 800
CHUNK_SIZE = 200
class Zero(EquihashMixin, Coin):
NAME = "Zero"
SHORTNAME = "ZER"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
GENESIS_HASH = ('068cbb5db6bc11be5b93479ea4df41fa'
'7e012e92ca8603c315f9b1a2202205c6')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329998
TX_COUNT_HEIGHT = 847425
TX_PER_BLOCK = 2
RPC_PORT = 23811
REORG_LIMIT = 800
class BitcoinZ(EquihashMixin, Coin):
NAME = "BitcoinZ"
SHORTNAME = "BTCZ"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7'
'c43197e2a660229be65db8a4534d75c1')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 171976
TX_COUNT_HEIGHT = 81323
TX_PER_BLOCK = 3
RPC_PORT = 1979
REORG_LIMIT = 800
class ZelCash(EquihashMixin, Coin):
NAME = "ZelCash"
SHORTNAME = "ZEL"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
GENESIS_HASH = ('00052461a5006c2e3b74ce48992a0869'
'5607912d5604c3eb8da25749b0900444')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 450539
TX_COUNT_HEIGHT = 167114
TX_PER_BLOCK = 3
RPC_PORT = 16124
REORG_LIMIT = 800
class Zclassic(EquihashMixin, Coin):
NAME = "Zclassic"
SHORTNAME = "ZCL"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4'
'99804b7bebc22df55f8b834301260602')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8023
REORG_LIMIT = 800
class Koto(Coin):
NAME = "Koto"
SHORTNAME = "KOTO"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1836")
P2SH_VERBYTES = [bytes.fromhex("183B")]
GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16'
'cd1b1d195c164da00f39c499a2e9959e')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 158914
TX_COUNT_HEIGHT = 67574
TX_PER_BLOCK = 3
RPC_PORT = 8432
REORG_LIMIT = 800
PEERS = [
'fr.kotocoin.info s t',
'electrum.kotocoin.info s t',
]
class KotoTestnet(Koto):
SHORTNAME = "TOKO"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("18A4")
P2SH_VERBYTES = [bytes.fromhex("1839")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('bf84afbde20c2d213b68b231ddb585ab'
'616ef7567226820f00d9b397d774d2f0')
TX_COUNT = 91144
TX_COUNT_HEIGHT = 89662
TX_PER_BLOCK = 1
RPC_PORT = 18432
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'testnet.kotocoin.info s t',
]
class Komodo(KomodoMixin, EquihashMixin, Coin):
NAME = "Komodo"
SHORTNAME = "KMD"
NET = "mainnet"
TX_COUNT = 693629
TX_COUNT_HEIGHT = 491777
TX_PER_BLOCK = 2
RPC_PORT = 7771
REORG_LIMIT = 800
PEERS = []
class Hush(KomodoMixin, EquihashMixin, Coin):
NAME = "Hush"
SHORTNAME = "HUSH"
NET = "mainnet"
TX_COUNT = 111317
TX_COUNT_HEIGHT = 169280
TX_PER_BLOCK = 2
RPC_PORT = 18031
REORG_LIMIT = 800
class Monaize(KomodoMixin, EquihashMixin, Coin):
NAME = "Monaize"
SHORTNAME = "MNZ"
NET = "mainnet"
TX_COUNT = 256
TX_COUNT_HEIGHT = 128
TX_PER_BLOCK = 2
RPC_PORT = 14337
REORG_LIMIT = 800
PEERS = []
class Verus(KomodoMixin, EquihashMixin, Coin):
NAME = "Verus"
SHORTNAME = "VRSC"
NET = "mainnet"
TX_COUNT = 55000
TX_COUNT_HEIGHT = 42000
TX_PER_BLOCK = 2
RPC_PORT = 27486
REORG_LIMIT = 800
PEERS = []
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
import verushash
# if this may be the genesis block, use sha256, otherwise, VerusHash
if cls.header_prevhash(header) == bytes([0] * 32):
return double_sha256(header)
else:
if (header[0] == 4 and header[2] >= 1):
if (len(header) < 144 or header[143] < 3):
return verushash.verushash_v2b(header)
else:
return verushash.verushash_v2b1(header)
else:
return verushash.verushash(header)
class Einsteinium(Coin):
NAME = "Einsteinium"
SHORTNAME = "EMC2"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("21")
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9'
'84303b5b97eb7b42868f714611aed94b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2087559
TX_COUNT_HEIGHT = 1358517
TX_PER_BLOCK = 2
RPC_PORT = 41879
REORG_LIMIT = 2000
class Blackcoin(ScryptMixin, Coin):
NAME = "Blackcoin"
SHORTNAME = "BLK"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d'
'f2c183bf232f263d0ba5b101911e4563')
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 15715
REORG_LIMIT = 5000
class Bitbay(ScryptMixin, Coin):
NAME = "Bitbay"
SHORTNAME = "BAY"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('0000075685d3be1f253ce777174b1594'
'354e79954d2a32a6f77fe9cba00e6467')
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 19914
REORG_LIMIT = 5000
class DeepOnion(Coin):
NAME = "DeepOnion"
SHORTNAME = "ONION"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1F")
P2SH_VERBYTES = [bytes.fromhex("4E")]
WIF_BYTE = bytes.fromhex("9f")
GENESIS_HASH = ('000004e29458ef4f2e0abab544737b07'
'344e6ff13718f7c2d12926166db07b5e')
DESERIALIZER = lib_tx.DeserializerTxTime
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 1194707
TX_COUNT_HEIGHT = 530000
TX_PER_BLOCK = 2
RPC_PORT = 18580
REORG_LIMIT = 200
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
PEERS = []
@classmethod
def header_hash(cls, header):
'''
Given a header return the hash for DeepOnion.
Need to download `x13_hash` module
Source code: https://github.com/MaruCoinOfficial/x13-hash
'''
import x13_hash
return x13_hash.getPoWHash(header)
class Peercoin(Coin):
NAME = "Peercoin"
SHORTNAME = "PPC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("75")]
WIF_BYTE = bytes.fromhex("b7")
GENESIS_HASH = ('0000000032fe677166d54963b62a4677'
'd8957e87c508eaa4fd7eb1c880cd27e3')
DESERIALIZER = lib_tx.DeserializerTxTimeSegWit
DAEMON = daemon.FakeEstimateFeeDaemon
ESTIMATE_FEE = 0.001
RELAY_FEE = 0.01
TX_COUNT = 1691771
TX_COUNT_HEIGHT = 455409
TX_PER_BLOCK = 4
RPC_PORT = 9902
REORG_LIMIT = 5000
PEERS = [
"electrum.peercoinexplorer.net s"
]
VALUE_PER_COIN = 1000000
class PeercoinTestnet(Peercoin):
NAME = "PeercoinTestnet"
SHORTNAME = "tPPC"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('00000001f757bb737f6596503e17cd17'
'b0658ce630cc727c0cca81aec47c9f06')
ESTIMATE_FEE = 0.001
class Trezarcoin(Coin):
NAME = "Trezarcoin"
SHORTNAME = "TZC"
NET = "mainnet"
VALUE_PER_COIN = 1000000
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("42")
P2SH_VERBYTES = [bytes.fromhex("08")]
WIF_BYTE = bytes.fromhex("c2")
GENESIS_HASH = ('24502ba55d673d2ee9170d83dae2d1ad'
'b3bfb4718e4f200db9951382cc4f6ee6')
DESERIALIZER = lib_tx.DeserializerTrezarcoin
HEADER_HASH = lib_tx.DeserializerTrezarcoin.blake2s
HEADER_HASH_GEN = lib_tx.DeserializerTrezarcoin.blake2s_gen
BASIC_HEADER_SIZE = 80
TX_COUNT = 742886
TX_COUNT_HEIGHT = 643128
TX_PER_BLOCK = 2
RPC_PORT = 17299
REORG_LIMIT = 2000
PEERS = [
'electrumx1.trezarcoin.com s t',
]
@classmethod
def genesis_block(cls, block):
'''Check the Genesis block is the right one for this coin.
Return the block less its unspendable coinbase.
'''
header = cls.block_header(block, 0)
header_hex_hash = cls.HEADER_HASH_GEN(header)
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError('genesis block has hash {} expected {}'
.format(header_hex_hash, cls.GENESIS_HASH))
return header + bytes(1)
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
return cls.HEADER_HASH(header)
class Reddcoin(Coin):
NAME = "Reddcoin"
SHORTNAME = "RDD"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3d")
WIF_BYTE = bytes.fromhex("bd")
GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9'
'dc8acbf99e3b4b3110fad4eb74c1decc')
DESERIALIZER = lib_tx.DeserializerReddcoin
TX_COUNT = 5413508
TX_COUNT_HEIGHT = 1717382
TX_PER_BLOCK = 3
RPC_PORT = 45443
class TokenPay(ScryptMixin, Coin):
NAME = "TokenPay"
SHORTNAME = "TPAY"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("41")
P2SH_VERBYTES = [bytes.fromhex("7e")]
WIF_BYTE = bytes.fromhex("b3")
GENESIS_HASH = ('000008b71ab32e585a23f0de642dc113'
'740144e94c0ece047751e9781f953ae9')
DESERIALIZER = lib_tx.DeserializerTokenPay
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 147934
TX_COUNT_HEIGHT = 73967
TX_PER_BLOCK = 100
RPC_PORT = 8800
REORG_LIMIT = 500
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
PEERS = [
"electrum-us.tpay.ai s",
"electrum-eu.tpay.ai s",
]
class Vertcoin(Coin):
NAME = "Vertcoin"
SHORTNAME = "VTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("47")
GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb'
'90013a990ccea12c492d22110489f0c4')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2383423
TX_COUNT_HEIGHT = 759076
TX_PER_BLOCK = 3
RPC_PORT = 5888
REORG_LIMIT = 1000
class Monacoin(Coin):
NAME = "Monacoin"
SHORTNAME = "MONA"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("B0")
GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed'
'1bfc0b376eb54fd7afa42e0d418c8bb6')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2568580
TX_COUNT_HEIGHT = 1029766
TX_PER_BLOCK = 2
RPC_PORT = 9402
REORG_LIMIT = 1000
BLACKLIST_URL = 'https://electrum-mona.org/blacklist.json'
PEERS = [
'electrumx.tamami-foundation.org s t',
'electrumx3.monacoin.nl s t',
'electrumx1.monacoin.ninja s t',
'electrumx2.movsign.info s t',
'electrum-mona.bitbank.cc s t',
'ri7rzlmdaf4eqbza.onion s t',
]
class MonacoinTestnet(Monacoin):
SHORTNAME = "XMN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d'
'638ba8258ae478158f449c321061e0b2')
TX_COUNT = 83602
TX_COUNT_HEIGHT = 83252
TX_PER_BLOCK = 1
RPC_PORT = 19402
REORG_LIMIT = 1000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrumx1.testnet.monacoin.ninja s t',
'electrumx1.testnet.monacoin.nl s t',
]
class MonacoinRegtest(MonacoinTestnet):
NET = "regtest"
GENESIS_HASH = ('7543a69d7c2fcdb29a5ebec2fc064c07'
'4a35253b6f3072c8a749473aa590a29c')
PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class Crown(AuxPowMixin, Coin):
NAME = "Crown"
SHORTNAME = "CRW"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2SH_VERBYTES = [bytes.fromhex("1c")]
GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686'
'14ff3df78c8d13cb814fd7e69a1dc6da')
TX_COUNT = 13336629
TX_COUNT_HEIGHT = 1268206
TX_PER_BLOCK = 10
RPC_PORT = 9341
REORG_LIMIT = 1000
PEERS = [
'sgp-crwseed.crowndns.info s t',
'blr-crwseed.crowndns.info s t',
'sfo-crwseed.crowndns.info s t',
'nyc-crwseed.crowndns.info s t',
'ams-crwseed.crowndns.info s t',
'tor-crwseed.crowndns.info s t',
'lon-crwseed.crowndns.info s t',
'fra-crwseed.crowndns.info s t',
]
class Fujicoin(Coin):
NAME = "Fujicoin"
SHORTNAME = "FJC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("24")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("a4")
GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e'
'a636f70856183086842667a1597714a0')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 170478
TX_COUNT_HEIGHT = 1521676
TX_PER_BLOCK = 1
RPC_PORT = 3776
REORG_LIMIT = 1000
class Neblio(ScryptMixin, Coin):
NAME = "Neblio"
SHORTNAME = "NEBL"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("35")
P2SH_VERBYTES = [bytes.fromhex("70")]
GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25'
'2e6557e222cab9be73181d359cd28bcc')
TX_COUNT = 23675
TX_COUNT_HEIGHT = 22785
TX_PER_BLOCK = 1
RPC_PORT = 6326
REORG_LIMIT = 1000
class Bitzeny(Coin):
NAME = "Bitzeny"
SHORTNAME = "ZNY"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("51")
GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa'
'4acada9e4340d43ca738bf4e9fb8f5ce')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1408733
TX_COUNT_HEIGHT = 1015115
TX_PER_BLOCK = 1
RPC_PORT = 9252
REORG_LIMIT = 1000
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import zny_yespower_0_5
return zny_yespower_0_5.getPoWHash(header)
class CanadaeCoin(AuxPowMixin, Coin):
NAME = "CanadaeCoin"
SHORTNAME = "CDN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1C")
WIF_BYTE = bytes.fromhex("9c")
GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44'
'cabdae9e0028058072181b3fb675d94a')
ESTIMATE_FEE = 0.0001
RELAY_FEE = 0.0001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT = 3455905
TX_COUNT_HEIGHT = 3645419
TX_PER_BLOCK = 1
RPC_PORT = 34330
REORG_LIMIT = 1000
class Denarius(Coin):
NAME = "Denarius"
SHORTNAME = "D"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1E") # Address starts with a D
P2SH_VERBYTES = [bytes.fromhex("5A")]
WIF_BYTE = bytes.fromhex("9E") # WIF starts with a 6
GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32'
'64d641a5dbf0de89fd0182c2c4828fcd')
DESERIALIZER = lib_tx.DeserializerTxTime
TX_COUNT = 4230
RPC_PORT = 32339
ESTIMATE_FEE = 0.00001
RELAY_FEE = 0.00001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT_HEIGHT = 306187
TX_PER_BLOCK = 4000
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import tribushashm
return tribushashm.getPoWHash(header)
class DenariusTestnet(Denarius):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("12")
P2SH_VERBYTES = [bytes.fromhex("74")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778'
'4b8ca2aa98bdd066278d590462a4fdb4')
RPC_PORT = 32338
REORG_LIMIT = 2000
class Sibcoin(Dash):
NAME = "Sibcoin"
SHORTNAME = "SIB"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("3F")
P2SH_VERBYTES = [bytes.fromhex("28")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00000c492bf73490420868bc577680bf'
'c4c60116e7e85343bc624787c21efa4c')
DAEMON = daemon.DashDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 1944
REORG_LIMIT = 1000
PEERS = []
@classmethod
def header_hash(cls, header):
'''
Given a header return the hash for sibcoin.
Need to download `x11_gost_hash` module
Source code: https://github.com/ivansib/x11_gost_hash
'''
import x11_gost_hash
return x11_gost_hash.getPoWHash(header)
class SibcoinTestnet(Sibcoin):
SHORTNAME = "tSIB"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
GENESIS_HASH = ('00000617791d0e19f524387f67e558b2'
'a928b670b9a3b387ae003ad7f9093017')
RPC_PORT = 11944
class Chips(Coin):
NAME = "Chips"
SHORTNAME = "CHIPS"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e'
'4d0c84951537a6f5a7c39a0a9d30e1e7')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 145290
TX_COUNT_HEIGHT = 318637
TX_PER_BLOCK = 2
RPC_PORT = 57776
REORG_LIMIT = 800
class Feathercoin(Coin):
NAME = "Feathercoin"
SHORTNAME = "FTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488BC26")
XPRV_VERBYTES = bytes.fromhex("0488DAEE")
P2PKH_VERBYTE = bytes.fromhex("0E")
WIF_BYTE = bytes.fromhex("8E")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 3170843
TX_COUNT_HEIGHT = 1981777
TX_PER_BLOCK = 2
RPC_PORT = 9337
REORG_LIMIT = 2000
PEERS = [
'electrumx-gb-1.feathercoin.network s t',
'electrumx-gb-2.feathercoin.network s t',
'electrumx-de-1.feathercoin.network s t',
]
class UFO(Coin):
NAME = "UniformFiscalObject"
SHORTNAME = "UFO"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("1B")
P2SH_VERBYTES = [bytes.fromhex("44")]
WIF_BYTE = bytes.fromhex("9B")
GENESIS_HASH = ('ba1d39b4928ab03d813d952daf65fb77'
'97fcf538a9c1b8274f4edc8557722d13')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1608926
TX_COUNT_HEIGHT = 1300154
TX_PER_BLOCK = 2
RPC_PORT = 9888
REORG_LIMIT = 2000
PEERS = [
'electrumx1.ufobject.com s t',
]
class Newyorkcoin(AuxPowMixin, Coin):
NAME = "Newyorkcoin"
SHORTNAME = "NYC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de'
'dfcb3c839fbc8e01ed4044540d08fe48')
TX_COUNT = 5161944
TX_COUNT_HEIGHT = 3948743
TX_PER_BLOCK = 2
REORG_LIMIT = 2000
class NewyorkcoinTestnet(Newyorkcoin):
SHORTNAME = "tNYC"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("71")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("f1")
GENESIS_HASH = ('24463e4d3c625b0a9059f309044c2cf0'
'd7e196cf2a6ecce901f24f681be33c8f')
TX_COUNT = 5161944
TX_COUNT_HEIGHT = 3948743
TX_PER_BLOCK = 2
REORG_LIMIT = 2000
class Bitcore(BitcoinMixin, Coin):
NAME = "Bitcore"
SHORTNAME = "BTX"
P2PKH_VERBYTE = bytes.fromhex("03")
P2SH_VERBYTES = [bytes.fromhex("7D")]
DESERIALIZER = lib_tx.DeserializerSegWit
GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e'
'6f69411d613f6448034508cea52e9574')
TX_COUNT = 126979
TX_COUNT_HEIGHT = 126946
TX_PER_BLOCK = 2
RPC_PORT = 8556
PEERS = [
'ele1.bitcore.cc s t',
'ele2.bitcore.cc s t',
'ele3.bitcore.cc s t',
'ele4.bitcore.cc s t'
]
class GameCredits(Coin):
NAME = "GameCredits"
SHORTNAME = "GAME"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("26")
WIF_BYTE = bytes.fromhex("a6")
GENESIS_HASH = ('91ec5f25ee9a0ffa1af7d4da4db9a552'
'228dd2dc77cdb15b738be4e1f55f30ee')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 316796
TX_COUNT_HEIGHT = 2040250
TX_PER_BLOCK = 2
RPC_PORT = 40001
REORG_LIMIT = 1000
class Machinecoin(Coin):
NAME = "Machinecoin"
SHORTNAME = "MAC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("26"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b2")
GENESIS_HASH = ('6a1f879bcea5471cbfdee1fd0cb2ddcc'
'4fed569a500e352d41de967703e83172')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 137641
TX_COUNT_HEIGHT = 513020
TX_PER_BLOCK = 2
RPC_PORT = 40332
REORG_LIMIT = 800
class BitcoinAtom(Coin):
NAME = "BitcoinAtom"
SHORTNAME = "BCA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("0a")]
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerBitcoinAtom
HEADER_SIZE_POST_FORK = 84
BLOCK_PROOF_OF_STAKE = 0x01
BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00'
TX_COUNT = 295158744
TX_COUNT_HEIGHT = 589197
TX_PER_BLOCK = 10
RPC_PORT = 9136
REORG_LIMIT = 5000
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE]
# New block header format has some extra flags in the end
if len(header) == cls.HEADER_SIZE_POST_FORK:
flags, = util.unpack_le_uint32_from(header, len(header) - 4)
            # Proof of stake blocks have special serialization
if flags & cls.BLOCK_PROOF_OF_STAKE != 0:
header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS
return double_sha256(header_to_be_hashed)
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class Decred(Coin):
NAME = "Decred"
SHORTNAME = "DCR"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02fda926")
XPRV_VERBYTES = bytes.fromhex("02fda4e8")
P2PKH_VERBYTE = bytes.fromhex("073f")
P2SH_VERBYTES = [bytes.fromhex("071a")]
WIF_BYTE = bytes.fromhex("22de")
GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe0'
'89edd4396b86d2de66b0cef42b21d980')
BASIC_HEADER_SIZE = 180
HEADER_HASH = lib_tx.DeserializerDecred.blake256
DESERIALIZER = lib_tx.DeserializerDecred
DAEMON = daemon.DecredDaemon
BLOCK_PROCESSOR = block_proc.DecredBlockProcessor
ENCODE_CHECK = partial(Base58.encode_check,
hash_fn=lib_tx.DeserializerDecred.blake256d)
DECODE_CHECK = partial(Base58.decode_check,
hash_fn=lib_tx.DeserializerDecred.blake256d)
HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'stake_root',
'vote_bits', 'final_state', 'voters', 'fresh_stake',
'revocations', 'pool_size', 'bits', 'sbits',
'block_height', 'size', 'timestamp', 'nonce',
'extra_data', 'stake_version')
HEADER_UNPACK = struct.Struct(
'< i 32s 32s 32s H 6s H B B I I Q I I I I 32s I').unpack_from
TX_COUNT = 4629388
TX_COUNT_HEIGHT = 260628
TX_PER_BLOCK = 17
REORG_LIMIT = 1000
RPC_PORT = 9109
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
return cls.HEADER_HASH(header)
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
class DecredTestnet(Decred):
SHORTNAME = "tDCR"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587d1")
XPRV_VERBYTES = bytes.fromhex("04358397")
P2PKH_VERBYTE = bytes.fromhex("0f21")
P2SH_VERBYTES = [bytes.fromhex("0efc")]
WIF_BYTE = bytes.fromhex("230e")
GENESIS_HASH = (
'a649dce53918caf422e9c711c858837e08d626ecfcd198969b24f7b634a49bac')
BASIC_HEADER_SIZE = 180
ALLOW_ADVANCING_ERRORS = True
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1800
REORG_LIMIT = 1000
RPC_PORT = 19109
class Axe(Dash):
NAME = "Axe"
SHORTNAME = "AXE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02fe52cc")
XPRV_VERBYTES = bytes.fromhex("02fe52f8")
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("cc")
GENESIS_HASH = ('00000c33631ca6f2f61368991ce2dc03'
'306b5bb50bf7cede5cfbba6db38e52e6')
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
DESERIALIZER = lib_tx_axe.DeserializerAxe
TX_COUNT = 18405
TX_COUNT_HEIGHT = 30237
TX_PER_BLOCK = 1
RPC_PORT = 9337
REORG_LIMIT = 1000
PEERS = []
@classmethod
def header_hash(cls, header):
'''
Given a header return the hash for AXE.
Need to download `axe_hash` module
Source code: https://github.com/AXErunners/axe_hash
'''
import x11_hash
return x11_hash.getPoWHash(header)
class AxeTestnet(Axe):
SHORTNAME = "tAxe"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a805837")
XPRV_VERBYTES = bytes.fromhex("3a8061a0")
GENESIS_HASH = ('000005b709662e7bc5e89c71d3aba6c9'
'd4623b4bbf44ac205caec55f4cefb483')
P2PKH_VERBYTE = bytes.fromhex("8c")
P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("ef")
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
TX_PER_BLOCK = 1
RPC_PORT = 19937
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = []
class AxeRegtest(AxeTestnet):
NET = "regtest"
GENESIS_HASH = ('2026b8850f3774a0536152ba868c4dcb'
'de9aef5ffc28a5d23f76f80e9b46e565')
PEERS = []
TX_COUNT_HEIGHT = 1
RPC_PORT = 19869
TX_COUNT = 1
class Xuez(Coin):
NAME = "Xuez"
SHORTNAME = "XUEZ"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("022d2533")
XPRV_VERBYTES = bytes.fromhex("0221312b")
P2PKH_VERBYTE = bytes.fromhex("48")
P2SH_VERBYTES = [bytes.fromhex("12")]
WIF_BYTE = bytes.fromhex("d4")
GENESIS_HASH = ('000000e1febc39965b055e8e0117179a'
'4d18e24e7aaa0c69864c4054b4f29445')
TX_COUNT = 30000
TX_COUNT_HEIGHT = 15000
TX_PER_BLOCK = 1
RPC_PORT = 41799
REORG_LIMIT = 1000
BASIC_HEADER_SIZE = 112
PEERS = []
@classmethod
def header_hash(cls, header):
'''
Given a header return the hash for Xuez.
Need to download `xevan_hash` module
Source code: https://github.com/xuez/xuez
'''
version, = util.unpack_le_uint32_from(header)
import xevan_hash
if version == 1:
return xevan_hash.getPoWHash(header[:80])
else:
return xevan_hash.getPoWHash(header)
# Source: https://github.com/odinblockchain/odin
class Odin(Coin):
NAME = "ODIN"
SHORTNAME = "ODIN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("27561872")
XPRV_VERBYTES = bytes.fromhex("27256746")
P2PKH_VERBYTE = bytes.fromhex("73")
P2SH_VERBYTES = [bytes.fromhex("39")]
WIF_BYTE = bytes.fromhex("8a")
GENESIS_HASH = ('31ca29566549e444cf227a0e2e067aed'
'847c2acc541d3bbf9ca1ae89f4fd57d7')
TX_COUNT = 340000
TX_COUNT_HEIGHT = 340000
TX_PER_BLOCK = 2
RPC_PORT = 22101
REORG_LIMIT = 100
BASIC_HEADER_SIZE = 80
HDR_V4_SIZE = 112
HDR_V4_HEIGHT = 143447
HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
DESERIALIZER = lib_tx.DeserializerSegWit
@classmethod
def static_header_offset(cls, height):
assert cls.STATIC_BLOCK_HEADERS
if height >= cls.HDR_V4_HEIGHT:
relative_v4_offset = (height - cls.HDR_V4_HEIGHT) * cls.HDR_V4_SIZE
return cls.HDR_V4_START_OFFSET + relative_v4_offset
else:
return height * cls.BASIC_HEADER_SIZE
@classmethod
def header_hash(cls, header):
version, = util.unpack_le_uint32_from(header)
if version >= 4:
return super().header_hash(header)
else:
import quark_hash
return quark_hash.getPoWHash(header)
class Pac(Coin):
NAME = "PAC"
SHORTNAME = "PAC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('00000354655ff039a51273fe61d3b493'
'bd2897fe6c16f732dbc4ae19f04b789e')
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("0A")]
WIF_BYTE = bytes.fromhex("CC")
TX_COUNT_HEIGHT = 14939
TX_COUNT = 23708
TX_PER_BLOCK = 2
RPC_PORT = 7111
PEERS = [
'electrum.paccoin.io s t',
'electro-pac.paccoin.io s t'
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
ESTIMATE_FEE = 0.00001
RELAY_FEE = 0.00001
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class PacTestnet(Pac):
SHORTNAME = "tPAC"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
GENESIS_HASH = ('00000da63bd9478b655ef6bf1bf76cd9'
'af05202ab68643f9091e049b2b5280ed')
P2PKH_VERBYTE = bytes.fromhex("78")
P2SH_VERBYTES = [bytes.fromhex("0E")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT_HEIGHT = 16275
TX_COUNT = 16275
TX_PER_BLOCK = 1
RPC_PORT = 17111
class Zcoin(Coin):
NAME = "Zcoin"
SHORTNAME = "XZC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("52")
P2SH_VERBYTES = [bytes.fromhex("07")]
WIF_BYTE = bytes.fromhex("d2")
GENESIS_HASH = ('4381deb85b1b2c9843c222944b616d99'
'7516dcbd6a964e1eaf0def0830695233')
TX_COUNT = 667154
TX_COUNT_HEIGHT = 100266
TX_PER_BLOCK = 4000 # 2000 for 1MB block
IRC_PREFIX = None
RPC_PORT = 8888
REORG_LIMIT = 5000
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
MTP_HEADER_EXTRA_SIZE = 100
MTP_HEADER_DATA_SIZE = 198864
MTP_HEADER_DATA_START = Coin.BASIC_HEADER_SIZE + MTP_HEADER_EXTRA_SIZE
MTP_HEADER_DATA_END = MTP_HEADER_DATA_START + MTP_HEADER_DATA_SIZE
STATIC_BLOCK_HEADERS = False
SESSIONCLS = DashElectrumX
DAEMON = daemon.ZcoinMtpDaemon
DESERIALIZER = lib_tx.DeserializerZcoin
PEERS = [
'electrum.polispay.com'
]
@classmethod
def is_mtp(cls, header):
from electrumx.lib.util import unpack_le_uint32_from, hex_to_bytes
if isinstance(header, str):
nVersion, = unpack_le_uint32_from(hex_to_bytes(header[0:4*2]))
elif isinstance(header, bytes):
nVersion, = unpack_le_uint32_from(header[0:4])
else:
raise "Cannot handle the passed type"
return nVersion & 0x1000
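    # Example of the version-bit check above: a header whose nVersion is
    # 0x1002 has the MTP (Merkle Tree Proof) bit set, since
    # 0x1002 & 0x1000 == 0x1000 (truthy), whereas a legacy nVersion of 2
    # gives 0 (falsy), so such a block is treated as a non-MTP header.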
@classmethod
def block_header(cls, block, height):
sz = cls.BASIC_HEADER_SIZE
if cls.is_mtp(block):
sz += cls.MTP_HEADER_EXTRA_SIZE
return block[:sz]
@classmethod
def header_hash(cls, header):
sz = cls.BASIC_HEADER_SIZE
if cls.is_mtp(header):
sz += cls.MTP_HEADER_EXTRA_SIZE
return double_sha256(header[:sz])
class ZcoinTestnet(Zcoin):
SHORTNAME = "tXZC"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("41")
P2SH_VERBYTES = [bytes.fromhex("b2")]
WIF_BYTE = bytes.fromhex("b9")
GENESIS_HASH = '1e3487fdb1a7d46dac3e8f3e58339c6e' \
'ff54abf6aef353485f3ed64250a35e89'
REORG_LIMIT = 8000
RPC_PORT = 18888
class GINCoin(Coin):
NAME = "GINCoin"
SHORTNAME = "GIN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('00000cd6bde619b2c3b23ad2e384328a'
'450a37fa28731debf748c3b17f91f97d')
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("38")]
WIF_BYTE = bytes.fromhex("3c")
TX_COUNT_HEIGHT = 225000
TX_COUNT = 470784
TX_PER_BLOCK = 4
RPC_PORT = 10211
PEERS = [
'electrum.polispay.com'
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
    # It seems that the main lyra2z_hash python package doesn't work.
# Tested and working with: https://github.com/LapoLab/lyra2z-py
@classmethod
def header_hash(cls, header):
timestamp = util.unpack_le_uint32_from(header, 68)[0]
if timestamp > 1550246400:
import x16rt_hash
return x16rt_hash.getPoWHash(header)
elif timestamp > 1525651200:
import lyra2z_hash
return lyra2z_hash.getPoWHash(header)
import neoscrypt
return neoscrypt.getPoWHash(header)
class Polis(Coin):
NAME = "Polis"
SHORTNAME = "POLIS"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("03E25D7E")
XPRV_VERBYTES = bytes.fromhex("03E25945")
GENESIS_HASH = ('000009701eb781a8113b1af1d814e2f0'
'60f6408a2c990db291bc5108a1345c1e')
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("38")]
WIF_BYTE = bytes.fromhex("3c")
TX_COUNT_HEIGHT = 280600
TX_COUNT = 635415
TX_PER_BLOCK = 4
RPC_PORT = 24127
PEERS = [
'electrum.polispay.com'
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class MNPCoin(Coin):
NAME = "MNPCoin"
SHORTNAME = "MNP"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('00000924036c67d803ce606ded814312'
'7e62fa2111dd3b063880a1067c69ccb1')
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("35")]
WIF_BYTE = bytes.fromhex("37")
TX_COUNT_HEIGHT = 248000
TX_COUNT = 506447
TX_PER_BLOCK = 4
RPC_PORT = 13373
PEERS = [
'electrum.polispay.com'
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import quark_hash
return quark_hash.getPoWHash(header)
class ColossusXT(Coin):
NAME = "ColossusXT"
SHORTNAME = "COLX"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('a0ce8206c908357008c1b9a8ba2813af'
'f0989ca7f72d62b14e652c55f02b4f5c')
P2PKH_VERBYTE = bytes.fromhex("1E")
P2SH_VERBYTES = [bytes.fromhex("0D")]
WIF_BYTE = bytes.fromhex("D4")
TX_COUNT_HEIGHT = 356500
BASIC_HEADER_SIZE = 80
HDR_V5_HEIGHT = 500000
HDR_V5_SIZE = 112
HDR_V5_START_OFFSET = HDR_V5_HEIGHT * BASIC_HEADER_SIZE
TX_COUNT = 761041
TX_PER_BLOCK = 4
RPC_PORT = 51473
PEERS = [
'electrum.polispay.com'
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def static_header_offset(cls, height):
assert cls.STATIC_BLOCK_HEADERS
if height >= cls.HDR_V5_HEIGHT:
relative_v4_offset = (height - cls.HDR_V5_HEIGHT) * cls.HDR_V5_SIZE
return cls.HDR_V5_START_OFFSET + relative_v4_offset
else:
return height * cls.BASIC_HEADER_SIZE
@classmethod
def header_hash(cls, header):
version, = util.unpack_le_uint32_from(header)
if version >= 5:
return super().header_hash(header)
else:
import quark_hash
return quark_hash.getPoWHash(header)
class Minexcoin(EquihashMixin, Coin):
NAME = "Minexcoin"
SHORTNAME = "MNX"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("4b")
GENESIS_HASH = ('490a36d9451a55ed197e34aca7414b35'
'd775baa4a8e896f1c577f65ce2d214cb')
STATIC_BLOCK_HEADERS = True
BASIC_HEADER_SIZE = 209
HEADER_SIZE_NO_SOLUTION = 140
TX_COUNT = 327963
TX_COUNT_HEIGHT = 74495
TX_PER_BLOCK = 5
RPC_PORT = 8022
CHUNK_SIZE = 960
PEERS = [
'electrumx.xpresit.net s t',
'elex01-ams.turinex.eu s t',
'eu.minexpool.nl s t'
]
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(cls.HEADER_SIZE_NO_SOLUTION)
class Groestlcoin(Coin):
NAME = "Groestlcoin"
SHORTNAME = "GRS"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("24")
GENESIS_HASH = ('00000ac5927c594d49cc0bdb81759d0d'
'a8297eb614683d3acb62f0703b639023')
DESERIALIZER = lib_tx.DeserializerGroestlcoin
TX_COUNT = 115900
TX_COUNT_HEIGHT = 1601528
TX_PER_BLOCK = 5
RPC_PORT = 1441
BLACKLIST_URL = 'https://groestlcoin.org/blacklist.json'
PEERS = [
'electrum1.groestlcoin.org s t',
'electrum2.groestlcoin.org s t',
'6brsrbiinpc32tfc.onion t',
'xkj42efxrcy6vbfw.onion t',
]
def grshash(data):
import groestlcoin_hash
return groestlcoin_hash.getHash(data, len(data))
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
return cls.grshash(header)
ENCODE_CHECK = partial(Base58.encode_check, hash_fn=grshash)
DECODE_CHECK = partial(Base58.decode_check, hash_fn=grshash)
class GroestlcoinTestnet(Groestlcoin):
SHORTNAME = "TGRS"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000000ffbb50fc9898cdd36ec163e6ba'
'23230164c0052a28876255b7dcf2cd36')
RPC_PORT = 17766
PEERS = [
'electrum-test1.groestlcoin.org s t',
'electrum-test2.groestlcoin.org s t',
'7frvhgofuf522b5i.onion t',
'aocojvqcybdoxekv.onion t',
]
class Pivx(Coin):
NAME = "PIVX"
SHORTNAME = "PIVX"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("022D2533")
XPRV_VERBYTES = bytes.fromhex("0221312B")
GENESIS_HASH = ('0000041e482b9b9691d98eefb48473405c0b8ec31b76df3797c74a78680ef818')
P2PKH_VERBYTE = bytes.fromhex("1e")
    P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("d4")
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
TX_PER_BLOCK = 1
STATIC_BLOCK_HEADERS = False
RPC_PORT = 51470
ZEROCOIN_HEADER = 112
ZEROCOIN_START_HEIGHT = 863787
ZEROCOIN_BLOCK_VERSION = 4
@classmethod
def static_header_len(cls, height):
'''Given a header height return its length.'''
if (height >= cls.ZEROCOIN_START_HEIGHT):
return cls.ZEROCOIN_HEADER
else:
return cls.BASIC_HEADER_SIZE
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
version, = struct.unpack('<I', header[:4])
if version >= cls.ZEROCOIN_BLOCK_VERSION:
return super().header_hash(header)
else:
import quark_hash
return quark_hash.getPoWHash(header)
class PivxTestnet(Pivx):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a8061a0")
XPRV_VERBYTES = bytes.fromhex("3a805837")
GENESIS_HASH = ('0000041e482b9b9691d98eefb48473405c0b8ec31b76df3797c74a78680ef818')
P2PKH_VERBYTE = bytes.fromhex("8B")
    P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("EF")
TX_PER_BLOCK = 4
RPC_PORT = 51472
ZEROCOIN_START_HEIGHT = 201564
class Bitg(Coin):
NAME = "BitcoinGreen"
SHORTNAME = "BITG"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("06")]
WIF_BYTE = bytes.fromhex("2e")
GENESIS_HASH = (
'000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b')
DAEMON = daemon.DashDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 9332
REORG_LIMIT = 1000
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import quark_hash
return quark_hash.getPoWHash(header)
class tBitg(Bitg):
SHORTNAME = "tBITG"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("62")
P2SH_VERBYTES = [bytes.fromhex("0c")]
WIF_BYTE = bytes.fromhex("6c")
GENESIS_HASH = (
'000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b')
RPC_PORT = 19332
class EXOS(Coin):
NAME = "EXOS"
SHORTNAME = "EXOS"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
GENESIS_HASH = ('00000036090a68c523471da7a4f0f958'
'c1b4403fef74a003be7f71877699cab7')
P2PKH_VERBYTE = bytes.fromhex("1C")
    P2SH_VERBYTES = [bytes.fromhex("57")]
WIF_BYTE = bytes.fromhex("9C")
RPC_PORT = 4561
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 4
DAEMON = daemon.PreLegacyRPCDaemon
DESERIALIZER = lib_tx.DeserializerTxTime
@classmethod
def header_hash(cls, header):
version, = util.unpack_le_uint32_from(header)
if version > 2:
return double_sha256(header)
else:
return hex_str_to_hash(EXOS.GENESIS_HASH)
class EXOSTestnet(EXOS):
SHORTNAME = "tEXOS"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
GENESIS_HASH = ('0000059bb2c2048493efcb0f1a034972'
'b3ce4089d54c93b69aaab212fb369887')
P2PKH_VERBYTE = bytes.fromhex("4B")
    P2SH_VERBYTES = [bytes.fromhex("CE")]
WIF_BYTE = bytes.fromhex("CB")
RPC_PORT = 14561
@classmethod
def header_hash(cls, header):
version, = util.unpack_le_uint32_from(header)
if version > 2:
return double_sha256(header)
else:
return hex_str_to_hash(EXOSTestnet.GENESIS_HASH)
class SmartCash(Coin):
NAME = "SmartCash"
SHORTNAME = "SMART"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3f")
P2SH_VERBYTES = [bytes.fromhex("12")]
WIF_BYTE = bytes.fromhex("bf")
GENESIS_HASH = ('000007acc6970b812948d14ea5a0a13d'
'b0fdd07d5047c7e69101fa8b361e05a4')
DESERIALIZER = lib_tx.DeserializerSmartCash
RPC_PORT = 9679
REORG_LIMIT = 5000
TX_COUNT = 1115016
TX_COUNT_HEIGHT = 541656
TX_PER_BLOCK = 1
ENCODE_CHECK = partial(Base58.encode_check,
hash_fn=lib_tx.DeserializerSmartCash.keccak)
DECODE_CHECK = partial(Base58.decode_check,
hash_fn=lib_tx.DeserializerSmartCash.keccak)
HEADER_HASH = lib_tx.DeserializerSmartCash.keccak
DAEMON = daemon.SmartCashDaemon
SESSIONCLS = SmartCashElectrumX
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
return cls.HEADER_HASH(header)
class NIX(Coin):
NAME = "NIX"
SHORTNAME = "NIX"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("35")]
GENESIS_HASH = ('dd28ad86def767c3cfc34267a950d871'
'fc7462bc57ea4a929fc3596d9b598e41')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 114240
TX_COUNT_HEIGHT = 87846
TX_PER_BLOCK = 3
RPC_PORT = 6215
REORG_LIMIT = 1000
class NIXTestnet(NIX):
SHORTNAME = "tNIX"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
GENESIS_HASH = ('dd28ad86def767c3cfc34267a950d871'
'fc7462bc57ea4a929fc3596d9b598e41')
P2PKH_VERBYTE = bytes.fromhex("01")
    P2SH_VERBYTES = [bytes.fromhex("03")]
RPC_PORT = 16215
DESERIALIZER = lib_tx.DeserializerSegWit
class Noir(Coin):
NAME = "Noir"
SHORTNAME = "NOR"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2SH_VERBYTES = [bytes.fromhex("07")]
WIF_BYTE = bytes.fromhex("D0")
GENESIS_HASH = ('23911212a525e3d149fcad6c559c8b17'
'f1e8326a272a75ff9bb315c8d96433ef')
RPC_PORT = 8825
TX_COUNT = 586369
TX_COUNT_HEIGHT = 379290
TX_PER_BLOCK = 5
class BitcoinPlus(Coin):
NAME = "BitcoinPlus"
SHORTNAME = "XBC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('0000005f6a28e686f641c616e56182d1'
'b43afbe08a223f23bda23cdf9d55b882')
DESERIALIZER = lib_tx.DeserializerTxTime
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 1479247
TX_COUNT_HEIGHT = 749740
TX_PER_BLOCK = 2
RPC_PORT = 8885
REORG_LIMIT = 2000
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x13_hash
return x13_hash.getPoWHash(header)
class Myriadcoin(AuxPowMixin, Coin):
NAME = "Myriadcoin"
SHORTNAME = "XMY"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("09")]
WIF_BYTE = bytes.fromhex("b2")
GENESIS_HASH = ('00000ffde4c020b5938441a0ea3d314b'
'f619eff0b38f32f78f7583cffa1ea485')
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
TX_COUNT = 1976629
TX_COUNT_HEIGHT = 2580356
TX_PER_BLOCK = 20
REORG_LIMIT = 2000
RPC_PORT = 10889
class MyriadcoinTestnet(Myriadcoin):
NAME = "Myriadcoin"
SHORTNAME = "XMT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("58")
P2SH_VERBYTES = [bytes.fromhex("bc")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('0000017ce2a79c8bddafbbe47c004aa9'
'2b20678c354b34085f62b762084b9788')
class Sparks(Coin):
NAME = "Sparks"
SHORTNAME = "SPK"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('00000a5c6ddfaac5097218560d5b92d4'
'16931cfeba1abf10c81d1d6a232fc8ea')
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("0A")]
WIF_BYTE = bytes.fromhex("C6")
TX_COUNT_HEIGHT = 117400
TX_COUNT = 162310
TX_PER_BLOCK = 4
RPC_PORT = 8818
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
import neoscrypt
return neoscrypt.getPoWHash(header)
# Source: https://github.com/LIMXTEC/BitSend
class Bitsend(Coin):
NAME = "Bitsend"
SHORTNAME = "BSD"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("66")
WIF_BYTE = bytes.fromhex("cc")
GENESIS_HASH = ('0000012e1b8843ac9ce8c18603658eaf'
'8895f99d3f5e7e1b7b1686f35e3c087a')
TX_COUNT = 974672
TX_COUNT_HEIGHT = 586022
TX_PER_BLOCK = 2
RPC_PORT = 8800
REORG_LIMIT = 1000
DESERIALIZER = lib_tx.DeserializerSegWit
XEVAN_TIMESTAMP = 1477958400
PEERS = [
'ele1.bitsend.cc s t',
'51.15.121.233 s t'
]
@classmethod
def header_hash(cls, header):
timestamp, = util.unpack_le_uint32_from(header, 68)
if timestamp > cls.XEVAN_TIMESTAMP:
import xevan_hash
return xevan_hash.getPoWHash(header)
else:
import x11_hash
return x11_hash.getPoWHash(header)
@classmethod
def genesis_block(cls, block):
header = cls.block_header(block, 0)
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError('genesis block has hash {} expected {}'
.format(header_hex_hash, cls.GENESIS_HASH))
return header + bytes(1)
class Ritocoin(Coin):
NAME = "Ritocoin"
SHORTNAME = "RITO"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0534E7CA")
XPRV_VERBYTES = bytes.fromhex("05347EAC")
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("69")]
GENESIS_HASH = ('00000075e344bdf1c0e433f453764b18'
'30a7aa19b2a5213e707502a22b779c1b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1188090
TX_COUNT_HEIGHT = 296030
TX_PER_BLOCK = 3
RPC_PORT = 8766
REORG_LIMIT = 55
PEERS = [
'electrum-rito.minermore.com s t'
]
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x21s_hash
return x21s_hash.getPoWHash(header)
class Ravencoin(Coin):
NAME = "Ravencoin"
SHORTNAME = "RVN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("3C")
P2SH_VERBYTES = [bytes.fromhex("7A")]
GENESIS_HASH = ('0000006b444bc2f2ffe627be9d9e7e7a'
'0730000870ef6eb6da46c8eae389df90')
DESERIALIZER = lib_tx.DeserializerSegWit
X16RV2_ACTIVATION_TIME = 1569945600 # algo switch to x16rv2 at this timestamp
KAWPOW_ACTIVATION_TIME = 1588788000 # kawpow algo activation time
KAWPOW_ACTIVATION_HEIGHT = 1219736
KAWPOW_HEADER_SIZE = 120
TX_COUNT = 5626682
TX_COUNT_HEIGHT = 887000
TX_PER_BLOCK = 6
RPC_PORT = 8766
REORG_LIMIT = 100
PEERS = [
]
@classmethod
def static_header_offset(cls, height):
'''Given a header height return its offset in the headers file.'''
if cls.KAWPOW_ACTIVATION_HEIGHT < 0 or height <= cls.KAWPOW_ACTIVATION_HEIGHT:
result = height * cls.BASIC_HEADER_SIZE
else: # RVN block header size increased with kawpow fork
baseoffset = cls.KAWPOW_ACTIVATION_HEIGHT * cls.BASIC_HEADER_SIZE
result = baseoffset + ((height-cls.KAWPOW_ACTIVATION_HEIGHT) * cls.KAWPOW_HEADER_SIZE)
return result
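    # Worked example of the arithmetic above, using the constants defined in
    # this class (illustrative only): offset(1219736) = 1219736 * 80
    # = 97,578,880 bytes, since headers up to the activation height are
    # 80 bytes each; the next header is 120 bytes, so
    # offset(1219737) = 97,578,880 + 1 * 120 = 97,579,000 bytes.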
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
timestamp = util.unpack_le_uint32_from(header, 68)[0]
assert cls.KAWPOW_ACTIVATION_TIME > 0
def reverse_bytes(data):
b = bytearray(data)
b.reverse()
return bytes(b)
if timestamp >= cls.KAWPOW_ACTIVATION_TIME:
import kawpow
nNonce64 = util.unpack_le_uint64_from(header, 80)[0] # uint64_t
mix_hash = reverse_bytes(header[88:120]) # uint256
header_hash = reverse_bytes(double_sha256(header[:80]))
final_hash = reverse_bytes(kawpow.light_verify(header_hash, mix_hash, nNonce64))
return final_hash
elif timestamp >= cls.X16RV2_ACTIVATION_TIME:
import x16rv2_hash
return x16rv2_hash.getPoWHash(header)
else:
import x16r_hash
return x16r_hash.getPoWHash(header)
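    # The branches in header_hash are ordered newest-first:
    # KAWPOW_ACTIVATION_TIME (1588788000) is later than
    # X16RV2_ACTIVATION_TIME (1569945600), so blocks timestamped between the
    # two fall through to the x16rv2 branch and older blocks fall through
    # to x16r.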
class RavencoinTestnet(Ravencoin):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('000000ecfc5e6324a079542221d00e10'
'362bdc894d56500c414060eea8a3ad5a')
X16RV2_ACTIVATION_TIME = 1567533600
KAWPOW_ACTIVATION_HEIGHT = 231544
KAWPOW_ACTIVATION_TIME = 1585159200
TX_COUNT = 496158
TX_COUNT_HEIGHT = 420500
TX_PER_BLOCK = 1
RPC_PORT = 18766
PEER_DEFAULT_PORTS = {'t': '50003', 's': '50004'}
REORG_LIMIT = 100
PEERS = [
]
class Bolivarcoin(Coin):
NAME = "Bolivarcoin"
SHORTNAME = "BOLI"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("55")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("D5")
GENESIS_HASH = ('00000e4fc293a1912b9d73cbb8d8f727'
'0007a7d84382f1370661e65d5d57b1f6')
TX_COUNT = 1082515
TX_COUNT_HEIGHT = 540410
TX_PER_BLOCK = 10
RPC_PORT = 3563
REORG_LIMIT = 800
PEERS = []
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class Onixcoin(Coin):
NAME = "Onixcoin"
SHORTNAME = "ONX"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("4B")
GENESIS_HASH = ('000007140b7a6ca0b64965824f5731f6'
'e86daadf19eb299033530b1e61236e43')
TX_COUNT = 431808
TX_COUNT_HEIGHT = 321132
TX_PER_BLOCK = 10
RPC_PORT = 41019
REORG_LIMIT = 800
PEERS = []
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class Electra(Coin):
NAME = "Electra"
SHORTNAME = "ECA"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("28")]
WIF_BYTE = bytes.fromhex("A1")
GENESIS_HASH = ('00000f98da995de0ef1665c7d3338687'
'923c1199230a44ecbdb5cec9306e4f4e')
RPC_PORT = 5788
TX_COUNT = 615729
TX_COUNT_HEIGHT = 205243
TX_PER_BLOCK = 3
REORG_LIMIT = 100
DESERIALIZER = lib_tx.DeserializerElectra
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
version, = util.unpack_le_uint32_from(header)
import nist5_hash
if version != 8:
return nist5_hash.getPoWHash(header)
else:
return double_sha256(header)
class ECCoin(Coin):
NAME = "ECCoin"
SHORTNAME = "ECC"
NET = "mainnet"
DESERIALIZER = lib_tx.DeserializerECCoin
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("08")]
GENESIS_HASH = ('a60ac43c88dbc44b826cf315352a8a7b373d2af8b6e1c4c4a0638859c5e9ecd1')
TX_COUNT = 4661197
TX_COUNT_HEIGHT = 2114846
TX_PER_BLOCK = 10
VALUE_PER_COIN = 1000000
RPC_PORT = 19119
@classmethod
def header_hash(cls, header):
        # you have to install the scrypt python module (pip install scrypt)
import scrypt
return scrypt.hash(header, header, 1024, 1, 1, 32)
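        # The call above requests scrypt with N=1024, r=1, p=1 and a 32-byte
        # digest, using the header itself as the salt - the usual
        # Litecoin-style scrypt proof-of-work parameterisation.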
class Bellcoin(Coin):
NAME = "Bellcoin"
SHORTNAME = "BELL"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000008f3b6bd10c2d03b06674a006b8d'
'9731f6cb58179ef1eee008cee2209603')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 264129
TX_COUNT_HEIGHT = 219574
TX_PER_BLOCK = 5
RPC_PORT = 25252
REORG_LIMIT = 1000
PEERS = [
'bell.electrumx.japanesecoin-pool.work s t',
'bell.streetcrypto7.com s t',
]
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import bell_yespower
return bell_yespower.getPoWHash(header)
class CPUchain(Coin):
NAME = "CPUchain"
SHORTNAME = "CPU"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1C")
P2SH_VERBYTES = [bytes.fromhex("1E")]
GENESIS_HASH = ('000024d8766043ea0e1c9ad42e7ea4b5'
'fdb459887bd80b8f9756f3d87e128f12')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 4471
TX_COUNT_HEIGHT = 3491
TX_PER_BLOCK = 2
RPC_PORT = 19707
REORG_LIMIT = 1000
PEERS = [
'electrumx.cpuchain.org s t',
]
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import cpupower
return cpupower.getPoWHash(header)
class Xaya(NameIndexMixin, AuxPowMixin, Coin):
NAME = "Xaya"
SHORTNAME = "CHI"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1c")
P2SH_VERBYTES = [bytes.fromhex("1e")]
WIF_BYTE = bytes.fromhex("82")
GENESIS_HASH = ('e5062d76e5f50c42f493826ac9920b63'
'a8def2626fd70a5cec707ec47a4c4651')
TX_COUNT = 1147749
TX_COUNT_HEIGHT = 1030000
TX_PER_BLOCK = 2
DESERIALIZER = lib_tx.DeserializerXaya
TRUNCATED_HEADER_SIZE = 80 + 5
RPC_PORT = 8396
PEERS = [
'seeder.xaya.io s50002',
'xaya.domob.eu s50002',
]
# Op-codes for name operations
OP_NAME_REGISTER = OpCodes.OP_1
OP_NAME_UPDATE = OpCodes.OP_2
# Valid name prefixes.
NAME_REGISTER_OPS = [OP_NAME_REGISTER, "name", -1, OpCodes.OP_2DROP,
OpCodes.OP_DROP]
NAME_UPDATE_OPS = [OP_NAME_UPDATE, "name", -1, OpCodes.OP_2DROP,
OpCodes.OP_DROP]
NAME_OPERATIONS = [
NAME_REGISTER_OPS,
NAME_UPDATE_OPS,
]
@classmethod
def genesis_block(cls, block):
super().genesis_block(block)
# In Xaya, the genesis block's coinbase is spendable. Thus unlike
# the generic genesis_block() method, we return the full block here.
return block
class XayaTestnet(Xaya):
SHORTNAME = "XCH"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("58")
P2SH_VERBYTES = [bytes.fromhex("5a")]
WIF_BYTE = bytes.fromhex("e6")
GENESIS_HASH = ('5195fc01d0e23d70d1f929f21ec55f47'
'e1c6ea1e66fae98ee44cbbc994509bba')
TX_COUNT = 51557
TX_COUNT_HEIGHT = 49000
TX_PER_BLOCK = 1
RPC_PORT = 18396
PEERS = []
class XayaRegtest(XayaTestnet):
NET = "regtest"
GENESIS_HASH = ('6f750b36d22f1dc3d0a6e483af453010'
'22646dfc3b3ba2187865f5a7d6d83ab1')
RPC_PORT = 18493
# Source: https://github.com/GZR0/GRZ0
class GravityZeroCoin(ScryptMixin, Coin):
NAME = "GravityZeroCoin"
SHORTNAME = "GZRO"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("26")
WIF_BYTE = bytes.fromhex("26")
GENESIS_HASH = ('0000028bfbf9ccaed8f28b3ca6b3ffe6b65e29490ab0e4430679bf41cc7c164f')
DAEMON = daemon.FakeEstimateLegacyRPCDaemon
TX_COUNT = 100
TX_COUNT_HEIGHT = 747635
TX_PER_BLOCK = 2
RPC_PORT = 36442
ESTIMATE_FEE = 0.01
RELAY_FEE = 0.01
class Simplicity(Coin):
NAME = "Simplicity"
SHORTNAME = "SPL"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0444d5bc")
XPRV_VERBYTES = bytes.fromhex("0444f0a3")
P2PKH_VERBYTE = bytes.fromhex("12")
    P2SH_VERBYTES = [bytes.fromhex("3b")]
WIF_BYTE = bytes.fromhex("5d")
GENESIS_HASH = ('f4bbfc518aa3622dbeb8d2818a606b82c2b8b1ac2f28553ebdb6fc04d7abaccf')
RPC_PORT = 11958
TX_COUNT = 1726548
TX_COUNT_HEIGHT = 1040000
TX_PER_BLOCK = 5
REORG_LIMIT = 100
DESERIALIZER = lib_tx.DeserializerSimplicity
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
version, = util.unpack_le_uint32_from(header)
if version < 2:
import quark_hash
return quark_hash.getPoWHash(header)
else:
return double_sha256(header)
class Myce(Coin):
NAME = "Myce"
SHORTNAME = "YCE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("32")
    P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('0000c74cc66c72cb1a327c5c1d4893ae5276aa50be49fb23cec21df1a2f20d87')
RPC_PORT = 23512
TX_COUNT = 1568977
TX_COUNT_HEIGHT = 774450
TX_PER_BLOCK = 3
REORG_LIMIT = 100
DESERIALIZER = lib_tx.DeserializerSimplicity
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
version, = util.unpack_le_uint32_from(header)
if version < 7:
import scrypt
return scrypt.hash(header, header, 1024, 1, 1, 32)
else:
return double_sha256(header)
class Navcoin(Coin):
NAME = "Navcoin"
SHORTNAME = "NAV"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("35")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("96")
GENESIS_HASH = ('00006a4e3e18c71c6d48ad6c261e2254'
'fa764cf29607a4357c99b712dfbb8e6a')
DESERIALIZER = lib_tx.DeserializerTxTimeSegWitNavCoin
TX_COUNT = 137641
TX_COUNT_HEIGHT = 3649662
TX_PER_BLOCK = 2
RPC_PORT = 44444
REORG_LIMIT = 1000
@classmethod
def header_hash(cls, header):
if int.from_bytes(header[:4], "little") > 6:
return double_sha256(header)
else:
import x13_hash
return x13_hash.getPoWHash(header)
class Primecoin(PrimeChainPowMixin, Coin):
NAME = "Primecoin"
SHORTNAME = "XPM"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("53")]
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('963d17ba4dc753138078a2f56afb3af9'
'674e2546822badff26837db9a0152106')
DAEMON = daemon.FakeEstimateFeeDaemon
ESTIMATE_FEE = 1.024
TX_COUNT = 7138730
TX_COUNT_HEIGHT = 3639500
TX_PER_BLOCK = 2
RPC_PORT = 9912
REORG_LIMIT = 5000
PEERS = [
'electrumx.primecoin.org s t',
]
class PrimecoinTestnet(Primecoin):
NAME = "PrimecoinTestnet"
SHORTNAME = "tXPM"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('221156cf301bc3585e72de34fe1efdb6'
'fbd703bc27cfc468faa1cdd889d0efa0')
RPC_PORT = 9914
PEERS = [
'electrumx.testnet.primecoin.org t',
]
class Unobtanium(AuxPowMixin, Coin):
NAME = "Unobtanium"
SHORTNAME = "UNO"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("82")
P2SH_VERBYTES = [bytes.fromhex("1e")]
WIF_BYTE = bytes.fromhex("e0")
GENESIS_HASH = ('000004c2fc5fffb810dccc197d603690'
'099a68305232e552d96ccbe8e2c52b75')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 65535
REORG_LIMIT = 5000
class Linx(Coin):
NAME = "Linx"
SHORTNAME = "LINX"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("4b")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("cb")
GENESIS_HASH = ('3bafea350a70f75e7a1cd279999faed7'
'1a51852aae88fed3c38553cecc810a92')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9381
REORG_LIMIT = 5000
class Flashcoin(Coin):
NAME = "Flashcoin"
SHORTNAME = "FLASH"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("44")
P2SH_VERBYTES = [bytes.fromhex("82")]
WIF_BYTE = bytes.fromhex("c4")
GENESIS_HASH = ('aa0cf4f5ce0a3c550ce5674c1e808c41'
'7cf5077b4e95bda1d6fbaeaf4258972b')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9385
REORG_LIMIT = 5000
class Defcoin(Coin):
NAME = "Defcoin"
SHORTNAME = "DEFC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1e")
    P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('192047379f33ffd2bbbab3d53b9c4b9e'
'9b72e48f888eadb3dcf57de95a6038ad')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9386
REORG_LIMIT = 5000
class Smileycoin(Coin):
NAME = "Smileycoin"
SHORTNAME = "SMLY"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
    P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('660f734cf6c6d16111bde201bbd21228'
'73f2f2c078b969779b9d4c99732354fd')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9388
REORG_LIMIT = 5000
class Iop(Coin):
NAME = "Iop"
SHORTNAME = "IOP"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("75")
P2SH_VERBYTES = [bytes.fromhex("AE")]
WIF_BYTE = bytes.fromhex("31")
GENESIS_HASH = ('00000000bf5f2ee556cb9be8be64e077'
'6af14933438dbb1af72c41bfb6c82db3')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 8337
REORG_LIMIT = 5000
class Egulden(Coin):
NAME = "Egulden"
SHORTNAME = "EFL"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("30")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('6d39f28ad01a7edd3e2374b355cf8c7f'
'8dbc1c5e4596ad3642fa6d10c2599217')
TX_COUNT = 13336629
TX_COUNT_HEIGHT = 1268206
TX_PER_BLOCK = 10
RPC_PORT = 9402
REORG_LIMIT = 5000
class Ixcoin(AuxPowMixin, Coin):
NAME = "ixcoin"
SHORTNAME = "IXC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("8a")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('0000000001534ef8893b025b9c1da672'
'50285e35c9f76cae36a4904fdf72c591')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9406
REORG_LIMIT = 5000
class Batacoin(Coin):
NAME = "bata"
SHORTNAME = "BTA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('b4bee36fd54a6176fd832f462641415c'
'142d50e4b378f71c041870c2b1186bc8')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9412
REORG_LIMIT = 5000
class Digitalcoin(Coin):
NAME = "digitalcoin"
SHORTNAME = "DGC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('5e039e1ca1dbf128973bf6cff98169e4'
'0a1b194c3b91463ab74956f413b2f9c8')
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9413
REORG_LIMIT = 5000
class Cannacoin(Coin):
NAME = "cannacoin"
SHORTNAME = "CCN"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1C")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("bd")
GENESIS_HASH = ('f1b4cdf03c86099a0758f1c018d1a10b'
'f05afab436c92b93b42bb88970de9821')
DESERIALIZER = lib_tx.DeserializerReddcoin
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 1234
REORG_LIMIT = 5000
class Europecoin(Coin):
NAME = "europecoin"
SHORTNAME = "ERC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("a8")
GENESIS_HASH = ('000d0da26987ead011c5d568e627f7e3'
'd4a4f83a0b280b1134d8e7e366377f9a')
BASIC_HEADER_SIZE = 88
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9412
REORG_LIMIT = 5000
class Adcoin(Coin):
NAME = "Adcoin"
SHORTNAME = "ACC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('000000fc5276647fd959f718c9526f87'
'f4858c4ef62f2e29d3772e4e37040a25')
BASIC_HEADER_SIZE = 112
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9416
REORG_LIMIT = 5000
class Lynx(Coin):
NAME = "Lynx"
SHORTNAME = "LYNX"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("2d")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("ad")
GENESIS_HASH = ('984b30fc9bb5e5ff424ad7f4ec193053'
'8a7b14a2d93e58ad7976c23154ea4a76')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9139
REORG_LIMIT = 5000
class LitecoinCash(Coin):
NAME = "LitecoinCash"
SHORTNAME = "LCC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1c")
P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 1
RPC_PORT = 9140
REORG_LIMIT = 5000
class BitcoinPrivate(EquihashMixin, Coin):
NAME = "BitcoinPrivate"
SHORTNAME = "BTCP"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1325")
P2SH_VERBYTES = [bytes.fromhex("13AF")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4'
'99804b7bebc22df55f8b834301260602')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 9335
REORG_LIMIT = 5000
class Aryacoin(Coin):
NAME = "aryacoin"
SHORTNAME = "AYA"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("019d9cfe")
XPRV_VERBYTES = bytes.fromhex("019da462")
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("6f")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('b553727635006d7faade229d152482df'
'b9da7822d41cf0cad9ffa82a54f67803')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 10
RPC_PORT = 9151
REORG_LIMIT = 800
class Donu(Coin):
NAME = "donu"
SHORTNAME = "DONU"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("35")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b1")
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
GENESIS_HASH = ('5f7f26e24291f5be2351e1dcdab18bf9'
'4cee718940e6b9f2fbb46227434c3f12')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 10
RPC_PORT = 26381
REORG_LIMIT = 800
class Quebecoin(AuxPowMixin, Coin):
NAME = "Quebecoin"
SHORTNAME = "QBC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("3a")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("ba")
GENESIS_HASH = ('000008c2d57759af6462352ee9f4923d'
'97401cb599a9318e6595a2a74c26ea74')
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
TX_PER_BLOCK = 20
REORG_LIMIT = 2000
RPC_PORT = 10890
class CARI(Coin):
NAME = "CARI"
SHORTNAME = "CARI"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("042F2736")
XPRV_VERBYTES = bytes.fromhex("041F352E")
GENESIS_HASH = ('000005bd970b7d83eb879472fb48b2c01ed8155d7126ac3e0c201755c0c85c23')
P2PKH_VERBYTE = bytes.fromhex("D")
P2SH_VERBYTES = [bytes.fromhex("D")]
WIF_BYTE = bytes.fromhex("2B")
TX_COUNT_HEIGHT = 336846
TX_COUNT = 670075
TX_PER_BLOCK = 1
STATIC_BLOCK_HEADERS = False
RPC_PORT = 31814
REORG_LIMIT = 100
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import quark_hash
return quark_hash.getPoWHash(header) |
py | 1a4b3ee80fb62a55d7edc2030fb8d63840a56e77 | import argparse
import logging
import numpy as np
import os
import random
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import sys
from baselines.vectorizers import build_vectorizer_from_df, load_vectorized_data
from baselines.avg_fasttext import build_avg_fasttext_from_df, load_avg_fasttext
from baselines.doc2vec import build_doc2vec_from_df, load_doc2vec
from shared.global_constants import RES_DIR
from shared.loaders import load_train_val_nodes
from shared.utils import save_cli_options, save_dict_to_json
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def parse_arguments(args_to_parse):
""" Parse CLI arguments """
descr = 'Train a baseline model'
parser = argparse.ArgumentParser(description=descr)
general = parser.add_argument_group('General settings')
general.add_argument('name', type=str, help="The name of the experimental directory - used for saving and loading.")
general.add_argument(
'--input-data-dir',
type=str,
required=True,
help="The name of the directory from which to load the pre-processed data",
)
general.add_argument(
"--stemmer-path",
type=str,
required=True,
help="Path to the SALAMA stemming dictionary",
)
general.add_argument(
'--model',
type=str,
default='tf-idf',
choices=['tf-idf', 'count', 'doc2vec', 'fasttext'],
help='Select the model type to use before feeding into a logistic regression layer',
)
general.add_argument("--seed", type=int, default=12321, help='Random seed for reproducability')
training = parser.add_argument_group('Training settings')
training.add_argument(
'--train-set-label-proportion',
type=float,
default=0.2,
choices=[0.01, 0.05, 0.1, 0.2],
help='Ratio of nodes in the training set which we keep labelled',
)
# CLI options of the form `--doc2vec-XXXX` pertain to doc2vec
training.add_argument(
'--doc2vec-epochs',
type=int,
default=10,
help="The number of epochs to run when training Doc2Vec",
)
training.add_argument(
'--doc2vec-feature-dims',
type=int,
default=300,
help="The Doc2vec feature vector size",
)
training.add_argument(
'--doc2vec-dm',
type=int,
choices=[0, 1],
default=1,
help="The training regime to use for Doc2Vec: Distributed Memory (1) or Distributed Bag of Words (0)",
)
return parser.parse_args(args_to_parse)
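# Example invocation (illustrative; the script filename and the directory /
# stemmer paths below are placeholders, not taken from this repository):
#   python train_baseline.py my-experiment \
#       --input-data-dir my_dataset_dir \
#       --stemmer-path my_stemming_dict.json \
#       --model tf-idf --train-set-label-proportion 0.2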
def main(args):
""" Entry point for training a doc2vec model """
random.seed(args.seed)
np.random.seed(args.seed)
results_dir = os.path.join(RES_DIR, args.name)
os.makedirs(results_dir, exist_ok=True)
save_cli_options(args, results_dir)
preproc_dir = os.path.join(results_dir, 'preproc')
if args.model == 'tf-idf' or args.model == 'count':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_vectorizer_from_df(
vectorizer_name=args.model,
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
)
print(f'Load {args.model} data...')
input_features, labels = load_vectorized_data(preproc_dir, args.model)
elif args.model == 'fasttext':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_avg_fasttext_from_df(
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
)
print('Load average FastText data...')
input_features, labels = load_avg_fasttext(preproc_dir)
elif args.model == 'doc2vec':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_doc2vec_from_df(
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
training_regime=args.doc2vec_dm,
embedding_dimension=args.doc2vec_feature_dims,
num_epochs=args.doc2vec_epochs,
)
print('Load Doc2vec data...')
input_features, labels = load_doc2vec(preproc_dir)
else:
raise Exception(f'Unrecognised model type: {args.model}')
train_nodes, val_nodes, test_nodes = load_train_val_nodes(
preproc_dir=os.path.join(RES_DIR, args.input_data_dir),
train_set_label_proportion=args.train_set_label_proportion,
as_numpy=True,
)
print('Train classifier ...')
classifier = LogisticRegression(random_state=args.seed).fit(input_features[train_nodes, :], labels[train_nodes])
print('Get accuracies...')
train_predictions = classifier.predict(input_features[train_nodes, :])
val_predictions = classifier.predict(input_features[val_nodes, :])
test_predictions = classifier.predict(input_features[test_nodes, :])
train_accuracy = sum(train_predictions == labels[train_nodes]) / len(train_predictions)
val_accuracy = sum(val_predictions == labels[val_nodes]) / len(val_predictions)
test_accuracy = sum(test_predictions == labels[test_nodes]) / len(test_predictions)
test_micro_f1 = f1_score(labels[test_nodes], test_predictions, average='micro')
test_macro_f1 = f1_score(labels[test_nodes], test_predictions, average='macro')
print(f'Train Accuracy: {train_accuracy}')
print(f'Validation Accuracy: {val_accuracy}')
print(f'Test Accuracy: {test_accuracy}')
print(f'Test Micro F1: {test_micro_f1}')
print(f'Test Macro F1: {test_macro_f1}')
output_save_dir = os.path.join(results_dir, f'model_{args.train_set_label_proportion}')
os.makedirs(output_save_dir, exist_ok=True)
save_dict_to_json(
{
'train_accuracy': train_accuracy,
'val_accuracy': val_accuracy,
'test_accuracy': test_accuracy,
'test_micro_f1': test_micro_f1,
'test_macro_f1': test_macro_f1,
},
os.path.join(output_save_dir, 'metric.json'),
)
# from sklearn.model_selection import learning_curve
# train_sizes, train_scores, test_scores = learning_curve(
# classifier, input_features[train_nodes, :], labels[train_nodes]
# )
# print(train_scores)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)
|
py | 1a4b402abc81938f7a43136c079ac1bba69c686e | """Unit tests for movescu.py"""
import logging
import os
import shutil
import subprocess
import sys
import time
import pytest
from pydicom import dcmread
from pydicom.dataset import Dataset
from pydicom.uid import (
ExplicitVRLittleEndian, ImplicitVRLittleEndian,
DeflatedExplicitVRLittleEndian, ExplicitVRBigEndian
)
from pynetdicom import (
AE, evt, debug_logger, DEFAULT_TRANSFER_SYNTAXES,
QueryRetrievePresentationContexts,
StoragePresentationContexts
)
from pynetdicom.sop_class import (
VerificationSOPClass, CTImageStorage,
PatientRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelMove,
PatientStudyOnlyQueryRetrieveInformationModelMove,
)
#debug_logger()
APP_DIR = os.path.join(os.path.dirname(__file__), '../')
APP_FILE = os.path.join(APP_DIR, 'movescu', 'movescu.py')
LOG_CONFIG = os.path.join(APP_DIR, 'echoscu', 'logging.cfg')
DATA_DIR = os.path.join(APP_DIR, '../', 'tests', 'dicom_files')
DATASET_FILE = os.path.join(DATA_DIR, 'CTImageStorage.dcm')
def which(program):
# Determine if a given program is installed on PATH
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
def start_movescu(args):
"""Start the movescu.py app and return the process."""
pargs = [which('python'), APP_FILE, 'localhost', '11112'] + [*args]
return subprocess.Popen(pargs)
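# For example, start_movescu(['-k', 'PatientName=']) launches the app under
# test roughly as:
#   python <APP_FILE> localhost 11112 -k PatientName=
# i.e. pointed at the query/retrieve SCP the tests start on port 11112.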
class TestMoveSCU(object):
"""Tests for findscu.py"""
def setup(self):
"""Run prior to each test"""
self.ae = None
self.response = ds = Dataset()
ds.file_meta = Dataset()
ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
ds.SOPClassUID = CTImageStorage
ds.SOPInstanceUID = '1.2.3.4'
ds.PatientName = 'Citizen^Jan'
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_default(self):
"""Test default settings."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
def handle_release(event):
events.append(event)
def handle_store(event):
return 0x0000
handlers = [
(evt.EVT_C_MOVE, handle_move),
(evt.EVT_RELEASED, handle_release)
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.supported_contexts = QueryRetrievePresentationContexts
ae.requested_contexts = StoragePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
store_scp = ae.start_server(
('', 11113), block=False,
evt_handlers=[(evt.EVT_C_STORE, handle_store)]
)
p = start_movescu(['-k', "PatientName="])
p.wait()
assert p.returncode == 0
store_scp.shutdown()
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
assert events[0].identifier.PatientName == ""
assert events[1].event == evt.EVT_RELEASED
requestor = events[1].assoc.requestor
assert b'MOVESCU ' == requestor.ae_title
assert 16382 == requestor.maximum_length
assert b'ANY-SCP ' == requestor.primitive.called_ae_title
assert 0 == len(requestor.extended_negotiation)
assert (1, 1) == requestor.asynchronous_operations
assert {} == requestor.sop_class_common_extended
assert {} == requestor.sop_class_extended
assert requestor.role_selection == {}
        assert requestor.user_identity is None
cxs = requestor.primitive.presentation_context_definition_list
assert len(cxs) == 12
cxs = {cx.abstract_syntax: cx for cx in cxs}
assert PatientRootQueryRetrieveInformationModelMove in cxs
cx = cxs[PatientRootQueryRetrieveInformationModelMove]
assert cx.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_no_peer(self, capfd):
"""Test trying to connect to non-existent host."""
p = start_movescu(['-k', "PatientName="])
p.wait()
assert p.returncode == 1
out, err = capfd.readouterr()
assert "Association request failed: unable to connect to remote" in err
assert "TCP Initialisation Error: Connection refused" in err
assert "Association Aborted" in err
def test_bad_input(self, capfd):
"""Test being unable to read the input file."""
p = start_movescu(['-f', 'no-such-file.dcm'])
p.wait()
assert p.returncode == 1
out, err = capfd.readouterr()
assert 'Cannot read input file no-such-file.dcm' in err
def test_flag_version(self, capfd):
"""Test --version flag."""
p = start_movescu(['--version'])
p.wait()
assert p.returncode == 0
out, err = capfd.readouterr()
assert 'movescu.py v' in out
def test_flag_quiet(self, capfd):
"""Test --quiet flag."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
p = start_movescu(['-q', '-k', 'PatientName='])
p.wait()
assert p.returncode == 1
out, err = capfd.readouterr()
assert out == err == ''
scp.shutdown()
def test_flag_verbose(self, capfd):
"""Test --verbose flag."""
def handle_store(event):
return 0x0000
def handle_move(event):
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
ae.supported_contexts = StoragePresentationContexts
store_scp = ae.start_server(
('', 11113), block=False,
evt_handlers=[(evt.EVT_C_STORE, handle_store)]
)
p = start_movescu(['-v', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
out, err = capfd.readouterr()
assert "Requesting Association" in err
assert "Association Accepted" in err
assert "Sending Move Request" in err
assert "Move SCP Result" in err
assert "Releasing Association" in err
assert "Accept Parameters" not in err
store_scp.shutdown()
scp.shutdown()
def test_flag_debug(self, capfd):
"""Test --debug flag."""
def handle_store(event):
return 0x0000
def handle_move(event):
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
ae.supported_contexts = StoragePresentationContexts
store_scp = ae.start_server(
('', 11113), block=False,
evt_handlers=[(evt.EVT_C_STORE, handle_store)]
)
p = start_movescu(['-d', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
out, err = capfd.readouterr()
assert "Releasing Association" in err
assert "Accept Parameters" in err
store_scp.shutdown()
scp.shutdown()
def test_flag_log_collision(self):
"""Test error with -q -v and -d flag."""
p = start_movescu(['-v', '-d'])
p.wait()
assert p.returncode != 0
@pytest.mark.skip("No way to test comprehensively")
def test_flag_log_level(self):
"""Test --log-level flag."""
pass
def test_flag_aet(self):
"""Test --calling-aet flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
ae.supported_contexts = StoragePresentationContexts
p = start_movescu(['-aet', 'MYSCU', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
requestor = events[0].assoc.requestor
assert b'MYSCU ' == requestor.ae_title
def test_flag_aec(self):
"""Test --called-aet flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-aec', 'YOURSCP', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
requestor = events[0].assoc.requestor
assert b'YOURSCP ' == requestor.primitive.called_ae_title
def test_flag_aem(self):
"""Test --called-aem flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
ae.supported_contexts = StoragePresentationContexts
p = start_movescu(['-aem', 'SOMESCP', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
assert b'SOMESCP' == events[0].move_destination.strip()
def test_flag_ta(self, capfd):
"""Test --acse-timeout flag."""
events = []
def handle_requested(event):
events.append(event)
time.sleep(0.1)
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
def handle_abort(event):
events.append(event)
handlers = [
(evt.EVT_C_MOVE, handle_move),
(evt.EVT_ABORTED, handle_abort),
(evt.EVT_REQUESTED, handle_requested),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-ta', '0.05', '-d', '-k', 'PatientName='])
p.wait()
assert p.returncode == 1
time.sleep(0.1)
scp.shutdown()
out, err = capfd.readouterr()
assert "ACSE timeout reached while waiting for response" in err
assert events[0].event == evt.EVT_REQUESTED
assert events[1].event == evt.EVT_ABORTED
def test_flag_td(self, capfd):
"""Test --dimse-timeout flag."""
events = []
def handle_move(event):
events.append(event)
time.sleep(0.1)
yield 'localhost', 11113
yield 0
yield 0x0000, None
def handle_abort(event):
events.append(event)
handlers = [
(evt.EVT_C_MOVE, handle_move),
(evt.EVT_ABORTED, handle_abort),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-td', '0.05', '-d', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
time.sleep(0.1)
scp.shutdown()
out, err = capfd.readouterr()
assert "DIMSE timeout reached while waiting for message" in err
assert events[0].event == evt.EVT_C_MOVE
assert events[1].event == evt.EVT_ABORTED
@pytest.mark.skip("Don't think this can be tested")
def test_flag_tn(self, capfd):
"""Test --network-timeout flag."""
pass
def test_flag_max_pdu(self):
"""Test --max-pdu flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
def handle_release(event):
events.append(event)
handlers = [
(evt.EVT_C_MOVE, handle_move),
(evt.EVT_RELEASED, handle_release)
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['--max-pdu', '123456', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
assert events[1].event == evt.EVT_RELEASED
requestor = events[1].assoc.requestor
assert 123456 == requestor.maximum_length
def test_flag_patient(self):
"""Test the -P flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-P', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
cx = events[0].context
assert cx.abstract_syntax == (
PatientRootQueryRetrieveInformationModelMove
)
def test_flag_study(self):
"""Test the -S flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-S', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
cx = events[0].context
assert cx.abstract_syntax == StudyRootQueryRetrieveInformationModelMove
def test_flag_patient_study(self):
"""Test the -O flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 0
yield 0x0000, None
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['-O', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert events[0].event == evt.EVT_C_MOVE
cx = events[0].context
assert cx.abstract_syntax == (
PatientStudyOnlyQueryRetrieveInformationModelMove
)
def test_flag_store(self):
"""Test the --store flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 1
yield 0xFF00, self.response
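        # 0xFF00 is the Pending status: it signals one more C-STORE
        # sub-operation carrying self.response (the CT dataset built in
        # setup), which the --store storage SCP should write to disk.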
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['--store', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert 'CT.1.2.3.4' in os.listdir()
os.remove('CT.1.2.3.4')
assert 'CT.1.2.3.4' not in os.listdir()
def test_flag_store_port(self):
"""Test the --store-port flag."""
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11114
yield 1
yield 0xFF00, self.response
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(
['--store', '--store-port', '11114', '-k', 'PatientName=']
)
p.wait()
assert p.returncode == 0
scp.shutdown()
assert 'CT.1.2.3.4' in os.listdir()
os.remove('CT.1.2.3.4')
assert 'CT.1.2.3.4' not in os.listdir()
def test_flag_store_aet(self):
"""Test the --store-aet flag."""
# Value not actually checked
events = []
def handle_move(event):
events.append(event)
yield 'localhost', 11113
yield 1
yield 0xFF00, self.response
def handle_accepted(event):
events.append(event)
handlers = [
(evt.EVT_ACCEPTED, handle_accepted),
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(
['--store', '--store-aet', 'SOMESCP', '-k', 'PatientName=']
)
p.wait()
assert p.returncode == 0
scp.shutdown()
assert 'CT.1.2.3.4' in os.listdir()
os.remove('CT.1.2.3.4')
assert 'CT.1.2.3.4' not in os.listdir()
def test_flag_output(self):
"""Test the -od --output-directory flag."""
def handle_move(event):
yield 'localhost', 11113
yield 1
yield 0xFF00, self.response
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert 'test_dir' not in os.listdir()
p = start_movescu(['--store', '-od', 'test_dir', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert 'CT.1.2.3.4' in os.listdir('test_dir')
shutil.rmtree('test_dir')
assert 'test_dir' not in os.listdir()
def test_flag_ignore(self):
"""Test the --ignore flag."""
def handle_move(event):
yield 'localhost', 11113
yield 1
yield 0xFF00, self.response
handlers = [
(evt.EVT_C_MOVE, handle_move),
]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.requested_contexts = StoragePresentationContexts
ae.supported_contexts = QueryRetrievePresentationContexts
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
p = start_movescu(['--store', '--ignore', '-k', 'PatientName='])
p.wait()
assert p.returncode == 0
scp.shutdown()
assert 'CT.1.2.3.4' not in os.listdir()
|
py | 1a4b4090dcfafb8d8c61202fc36d1a58e22d8109 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import manager
from neutron.callbacks import resources
from neutron.tests import base
def callback_1(*args, **kwargs):
callback_1.counter += 1
callback_id_1 = manager._get_id(callback_1)
def callback_2(*args, **kwargs):
callback_2.counter += 1
callback_id_2 = manager._get_id(callback_2)
def callback_raise(*args, **kwargs):
raise Exception()
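# A sketch of the flow exercised by the tests below (not part of the original
# file): manager.subscribe(callback_1, resources.PORT, events.BEFORE_CREATE)
# registers the callback, and manager.notify(resources.PORT,
# events.BEFORE_CREATE, obj) then invokes every matching callback, wrapping
# any exception raised into a CallbackFailure of NotificationError entries.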
class CallBacksManagerTestCase(base.BaseTestCase):
def setUp(self):
super(CallBacksManagerTestCase, self).setUp()
self.manager = manager.CallbacksManager()
callback_1.counter = 0
callback_2.counter = 0
def test_subscribe_invalid_resource_raise(self):
with testtools.ExpectedException(exceptions.Invalid):
self.manager.subscribe(mock.ANY, 'foo_resource', mock.ANY)
def test_subscribe_invalid_event_raise(self):
self.assertRaises(exceptions.Invalid,
self.manager.subscribe,
mock.ANY, mock.ANY, 'foo_event')
def test_subscribe(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.assertIsNotNone(
self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
self.assertIn(callback_id_1, self.manager._index)
def test_subscribe_is_idempotent(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.assertEqual(
1,
len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
callbacks = self.manager._index[callback_id_1][resources.PORT]
self.assertEqual(1, len(callbacks))
def test_subscribe_multiple_callbacks(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_2, resources.PORT, events.BEFORE_CREATE)
self.assertEqual(2, len(self.manager._index))
self.assertEqual(
2,
len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
def test_unsubscribe(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.unsubscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.assertNotIn(
callback_id_1,
self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
self.assertNotIn(callback_id_1, self.manager._index)
def test_unsubscribe_unknown_callback(self):
self.manager.subscribe(
callback_2, resources.PORT, events.BEFORE_CREATE)
self.manager.unsubscribe(callback_1, mock.ANY, mock.ANY)
self.assertEqual(1, len(self.manager._index))
def test_unsubscribe_is_idempotent(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.unsubscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.unsubscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.assertNotIn(callback_id_1, self.manager._index)
self.assertNotIn(callback_id_1,
self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
def test_unsubscribe_by_resource(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_DELETE)
self.manager.subscribe(
callback_2, resources.PORT, events.BEFORE_DELETE)
self.manager.unsubscribe_by_resource(callback_1, resources.PORT)
self.assertNotIn(
callback_id_1,
self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
self.assertIn(
callback_id_2,
self.manager._callbacks[resources.PORT][events.BEFORE_DELETE])
self.assertNotIn(callback_id_1, self.manager._index)
def test_unsubscribe_all(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_DELETE)
self.manager.subscribe(
callback_1, resources.ROUTER, events.BEFORE_CREATE)
self.manager.unsubscribe_all(callback_1)
self.assertNotIn(
callback_id_1,
self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
self.assertNotIn(callback_id_1, self.manager._index)
def test_notify_none(self):
self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
self.assertEqual(0, callback_1.counter)
self.assertEqual(0, callback_2.counter)
def test_notify_with_exception(self):
with mock.patch.object(self.manager, '_notify_loop') as n:
n.return_value = ['error']
self.assertRaises(exceptions.CallbackFailure,
self.manager.notify,
mock.ANY, events.BEFORE_CREATE, mock.ANY)
expected_calls = [
mock.call(mock.ANY, 'before_create', mock.ANY),
mock.call(mock.ANY, 'abort_create', mock.ANY)
]
n.assert_has_calls(expected_calls)
def test_notify_handle_exception(self):
self.manager.subscribe(
callback_raise, resources.PORT, events.BEFORE_CREATE)
e = self.assertRaises(exceptions.CallbackFailure, self.manager.notify,
resources.PORT, events.BEFORE_CREATE, self)
self.assertIsInstance(e.errors[0], exceptions.NotificationError)
def test_notify_called_once_with_no_failures(self):
with mock.patch.object(self.manager, '_notify_loop') as n:
n.return_value = False
self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
n.assert_called_once_with(
resources.PORT, events.BEFORE_CREATE, mock.ANY)
def test__notify_loop_single_event(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_2, resources.PORT, events.BEFORE_CREATE)
self.manager._notify_loop(
resources.PORT, events.BEFORE_CREATE, mock.ANY)
self.assertEqual(1, callback_1.counter)
self.assertEqual(1, callback_2.counter)
def test__notify_loop_multiple_events(self):
self.manager.subscribe(
callback_1, resources.PORT, events.BEFORE_CREATE)
self.manager.subscribe(
callback_1, resources.ROUTER, events.BEFORE_DELETE)
self.manager.subscribe(
callback_2, resources.PORT, events.BEFORE_CREATE)
self.manager._notify_loop(
resources.PORT, events.BEFORE_CREATE, mock.ANY)
self.manager._notify_loop(
resources.ROUTER, events.BEFORE_DELETE, mock.ANY)
self.assertEqual(2, callback_1.counter)
self.assertEqual(1, callback_2.counter)
|
py | 1a4b40e995798afada83e6b9c288cfeced1bce08 | from functools import lru_cache
from pytezos.rpc.contract import Contract
from pytezos.rpc.node import RpcQuery
class Context(RpcQuery):
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self._node.get(f'{self._path}/raw/json?depth=1', cache=self._cache)
@property
@lru_cache(maxsize=None)
def contracts(self):
"""
Attention: very slow method
:return: list of Contracts
"""
return RpcQuery(
path=f'{self._path}/contracts',
node=self._node,
child_class=Contract,
**self._kwargs
)
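# Hedged usage sketch (the path string and contract address below are
# assumptions, not taken from this file): given a configured RPC node,
# ctx = Context(path='chains/main/blocks/head/context', node=node) returns the
# raw context JSON via ctx(), while ctx.contracts is an RpcQuery whose child
# entries are expected to resolve to Contract objects, e.g.
# ctx.contracts['KT1...'].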
|