blob_id (string, 40) | directory_id (string, 40) | path (string, 3-616) | content_id (string, 40) | detected_licenses (sequence, 0-112) | license_type (string, 2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-10.2M) | extension (string, 188 classes) | content (string, 3-10.2M) | authors (sequence, 1) | author_id (string, 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42a62da8e1d51a7a3b3e573cdd0c1b6b3f423315 | 80afa26ba73b53f38e3fc21bf395030762fe8981 | /576. Out of Boundary Paths.py | 5481266d25818462836a2c72949c9f604ad39dc5 | [] | no_license | iamshivamgoswami/Random-DSA-Questions | 45b402063dbd2e31da2eee7590b6991aa624637d | e36250d08cf0de59cd0a59b4f3293e55793b1a6f | refs/heads/main | 2023-07-15T15:48:36.363321 | 2021-08-26T03:40:47 | 2021-08-26T03:40:47 | 392,702,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | class Solution:
def findPaths(self, m: int, n: int, maxMove: int, i: int, j: int) -> int:
        d = {}  # memo: (i, j, moves_left) -> number of paths that leave the grid
        def dfs(N, i, j):
            if (i, j, N) in d:
                return d[(i, j, N)]
            if i == m or i < 0 or j == n or j < 0:
                return 1  # stepped off the board: one escaping path
            if N == 0:
                return 0  # out of moves while still inside the grid
            s = 0
            for x, y in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
                s += dfs(N - 1, x, y)
            d[(i, j, N)] = s
            return s
        # Python ints are unbounded, so applying the modulus once at the end is safe.
        return dfs(maxMove, i, j) % (10 ** 9 + 7)
| [
"[email protected]"
] | |
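A quick sanity check for the memoized DFS above; the driver below is not part of the original file, and the expected values are the two worked examples from LeetCode 576:

if __name__ == "__main__":
    s = Solution()
    print(s.findPaths(2, 2, 2, 0, 0))  # expected 6
    print(s.findPaths(1, 3, 3, 0, 1))  # expected 12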
0e65583a2f3733544f9d2a193e93f68be851c9df | 4b2c5fe21ffcc35837bba06d2c3b43c5116f74bd | /Blackjack.py | 8d5c04a77bb98a1ca5b4be7994bed8812a47cdf5 | [] | no_license | joydas65/Codeforces-Problems | 8870cbbf1db9fa12b961cee7aaef60960af714ae | eb0f5877d0fede95af18694278029add7385973d | refs/heads/master | 2023-06-23T07:16:49.151676 | 2023-06-17T07:28:24 | 2023-06-17T07:28:24 | 184,123,514 | 5 | 1 | null | 2020-11-28T07:28:03 | 2019-04-29T18:33:23 | Python | UTF-8 | Python | false | false | 150 | py | n = int(input())
# Codeforces "Blackjack": the first card is the queen of spades (worth 10),
# so we count second cards worth exactly n - 10 points.
if n <= 10 or n > 21:
    print(0)  # n - 10 is non-positive or above 11 (ace): impossible
elif (n >= 11 and n <= 19) or n == 21:
    print(4)  # point values 1..9 and 11 (ace): four suits each
else:
    print(15)  # value 10: four tens, jacks, kings plus the three remaining queens
| [
"[email protected]"
] | |
60b4b7f8ae3624a487bdf78b9ff1449db7aa2e84 | 9fdff458f1e20321aaa70d4669eeacb9423c9f36 | /multi/train/train_sources_weights_loop.py | c928b2c8a1b372db96c7899e0a3cd85c0df91aa4 | [] | no_license | leemengwei/GNRX | 11639716f220f4721e521998ff282ee40ca50275 | 80c5b78826187a519588af3d8c71fb40ba7b94fe | refs/heads/main | 2023-04-01T00:34:12.638628 | 2021-03-29T05:02:03 | 2021-03-29T05:02:03 | 352,524,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,721 | py | # python train_sources_weights_loop.py -A
import config
import datetime
import pandas as pd
import numpy as np
import os,sys
import matplotlib.pyplot as plt
import shutil
import argparse
from xgboost.sklearn import XGBRegressor
from sklearn.linear_model import LinearRegression, LassoCV, Ridge, Lasso
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import PolynomialFeatures
import warnings
import grasp_from_zhikong_web
import dependency_wind_bases
#from IPython import embed
#import tqdm
#from sko.GA import GA
#import torch
warnings.filterwarnings('ignore')
def evaluate(cap:float, real:pd.Series, predict:pd.Series, method:str='MSE') -> np.array:
if method == 'MSE':
error = np.nanmean((real - predict)**2)
elif method == 'MAE':
error = np.nanmean(np.abs(real - predict))
else:
import AccuracyFormula #ZouQianKun's lib
data = pd.DataFrame({'time':real.index, 'gt':real, 'predict':predict})
#For NorthWest region
if method == 'PianChaKaoHe':
error = AccuracyFormula.CalcDIP_byDate(data, 'time', 'gt', 'predict', cap, 0.25)
#For other regions
elif method == 'KouDian_RMSE':
error = AccuracyFormula.CalcKouDian_RMSE_byDate(data, 'time', 'gt', 'predict', cap, 0.8) #80% for wind RMSE
elif method == 'KouDian_MAE':
error = AccuracyFormula.CalcKouDian_MAE_byDate(data, 'time', 'gt', 'predict', cap, 0.85) #85% for wind MAE
else:
raise NotImplementedError
error = pd.DataFrame.from_dict(error, orient='index', columns=['koudian'])
error = error.values.sum()
return error
def save_log(Y_test:pd.Series, optimized_combination_test:pd.Series, business_power_test:pd.Series, filename:str='log') -> np.array:
out = pd.DataFrame({'real': Y_test, 'combined': optimized_combination_test, 'business': business_power_test})
out.to_csv(os.path.join('..', 'output', '%s.csv'%filename))
def batchGradientDescent(x, y, theta, alpha = 0.1, maxIterations=10000):
m = x.shape[0]
alpha = alpha/m
for i in range(0, maxIterations):
y_pred = np.dot(x, theta)
ERROR_loss = 1/m * np.sum(np.abs(y_pred - y))
#ERROR_gradient:
mask = (y-y_pred).copy()
mask[y-y_pred>0] = 1
mask[y-y_pred<=0] = -1
#theta = theta - alpha * gradient
        theta = theta + alpha * mask.dot(x)  # alpha already carries the 1/m factor (divided above)
print('epoch', i, ERROR_loss)
return np.array(theta)
def obj_func(W):
error = np.nanmean(np.abs((W*X_train.values).sum(axis=1) - Y_train.values))
#error = np.nanmean(np.abs((W*X_train.values).sum(axis=1) - Y_train.values)**2)
return error
def save_output(station_name, meteor_powers, w):
#save output:
if not os.path.exists(os.path.join('..', 'output', station_name)):
os.mkdir(os.path.join('..', 'output', station_name))
meteor_weights = get_ready_output(meteor_powers.columns, w)
meteor_weights.to_csv(os.path.join('..', 'output', station_name, 'weights.csv'))
shutil.copy(os.path.join('..', 'data', 'model_curve_data', '%s.csv'%station_name), os.path.join('..', 'output', station_name, 'curve.csv'))
return meteor_weights
def get_ready_output(column_names, w):
col_names = []
    for i in column_names:
        # str.strip removes a *character set* from both ends, not a prefix;
        # drop the 'pow_' prefix explicitly instead.
        col_names.append(i[len('pow_'):] if i.startswith('pow_') else i)
meteor_weights = pd.DataFrame(w.reshape(1, -1), columns=col_names)
aux = { \
'day_2_factor_for_day_5': 0.333333333,
'day_3_factor_for_day_5': 0.333333333,
'day_4_factor_for_day_5': 0.333333333,
'day_2_factor_for_day_6': 0.45,
'day_4_factor_for_day_6': 0.35,
'day_5_factor_for_day_6': 0.2,
'day_3_factor_for_day_7': 0.4,
'day_4_factor_for_day_7': 0.05,
'day_5_factor_for_day_7': 0.2,
'day_6_factor_for_day_7': 0.35,
'day_5_factor_for_day_8': 0.333333333,
'day_6_factor_for_day_8': 0.333333333,
'day_7_factor_for_day_8': 0.333333333,
'day_5_factor_for_day_9': 0.45,
'day_7_factor_for_day_9': 0.35,
'day_8_factor_for_day_9': 0.2,
'day_6_factor_for_day_10': 0.4,
'day_7_factor_for_day_10': 0.05,
'day_8_factor_for_day_10': 0.2,
'day_9_factor_for_day_10': 0.35,
'day_1_factor_for_day_11': 0.4,
'day_3_factor_for_day_11': 0.3,
'day_4_factor_for_day_11': 0.3,
'day_lowest_power_threshold': 5,
'day_set_lowest_to': 0
}
for name in aux.keys():
meteor_weights[name] = aux[name]
meteor_weights = meteor_weights.T
meteor_weights.columns=['weights']
meteor_weights['source_name'] = meteor_weights.index
meteor_weights.index = meteor_weights['source_name']
meteor_weights = meteor_weights.drop('source_name', axis=1)
return meteor_weights
def get_steady_meteor_powers(meteor_powers):
meteor_powers = meteor_powers.fillna(0)
singular_column_names = list(meteor_powers.columns[meteor_powers.values.mean(axis=0)==0])
if len(singular_column_names) > 0:
meteor_powers = meteor_powers.drop(singular_column_names, axis=1)
print("Notice: %s drop for calculation steadyness"%singular_column_names)
return meteor_powers
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--train_length', '-TRAIN', type=int, default=config.train_length)
parser.add_argument('--VISUALIZATION', "-V", action='store_true', default=False)
parser.add_argument('--ANIMATION', "-A", action='store_true', default=False)
parser.add_argument('--use_spd', "-SPD", action='store_true', default=False)
parser.add_argument('--test_length', '-TEST', type=int, default=config.test_length)
parser.add_argument('--shift_months', '-S', type=int, default=config.shift_months)
parser.add_argument('--data_gap_day', '-G', type=int, default=0)
parser.add_argument('--loop_days', '-L', type=int, default=config.loop_days)
parser.add_argument('--method', '-M', type=str, default='ode')
parser.add_argument('--filename', '-F', type=str, default=config.filename)
args = parser.parse_args()
args.test_length = int(args.test_length - 1)
print(args)
shift_now = datetime.datetime.today() - datetime.timedelta(args.shift_months*31)
station_names = pd.read_csv(args.filename, header=None).iloc[:,:]
start_date_grasp_date = shift_now - datetime.timedelta(args.train_length+args.test_length+args.loop_days+args.data_gap_day)
start_date_grasp = start_date_grasp_date.strftime("%Y-%m-%d")
end_date_grasp = shift_now.strftime("%Y-%m-%d")
sources_to_use = pd.read_csv(os.path.join('..', 'data', 'sources_to_use.csv'), index_col=0)
sources_to_use = sources_to_use[sources_to_use['use_or_not'] == 1]
logs = pd.DataFrame(index = list(pd.Series(station_names.values.reshape(-1))), columns= ['ERROR_optimized_train', 'ERROR_optimized_test', 'ERROR_business_train', 'ERROR_business_test', 'improvement_train (%)', 'improvement_test (%)', 'remark'])
for col in logs.columns:
for row in logs.index:
logs.loc[row, col] = []
#STATION LOOP:
overall_LSE_imp = []
overall_ERROR_imp = []
for station_name in station_names.iterrows():
station_name = station_name[1].values[0]
print("\n\nStation: %s"%station_name)
print("grasp data %s~%s"%(start_date_grasp, end_date_grasp))
#read zhikong data:
raw_data_all, cap, plant_name, FarmType, longitude, latitude = grasp_from_zhikong_web.read_statistic_base(station_name, start_date_grasp, end_date_grasp, readweather=1)
raw_data_all = raw_data_all.dropna(subset=['power_true'])
cap = float(cap)
if len(raw_data_all)==0:
log = 'no gt'
logs.loc[station_name, 'remark'] = log
print(log)
continue
assert (FarmType == 0) #0 for wind
raw_data_all = raw_data_all.loc[np.append(True, (raw_data_all.power_true.values[1:] - raw_data_all.power_true.values[:-1]) != 0)]
#get powers:
real_power = np.clip(np.abs(raw_data_all['power_true']), -cap, cap)
if 'fore_power' in raw_data_all.columns:
business_power = np.clip(np.abs(raw_data_all['fore_power']), -cap, cap)
else:
log = 'no fore power'
logs.loc[station_name, 'remark'] = log
raw_data_all['fore_power'] = 0.1
business_power = np.clip(np.abs(raw_data_all['fore_power']), -cap, cap)
print(log)
column_names_to_use = []
for i in raw_data_all.columns:
if args.use_spd:
use_feature = (i.startswith('pow_') or i.startswith('spd_'))
else:
use_feature = i.startswith('pow_')
if use_feature and (i in sources_to_use.index):
column_names_to_use.append(i)
#get de min power:
if 'de_min' in sources_to_use.index:
#Use curve to give power prediction:
DeMin_curve = pd.read_csv(os.path.join('..', 'data', 'model_curve_data', '%s.csv'%station_name), index_col=0)
if 'spd_7' not in raw_data_all.columns:
log = 'No spd_7, thus no de_min'
logs.loc[station_name, 'remark'] = log
print(log)
else:
column_names_to_use += ['de_min']
DeMin_prediction = pd.Series(np.interp(raw_data_all.loc[raw_data_all.index, 'spd_7'], DeMin_curve.values[:,0], DeMin_curve.values[:,1]), index=raw_data_all.index)
raw_data_all['de_min'] = DeMin_prediction
meteor_powers = raw_data_all[column_names_to_use]
if len(raw_data_all) == 0:
log = 'no gt data'
logs.loc[station_name, 'remark'] = log
print(log)
continue
elif meteor_powers.shape[1] == 0:
log = 'no meteor_powers'
logs.loc[station_name, 'remark'] = log
print(log)
continue
else:
#TIMESLICE LOOP: #when loop over train and test
error_recorder = dependency_wind_bases.Recorder()
concat_optimized_test = pd.Series([], dtype=float)
concat_business_test = pd.Series([], dtype=float)
concat_real_test = pd.Series([], dtype=float)
plt.ion()
for i in list(range(args.loop_days)):
print('Time slice', i)
start_date_train = start_date_grasp_date + datetime.timedelta(i)
end_date_train = start_date_train + datetime.timedelta(args.train_length)
start_date_test = end_date_train + datetime.timedelta(int(args.data_gap_day+1))
end_date_test = start_date_test + datetime.timedelta(args.test_length)
start_date_train_str = start_date_train.strftime("%Y-%m-%d")
end_date_train_str = end_date_train.strftime("%Y-%m-%d")
start_date_test_str = start_date_test.strftime("%Y-%m-%d")
end_date_test_str = end_date_test.strftime("%Y-%m-%d")
print("Train from %s to %s"%(start_date_train_str, end_date_train_str))
print("Test from %s to %s"%(start_date_test_str, end_date_test_str))
meteor_powers_slice = meteor_powers.loc[start_date_train_str: end_date_test_str]
real_power_slice = real_power[meteor_powers_slice.index]
#split dataset:
X = meteor_powers_slice
X['bias'] = 1
Y = real_power_slice
X_train, X_test = X.loc[start_date_train_str:end_date_train_str], X.loc[start_date_test_str:end_date_test_str]
Y_train, Y_test = Y.loc[start_date_train_str:end_date_train_str], Y.loc[start_date_test_str:end_date_test_str]
#handle duplicates
X_train = get_steady_meteor_powers(X_train).dropna()
X_test = X_test[X_train.columns].dropna()
Y_train = Y_train.dropna()
Y_test = Y_test.dropna()
if len(set(X_train.columns) - {'de_min', 'bias'}) == 0:
log = 'source not enough'
logs.loc[station_name, 'remark'] = log
print(log)
continue
if X_train.shape[0] < X_train.shape[1]:
print("shape of X 0<1")
continue
if Y_test.shape[0] < 6:
print("len Y <6")
continue
business_power_train = business_power.loc[Y_train.index]
business_power_test = business_power.loc[Y_test.index]
#Choose methods:
if args.method == 'ode':
#solve ODE equation:
try:
w = np.linalg.solve(np.dot(X_train.T.copy(), X_train), np.dot(X_train.T.copy(), Y_train))
except Exception as e:
log = '%s, \n %s, \t %s'%(e, X_train.describe(), X_test.describe())
#log = '%s'%e
logs.loc[station_name, 'remark'] = log
print(log)
continue
elif args.method == 'gd':
init_w = [1/X_train.shape[1]]*(X_train.shape[1]-1) # -1 for bias
w = batchGradientDescent(X_train, Y_train, init_w+[1], alpha=0.1)
elif args.method == 'ga': #ga
w = np.tile(0, (1,5))
n_dim = X_train.shape[1]
lb = [-3]*(n_dim-1);lb.append(-20)
ub = [3]*(n_dim-1);ub.append(20)
ga = GA(func=obj_func, n_dim=n_dim, size_pop=1000, max_iter=1000, lb=lb, ub=ub)
                    # NOTE: this branch relies on the sko.GA and torch imports commented out at the top
                    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                    #ga.to(device=device)
w, residuals = ga.run()
elif args.method == 'poly': #poly lr
w = np.tile(0, (1,5))
poly = PolynomialFeatures(degree=2)
poly_X_train = poly.fit_transform(X_train.iloc[:,:-1])
poly_X_test = poly.fit_transform(X_test.iloc[:,:-1])
regressor = LinearRegression()
regressor.fit(poly_X_train, Y_train)
elif args.method == 'xgb':
w = np.tile(0, (1,5))
regressor = XGBRegressor(max_depth=4)
regressor.fit(X_train, Y_train)
elif args.method == 'lasso':
w = np.tile(0, (1,5))
regressor = Lasso()
regressor.fit(X_train, Y_train)
elif args.method == 'ridge':
w = np.tile(0, (1,5))
regressor = Ridge()
regressor.fit(X_train, Y_train)
elif args.method == 'mlp':
w = np.tile(0, (1,5))
regressor = MLPRegressor()
regressor.fit(X_train, Y_train)
else:
regressor = None
#eval train:
if args.method == 'ode' or args.method == 'ga' or args.method == 'gd':
optimized_combination_train = (w*X_train).sum(axis=1)
optimized_combination_test = (w*X_test).sum(axis=1)
elif args.method == 'poly':
optimized_combination_train = regressor.predict(poly_X_train)
optimized_combination_test = regressor.predict(poly_X_test)
optimized_combination_train = pd.Series(optimized_combination_train, index=X_train.index)
optimized_combination_test = pd.Series(optimized_combination_test, index=X_test.index)
else:
optimized_combination_train = regressor.predict(X_train)
optimized_combination_test = regressor.predict(X_test)
optimized_combination_train = pd.Series(optimized_combination_train, index=X_train.index)
optimized_combination_test = pd.Series(optimized_combination_test, index=X_test.index)
#eval train:
optimized_combination_train = np.clip(optimized_combination_train, 0, max(Y))
ERROR_optimized_train = evaluate(cap, Y_train, optimized_combination_train, method=config.eval_metric)
ERROR_business_train = evaluate(cap, Y_train, business_power_train, config.eval_metric)
ERROR_improvement_train = (ERROR_business_train-ERROR_optimized_train)/ERROR_business_train*100
#eval test:
optimized_combination_test = np.clip(optimized_combination_test, 0, max(Y))
ERROR_optimized_test = evaluate(cap, Y_test, optimized_combination_test, config.eval_metric)
ERROR_business_test = evaluate(cap, Y_test, business_power_test, config.eval_metric)
ERROR_improvement_test = (ERROR_business_test-ERROR_optimized_test)/ERROR_business_test*100
#save externals:
save_log(Y_test, optimized_combination_test, business_power_test, station_name)
meteor_weights = save_output(station_name, X_train, w)
print("Train Improvement from %s to %s, %s%%"%(ERROR_business_train, ERROR_optimized_train, ERROR_improvement_train))
print("Test Improvement from %s to %s, %s%%"%(ERROR_business_test, ERROR_optimized_test, ERROR_improvement_test))
#print('Weight:', meteor_weights)
if args.ANIMATION:
plt.plot(meteor_powers_slice, 'blue', alpha=0.2, linewidth=3, label='sources')
plt.plot(Y_train, 'k', alpha=0.5)
plt.plot(optimized_combination_train, 'g', alpha=0.5)
plt.plot(business_power_train, 'r', alpha=0.5)
plt.plot(Y_test, 'k', label='real')
plt.plot(optimized_combination_test, 'g', label='optimized', linestyle='--')
plt.plot(business_power_test, 'r', label='business', linestyle=':')
plt.title('%s, %s, %s'%(station_name, ERROR_improvement_train, ERROR_improvement_test))
plt.legend()
plt.grid()
plt.draw()
plt.pause(0.1)
plt.clf()
#Misc
concat_optimized_test = concat_optimized_test.append(optimized_combination_test)
concat_business_test = concat_business_test.append(business_power_test)
concat_real_test = concat_real_test.append(Y_test)
error_recorder.add_one('%s_ERROR_optimized_train'%station_name, ERROR_optimized_train)
error_recorder.add_one('%s_ERROR_optimized_test'%station_name, ERROR_optimized_test)
error_recorder.add_one('%s_ERROR_business_train'%station_name, ERROR_business_train)
error_recorder.add_one('%s_ERROR_business_test'%station_name, ERROR_business_test)
error_recorder.add_one('%s_improvement_train (%%)'%station_name, ERROR_improvement_train)
error_recorder.add_one('%s_improvement_test (%%)'%station_name, ERROR_improvement_test)
#TIME LOOP DONE.
plt.close()
plt.ioff()
        #Mean over redundant slices:
if len(concat_optimized_test) == 0:
print("len concat test =0")
continue
#Concatenate all slices of timeloops
optimized_combination_test = concat_optimized_test.resample('15min').mean().dropna()
business_power_test = business_power.reindex(optimized_combination_test.index).dropna()
        common_index = optimized_combination_test.index.intersection(business_power_test.index)
optimized_combination_test = optimized_combination_test.loc[common_index]
business_power_test = business_power_test.loc[common_index]
real_power_test = real_power.loc[common_index]
ERROR_opt = np.nanmean(np.abs(optimized_combination_test - real_power_test))
LSE_opt = np.nanmean((optimized_combination_test - real_power_test)**2)
ERROR_bus = np.nanmean(np.abs(business_power_test - real_power_test))
LSE_bus = np.nanmean((business_power_test - real_power_test)**2)
ERROR_imp = (ERROR_bus-ERROR_opt)/ERROR_bus*100
LSE_imp = (LSE_bus-LSE_opt)/LSE_bus*100
logs.loc[station_name, 'ERROR_optimized_train'] = error_recorder.get_mean('%s_ERROR_optimized_train'%station_name)
logs.loc[station_name, 'ERROR_optimized_test'] = error_recorder.get_mean('%s_ERROR_optimized_test'%station_name)
logs.loc[station_name, 'ERROR_business_train'] = error_recorder.get_mean('%s_ERROR_business_train'%station_name)
logs.loc[station_name, 'ERROR_business_test'] = error_recorder.get_mean('%s_ERROR_business_test'%station_name)
logs.loc[station_name, 'improvement_train (%)'] = error_recorder.get_mean('%s_improvement_train (%%)'%station_name)
logs.loc[station_name, 'improvement_test (%)'] = error_recorder.get_mean('%s_improvement_test (%%)'%station_name)
logs.loc[station_name, 'loop_optimized_output'] = ','.join(np.round(optimized_combination_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_real_output'] = ','.join(np.round(real_power_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_business_power_test'] = ','.join(np.round(business_power_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_test ERROR (%)'] = ERROR_imp
logs.loc[station_name, 'loop_test LSE (%)'] = LSE_imp
print('loop given: ERROR:%s, LSE:%s'%(ERROR_imp, LSE_imp))
#plots:
fig = plt.figure(figsize=(18, 10))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.plot(meteor_powers.loc[Y_train.index], alpha=0.4, c='gray', label='sources')
ax1.plot(real_power.loc[Y_train.index], label='real', c='k')
ax1.plot(optimized_combination_train, label='optimized', c='g')
ax1.plot(business_power_train, label='business', c='r')
ax2.plot(meteor_powers.reindex(optimized_combination_test.index), alpha=0.4, c='gray', label='sources')
ax2.plot(real_power_test, label='real', c='k')
ax2.plot(optimized_combination_test, label='optimized', c='g', alpha=1)
ax2.plot(business_power_test, label='business', c='r', alpha=0.8)
ax1.legend()
ax2.legend()
ax1.set_title("%s \nTrain result for meteor sources, improvement(%s): %s%%"%(station_name, config.eval_metric, ERROR_improvement_train))
ax2.set_title("%s \nTest result for meteor sources, improvement(%s): %s%%"%(station_name, config.eval_metric, ERROR_improvement_test))
ax1.grid()
ax2.grid()
if args.VISUALIZATION:
plt.show()
plt.savefig(os.path.join('..', 'png', '%s_%s_%s.png'%(station_name, args.train_length, args.test_length)))
plt.close()
#STATION LOOP DONE
#Statistics:
    #logs.to_csv(os.path.join('..', 'output', 'details_%s_%s_%s_%s_%s_%s-%s_loop%s.csv'%(args.train_length, args.test_length, np.nanmean(overall_LSE_imp), args.method, args.use_spd, start_date_grasp, end_date_grasp, args.loop_days)))
#print(logs)
#print(logs.describe())
print("Finish list", args.filename)
| [
"[email protected]"
] | |
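The 'ode' branch in the script above fits the per-source weights by solving the least-squares normal equations directly. Below is a self-contained sketch of that single step on toy data; the array names and sizes are illustrative, not from the script, and np.linalg.lstsq is shown only as a numerically safer alternative when X.T @ X is ill-conditioned:

import numpy as np

rng = np.random.default_rng(0)
# Stand-ins for X_train (three source forecasts plus a bias column) and Y_train.
X = np.hstack([rng.uniform(0, 50, (96, 3)), np.ones((96, 1))])
true_w = np.array([0.5, 0.3, 0.2, 1.0])
Y = X @ true_w + rng.normal(0, 0.5, 96)

w = np.linalg.solve(X.T @ X, X.T @ Y)           # normal equations, as in the script
w_safe, *_ = np.linalg.lstsq(X, Y, rcond=None)  # safer when X.T @ X is ill-conditioned
print(np.round(w, 3), np.round(w_safe, 3))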
4a3bb92e0a9b95c1fc10eb9db2fd34e8f5cdcb8d | 1669bf106be7e4e88ad957aa1f0a708a49f9ef87 | /first_website/setup.py | d87bfe260f13a957af9d07c566ab6284fad70c61 | [] | no_license | genzj/python-fundamentals-course | 280166037bb6ff25e2400fa3b281de153824c622 | 31218a42c609d923b3ae0c7d785b9dc02c0d9a6e | refs/heads/master | 2023-01-09T22:13:04.040355 | 2021-03-20T02:33:18 | 2021-03-20T02:33:18 | 218,776,587 | 2 | 2 | null | 2022-12-26T20:59:32 | 2019-10-31T13:47:43 | Python | UTF-8 | Python | false | false | 1,512 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
author="Jie ZHU",
author_email='[email protected]',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
install_requires=requirements,
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='first_website',
name='first_website',
packages=find_packages(include=['first_website', 'first_website.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/genzj/first_website',
version='0.1.0',
zip_safe=False,
)
| [
"[email protected]"
] | |
93af8f67f99cacadec773970d6e4593f6c1b339e | dd098f8a93f787e38676283679bb39a290ba28b4 | /samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_paths/test_response_body_post_maxlength_validation_response_body_for_content_types/test_post.py | b5ee61f9396dd84fdab727dd1f5cee04e4b0aa0f | [
"Apache-2.0"
] | permissive | InfoSec812/openapi-generator | 727c0235d3bad9b85ac12068808f844287af6003 | e0c72702c3d5dae2a627a2926f0cddeedca61e32 | refs/heads/master | 2022-10-22T00:31:33.318867 | 2022-08-20T14:10:31 | 2022-08-20T14:10:31 | 152,479,633 | 1 | 0 | Apache-2.0 | 2023-09-04T23:34:09 | 2018-10-10T19:38:43 | Java | UTF-8 | Python | false | false | 7,073 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
import unittest
from unittest.mock import patch
import urllib3
import unit_test_api
from unit_test_api.paths.response_body_post_maxlength_validation_response_body_for_content_types import post # noqa: E501
from unit_test_api import configuration, schemas, api_client
from .. import ApiTestMixin
class TestResponseBodyPostMaxlengthValidationResponseBodyForContentTypes(ApiTestMixin, unittest.TestCase):
"""
ResponseBodyPostMaxlengthValidationResponseBodyForContentTypes unit test stubs
"""
_configuration = configuration.Configuration()
def setUp(self):
used_api_client = api_client.ApiClient(configuration=self._configuration)
self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501
def tearDown(self):
pass
response_status = 200
def test_too_long_is_invalid_fails(self):
# too long is invalid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"foo"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
content_type=None,
accept_content_type=accept_content_type,
)
def test_ignores_non_strings_passes(self):
# ignores non-strings
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
100
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_shorter_is_valid_passes(self):
# shorter is valid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"f"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_two_supplementary_unicode_code_points_is_long_enough_passes(self):
# two supplementary Unicode code points is long enough
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"💩💩"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_exact_length_is_valid_passes(self):
# exact length is valid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"fo"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
38801dbfe2808511d05323af89e49be9254d06bd | 40b5c4a77be465b47fe6fd7ff408db957261cc7f | /python-spake2-0.7/setup.py | c6365b7a7fcc9b95f11fa6dfb09513fabbc2ab8f | [
"MIT"
] | permissive | warner/spake2-interop-server | 7c1f0502a93615d2e2b5b7a323731a7e20040f86 | b3f2ae42971e4217d9f503bb672b2d9288225acc | refs/heads/master | 2021-01-25T11:27:45.696023 | 2017-06-10T22:15:15 | 2017-06-10T22:15:15 | 93,924,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | "A server that performs SPAKE2 operations, for interoperability testing."
# Install this, then run "twist spake2_interop" and hit http://HOST:8705/
from setuptools import setup
import versioneer
setup(
name="spake2-interop-python-spake2-0.7",
version=versioneer.get_version(),
author="Brian Warner",
author_email="[email protected]",
package_dir={"": "src"},
# this must be installed into its own virtualenv (e.g. spake2-0.7 can't
# share a venv with spake2-0.3), so we don't need a version-specific
# package name, and keeping it neutral will minimize the diff
packages=["spake2_interop_python"],
license="MIT",
cmdclass=versioneer.get_cmdclass(),
install_requires=[
"spake2==0.7",
],
entry_points={
"console_scripts":
[
"spake2_interop_python_0_7 = spake2_interop_python:run",
]
},
)
| [
"[email protected]"
] | |
9fa28da8427b89b3d954bdd756fd2ebcba4686a1 | 83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c | /CODEFORCES/two_teams.py | 7d256d350d953f576fe903bf8811b7a18f57716a | [] | no_license | harshitalpha/Algorithms | ebad07cc77516ab5c35ae414462d10a38d5ef97e | 2f7dcf4c3bb4390267231c7c96f7e76399c0166e | refs/heads/master | 2021-07-14T17:34:02.546583 | 2020-06-25T06:38:39 | 2020-06-25T06:38:39 | 178,813,562 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | t = int(input())
while(t):
t = t - 1
n = int(input())
a = [int(s) for s in input().split()]
d = {}
for i in a:
try:
d[i] += 1
except KeyError:
d[i] = 1
    # max_size: highest skill frequency; count: number of other distinct skills
    max_size = d[a[0]]
    ele_in_sec_arr = a[0]
    for i in d.keys():
        if d[i] > max_size:
            max_size = d[i]
            ele_in_sec_arr = i
    count = 0
    for i in d.keys():
        if i != ele_in_sec_arr:  # compare values, not identity ('is not' is a bug here)
            count = count + 1
if count == max_size:
print(max_size)
elif count == max_size - 1:
print(count)
elif count <= max_size-2:
print(count+1)
elif count > max_size:
print(max_size)
| [
"[email protected]"
] | |
b67ec65da5b89ee26ecfac71462afdedf4ad07d3 | a72f39b82966cd6e2a3673851433ce7db550429a | /configs/_base_/models/lxmert/lxmert_vqa_config.py | 781219951d7cd25d348c58e16f382837a1dcbeaf | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 1,753 | py | # model settings
model = dict(
type='LXMERT',
params=dict(
random_initialize=False,
num_labels=3129,
# BertConfig
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act='gelu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
#
mode='lxr',
l_layers=9, # 12
x_layers=5, # 5
r_layers=5, # 0
visual_feat_dim=2048,
visual_pos_dim=4,
freeze_base=False,
max_seq_length=20,
model='bert',
training_head_type='vqa2',
bert_model_name='bert-base-uncased',
pretrained_path='/home/datasets/mix_data/iMIX/data/models/model_LXRT.pth',
label2ans_path='/home/datasets/mix_data/lxmert/vqa/trainval_label2ans.json',
))
loss = dict(type='LogitBinaryCrossEntropy')
optimizer = dict(
type='BertAdam',
lr=5e-5,
weight_decay=0.01,
eps=1e-6,
betas=[0.9, 0.999],
max_grad_norm=-1,
training_encoder_lr_multiply=1,
)
optimizer_config = dict(grad_clip=dict(max_norm=5))
'''
fp16 = dict(
init_scale=2.**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
)
'''
lr_config = dict(
warmup=0.1,
warmup_method='warmup_linear',
# max_iters=55472, # ceil(totoal 443753 / batch size 32) * epoch size datasets: train
max_iters=79012, # floor(totoal 632117 / batch size 32) * epoch size datasets: train, nominival
policy='BertWarmupLinearLR')
total_epochs = 4
seed = 9595
| [
"[email protected]"
] | |
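The max_iters values in lr_config above follow from the dataset sizes, batch size, and epoch count noted in the file's own comments; a quick check of that arithmetic (batch size 32 and 4 epochs, as the comments state):

import math
print(math.ceil(443753 / 32) * 4)   # 55472, datasets: train
print(math.floor(632117 / 32) * 4)  # 79012, datasets: train, nominival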
8626edcebc5d57619798aec921223388d499ef0b | f77327128a8da9702ae3443e2171bc7485ceb915 | /cadence/items.py | 08b1b50b16175e30c34717833ce6af94ae712ed4 | [] | no_license | SimeonYS/cadence | 0eeba6a54c03ffb2d55466f9d8de6f1b1662002f | cdaef13c85a03e031a0050c89c17249cd7d83125 | refs/heads/main | 2023-03-31T08:24:41.408507 | 2021-03-31T10:14:01 | 2021-03-31T10:14:01 | 353,312,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import scrapy
class CadenceItem(scrapy.Item):
title = scrapy.Field()
content = scrapy.Field()
date = scrapy.Field()
link = scrapy.Field()
| [
"[email protected]"
] | |
17602e35cc61bc0f7fc211873d8f4e8f3498781a | 4ca44b7bdb470fcbbd60c2868706dbd42b1984c9 | /20.12.14/BOJ_20056.py | d156e0cef3cfd5583ae7fcf2a95e7de4fd8f8efa | [] | no_license | titiman1013/Algorithm | 3b3d14b3e2f0cbc4859029eb73ad959ec8778629 | 8a67e36931c42422779a4c90859b665ee468255b | refs/heads/master | 2023-06-29T17:04:40.015311 | 2021-07-06T01:37:29 | 2021-07-06T01:37:29 | 242,510,483 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,827 | py | import sys; sys.stdin = open('text1.txt', 'r')
# solve test1
# dx = [-1, -1, 0, 1, 1, 1, 0, -1]
# dy = [0, 1, 1, 1, 0, -1, -1, -1]
# def move_fireball(x, y, m, s, d):
# for _ in range(s):
# nx = x + dx[d]
# ny = y + dy[d]
# if 0 <= nx < N and 0 <= ny < N:
# x, y = nx, ny
# else:
# if nx < 0 or nx >= N:
# if nx < 0:
# x = N + nx
# else:
# x = nx - N
# if ny < 0 or nx >= N:
# if ny < 0:
# y = N + ny
# else:
# y = ny - N
# if bool(arr[x][y]):
# arr[x][y].append([x, y, m, s, d])
# else:
# arr[x][y] = [[x, y, m, s, d]]
# return
# def sum_fireball(sum_list):
# list_cnt = len(sum_list)
# m = 0
# s = 0
# d = []
# for idx in range(list_cnt):
# m += sum_list[idx][2]
# s += sum_list[idx][3]
# if d % 2:
# d.append(1)
# else:
# d.append(0)
# m = m // 5
# if m == 0:
# return [0]
# s = s // list_cnt
# d_check = True
# temp_d = d[0]
# for i in range(1, len(d)):
# if d[i] != temp_d:
# d_check = False
# break
# if d_check == True:
# d = [0, 2, 4, 6]
# else:
# d = [1, 3, 5, 7]
# temp_list = []
# for i in range(4):
# temp_list.append([sum_list[0], sum_list[1], m, s, d[i]])
# return temp_list
# # 방향
# # 인접한 행렬 12시부터 시계방향
# # 7 0 1
# # 6 2
# # 5 4 3
# for tc in range(1, int(input()) + 1):
# N, M, K = map(int, input().split())
# # [r, c, m, s, d]
# items = [list(map(int, input().split())) for _ in range(M)]
# arr = [[0] * N for _ in range(N)]
# if K > 0:
# # 처음 시행
# for item in items:
# move_fireball(item[0] - 1, item[1] - 1, item[2], item[3], item[4])
# print(arr)
# move_cnt = 1
# while move_cnt <= K:
# # 움직이기
# for i in range(N):
# for j in range(N):
# if bool(arr[i][j]):
# if len(arr[i][j]) >= 2:
# temp_list = arr[i][j][0]
# arr[i][j] = 0
# for k in range(len(temp_list)):
# move_fireball(temp_list[k][0], temp_list[k][1], temp_list[k][2], temp_list[k][3], temp_list[k][4])
# else:
# temp_list = arr[i][j][0]
# arr[i][j] = 0
# print(arr)
# move_fireball(temp_list[0], temp_list[1], temp_list[2], temp_list[3], temp_list[4])
# # 합치기
# for i in range(N):
# for j in range(N):
# if len(arr[i][j]) >= 2:
# arr[i][j] = sum_fireball(arr[i][j])
# move_cnt += 1
# res = 0
# for i in range(N):
# for j in range(N):
# if bool(arr[i][j]):
# if len(arr[i][j]) >= 2:
# for k in range(len(arr[i][j])):
# res += arr[i][j][k][2]
# else:
# res += arr[i][j][0][2]
# print(f'#{tc} {res}')
# solve test2
from collections import deque
for tc in range(1, int(input()) + 1):
N, M, K = map(int, input().split())
# [r, c, m, s, d]
items = [list(map(int, input().split())) for _ in range(M)]
arr = [[0] * N for _ in range(N)]
q = deque()
for item in items:
q.append(item)
for _ in range(K):
while q:
| [
"[email protected]"
] | |
16f4f84a799fbad2d4951affd28a3893ee356839 | a667b52cb8d2ec857c55d33f04fc0e81d36dc681 | /options/data/mc/pipipi0_DecProdCut_PHSP_2012_MC_2012_Beam4000GeV-2012-MagUp-Nu2.5-Pythia8_Sim08e_Digi13_Trig0x409f0045_Reco14a_Stripping20r0p2NoPrescalingFlagged_27163403_ALLSTREAMS.DST.py | befddb5e34f37022361b1b2ddd67efe8ea3fa6bd | [] | no_license | wenyanyin/CP_violation_simulation | 639d73333a3795654275cb43cc7dad7c742d1be1 | 7b93b2fe1050fb30d0b809b758cd5a3b2824b875 | refs/heads/master | 2022-04-29T14:19:23.744004 | 2022-04-01T13:05:18 | 2022-04-01T13:05:18 | 168,570,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | # lb-run LHCbDirac/prod dirac-bookkeeping-get-files -B /MC/2012/Beam4000GeV-2012-MagUp-Nu2.5-Pythia8/Sim08e/Digi13/Trig0x409f0045/Reco14a/Stripping20r0p2NoPrescalingFlagged/27163403/ALLSTREAMS.DST
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(
['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000028_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000044_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000046_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000048_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000049_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000050_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000052_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000053_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000054_2.AllStreams.dst'],
clear=True)
| [
"[email protected]"
] | |
f6aff156beb68f479d76392ed5097e84546ed4e6 | 764a157c1ef369664144a112f390165809c37861 | /apps/app/views.py | 75a6c7124d899b9dae8673fed292fa32dbe61aff | [] | no_license | Maliaotw/gogoweb | b044678b0a34c2748267c8f8ac1f6af91d42bcd0 | aad84f11163e62716a239972436eb92e7cc601d0 | refs/heads/main | 2023-07-07T19:46:37.470811 | 2021-08-31T15:01:58 | 2021-08-31T15:01:58 | 341,254,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from django.shortcuts import render
from apps.app import models
from apps.app import serializers
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.views import APIView, Response
from rest_framework import status
# Create your views here.
class TaskModelViewSet(viewsets.ModelViewSet):
queryset = models.Task.objects.all()
serializer_class = serializers.TaskSerializer
pagination_class = LimitOffsetPagination
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
| [
"[email protected]"
] | |
50482a45f14d167f9dd6e9fc7d00d93c3fcaad60 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Geron17Hands/B_PartI/H_Chapter8/C_PCA/D_UsingScikitLearn/index.py | f3808ea9cd7e1c90a935491935c7d8dd01be2ef0 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
# ing the first d principal components (i.e., the matrix composed of the first d columns
# of VT), as shown in Equation 8-2.
#
# Equation 8-2. Projecting the training set down to d dimensions
# X_d-proj = X · W_d
#
# The following Python code projects the training set onto the plane defined by the first
# two principal components:
# W2 = V.T[:, :2]
# X2D = X_centered.dot(W2)
# There you have it! You now know how to reduce the dimensionality of any dataset
# down to any number of dimensions, while preserving as much variance as possible.
#
# Using Scikit-Learn
# Scikit-Learn’s PCA class implements PCA using SVD decomposition just like we did
# before. The following code applies PCA to reduce the dimensionality of the dataset
# down to two dimensions (note that it automatically takes care of centering the data):
# from sklearn.decomposition import PCA
#
# pca = PCA(n_components = 2)
# X2D = pca.fit_transform(X)
#
# After fitting the PCA transformer to the dataset, you can access the principal compo‐
# nents using the components_ variable (note that it contains the PCs as horizontal vec‐
# tors, so, for example, the first principal component is equal to pca.components_.T[:,
# 0]).
#
# Explained Variance Ratio
# Another very useful piece of information is the explained variance ratio of each prin‐
# cipal component, available via the explained_variance_ratio_ variable. It indicates
# the proportion of the dataset’s variance that lies along the axis of each principal com‐
# ponent. For example, let’s look at the explained variance ratios of the first two compo‐
# nents of the 3D dataset represented in Figure 8-2:
# >>> print(pca.explained_variance_ratio_)
# array([ 0.84248607, 0.14631839])
# This tells you that 84.2% of the dataset’s variance lies along the first axis, and 14.6%
# lies along the second axis. This leaves less than 1.2% for the third axis, so it is reason‐
# able to assume that it probably carries little information.
#
#
#
#
# 214 | Chapter 8: Dimensionality Reduction
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Using Scikit-Learn",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class UsingScikitLearn(HierNode):
def __init__(self):
super().__init__("Using Scikit-Learn")
self.add(Content(), "content")
# eof
| [
"[email protected]"
] | |
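The book excerpt embedded above projects centered data onto the top-d right-singular vectors and then shows the equivalent Scikit-Learn call. A self-contained sketch checking that the two routes agree; the random 3-D data and n_components=2 are illustrative choices, not from the excerpt:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 3)) * np.array([2.0, 1.0, 0.1])  # anisotropic cloud

# Manual route from the excerpt: center, SVD, project onto the first two PCs.
X_centered = X - X.mean(axis=0)
U, s, Vt = np.linalg.svd(X_centered)
X2D_manual = X_centered @ Vt.T[:, :2]

pca = PCA(n_components=2)
X2D = pca.fit_transform(X)
print(pca.explained_variance_ratio_)                  # most variance on the first axis
print(np.allclose(np.abs(X2D_manual), np.abs(X2D)))   # True up to per-axis sign flips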
cb50337db2d8006a698aab101b52e25241b61b67 | 292437b85108504a7ca91571f26a639a313501b6 | /venv2/lib/python2.7/site-packages/keystoneclient/auth/identity/generic/token.py | 6a5d15b281e8931b3199251c3a6ea2c8f77eef3e | [] | no_license | heekof/monitoring-agent | c86bebcf77091490df7a6b8c881b85fdb2b9e4eb | b1c079efdf2dabe854f2aa3d96f36d2ec7021070 | refs/heads/master | 2021-01-15T15:39:01.512801 | 2016-08-31T20:53:38 | 2016-08-31T20:53:38 | 58,620,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from keystoneclient import _discover
from keystoneclient.auth.identity.generic import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
LOG = logging.getLogger(__name__)
def get_options():
return [
cfg.StrOpt('token', secret=True, help='Token to authenticate with'),
]
class Token(base.BaseGenericPlugin):
"""Generic token auth plugin.
:param string token: Token for authentication.
"""
def __init__(self, auth_url, token=None, **kwargs):
super(Token, self).__init__(auth_url, **kwargs)
self._token = token
def create_plugin(self, session, version, url, raw_status=None):
if _discover.version_match((2,), version):
return v2.Token(url, self._token, **self._v2_params)
elif _discover.version_match((3,), version):
return v3.Token(url, self._token, **self._v3_params)
@classmethod
def get_options(cls):
options = super(Token, cls).get_options()
options.extend(get_options())
return options
| [
"[email protected]"
] | |
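For context, a minimal usage sketch for the generic Token plugin defined above. The keystoneclient.session module and the project-scoping keyword arguments (handled by BaseGenericPlugin) are assumptions based on the surrounding library, and the endpoint and token values are illustrative:

from keystoneclient import session as ks_session
from keystoneclient.auth.identity.generic import Token

auth = Token(auth_url='http://keystone.example.com:5000',  # illustrative endpoint
             token='my-token',                             # illustrative token
             project_name='demo',                 # scoping kwargs: assumed from
             project_domain_name='Default')       # BaseGenericPlugin
sess = ks_session.Session(auth=auth)  # version discovery picks v2 or v3 on first use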
867b3f98c1c1451d19180642f61929115b3606d1 | d4f4bff5d4412abbb73ce534fae0c87ea9a62362 | /model/boarding2/unassigned_integration_settings.py | fdc774918694894dc6fb81064c5daafc67d14b5a | [] | no_license | icorso/wn_api | 4f023905bcf83fd19eb7826191a6fcf66345e38f | b7e558b30d57b62ed3333cbfb7a9359bf954e320 | refs/heads/master | 2023-05-25T11:05:02.203211 | 2021-05-22T15:10:57 | 2021-05-22T15:10:57 | 366,672,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,171 | py | # coding: utf-8
from model.serializable import SwaggerSerializable
class UnassignedIntegrationSettings(SwaggerSerializable):
swagger_types = {
'enable_background_validation': 'bool',
'background_validation_url': 'str',
'receipt_page_url': 'str',
'enable_additional_field_xml_response_tag': 'bool',
'enable_supports_apple_pay_xml_response_tag': 'bool',
'enable_supports_google_pay_xml_response_tag': 'bool',
'enable_enable3ds_xml_response_tag': 'bool',
'enable_supported_cards_xml_response_tag': 'bool'
}
attribute_map = {
'enable_background_validation': 'enableBackgroundValidation',
'background_validation_url': 'backgroundValidationUrl',
'receipt_page_url': 'receiptPageUrl',
'enable_additional_field_xml_response_tag': 'enableAdditionalFieldXmlResponseTag',
'enable_supports_apple_pay_xml_response_tag': 'enableSupportsApplePayXmlResponseTag',
'enable_supports_google_pay_xml_response_tag': 'enableSupportsGooglePayXmlResponseTag',
'enable_enable3ds_xml_response_tag': 'enableEnable3dsXmlResponseTag',
'enable_supported_cards_xml_response_tag': 'enableSupportedCardsXmlResponseTag'
}
def __init__(self, enable_background_validation=False, background_validation_url=None, receipt_page_url=None, enable_additional_field_xml_response_tag=True, enable_supports_apple_pay_xml_response_tag=True, enable_supports_google_pay_xml_response_tag=True, enable_enable3ds_xml_response_tag=True, enable_supported_cards_xml_response_tag=True): # noqa: E501
"""UnassignedIntegrationSettings - a model defined in Swagger""" # noqa: E501
self._enable_background_validation = None
self._background_validation_url = None
self._receipt_page_url = None
self._enable_additional_field_xml_response_tag = None
self._enable_supports_apple_pay_xml_response_tag = None
self._enable_supports_google_pay_xml_response_tag = None
self._enable_enable3ds_xml_response_tag = None
self._enable_supported_cards_xml_response_tag = None
self.discriminator = None
if enable_background_validation is not None:
self.enable_background_validation = enable_background_validation
if background_validation_url is not None:
self.background_validation_url = background_validation_url
if receipt_page_url is not None:
self.receipt_page_url = receipt_page_url
if enable_additional_field_xml_response_tag is not None:
self.enable_additional_field_xml_response_tag = enable_additional_field_xml_response_tag
if enable_supports_apple_pay_xml_response_tag is not None:
self.enable_supports_apple_pay_xml_response_tag = enable_supports_apple_pay_xml_response_tag
if enable_supports_google_pay_xml_response_tag is not None:
self.enable_supports_google_pay_xml_response_tag = enable_supports_google_pay_xml_response_tag
if enable_enable3ds_xml_response_tag is not None:
self.enable_enable3ds_xml_response_tag = enable_enable3ds_xml_response_tag
if enable_supported_cards_xml_response_tag is not None:
self.enable_supported_cards_xml_response_tag = enable_supported_cards_xml_response_tag
@property
def enable_background_validation(self):
"""Gets the enable_background_validation of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_background_validation of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_background_validation
@enable_background_validation.setter
def enable_background_validation(self, enable_background_validation):
"""Sets the enable_background_validation of this UnassignedIntegrationSettings.
:param enable_background_validation: The enable_background_validation of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_background_validation = enable_background_validation
@property
def background_validation_url(self):
"""Gets the background_validation_url of this UnassignedIntegrationSettings. # noqa: E501
:return: The background_validation_url of this UnassignedIntegrationSettings. # noqa: E501
:rtype: str
"""
return self._background_validation_url
@background_validation_url.setter
def background_validation_url(self, background_validation_url):
"""Sets the background_validation_url of this UnassignedIntegrationSettings.
:param background_validation_url: The background_validation_url of this UnassignedIntegrationSettings. # noqa: E501
:type: str
"""
self._background_validation_url = background_validation_url
@property
def receipt_page_url(self):
"""Gets the receipt_page_url of this UnassignedIntegrationSettings. # noqa: E501
:return: The receipt_page_url of this UnassignedIntegrationSettings. # noqa: E501
:rtype: str
"""
return self._receipt_page_url
@receipt_page_url.setter
def receipt_page_url(self, receipt_page_url):
"""Sets the receipt_page_url of this UnassignedIntegrationSettings.
:param receipt_page_url: The receipt_page_url of this UnassignedIntegrationSettings. # noqa: E501
:type: str
"""
self._receipt_page_url = receipt_page_url
@property
def enable_additional_field_xml_response_tag(self):
"""Gets the enable_additional_field_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_additional_field_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_additional_field_xml_response_tag
@enable_additional_field_xml_response_tag.setter
def enable_additional_field_xml_response_tag(self, enable_additional_field_xml_response_tag):
"""Sets the enable_additional_field_xml_response_tag of this UnassignedIntegrationSettings.
:param enable_additional_field_xml_response_tag: The enable_additional_field_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_additional_field_xml_response_tag = enable_additional_field_xml_response_tag
@property
def enable_supports_apple_pay_xml_response_tag(self):
"""Gets the enable_supports_apple_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_supports_apple_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_supports_apple_pay_xml_response_tag
@enable_supports_apple_pay_xml_response_tag.setter
def enable_supports_apple_pay_xml_response_tag(self, enable_supports_apple_pay_xml_response_tag):
"""Sets the enable_supports_apple_pay_xml_response_tag of this UnassignedIntegrationSettings.
:param enable_supports_apple_pay_xml_response_tag: The enable_supports_apple_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_supports_apple_pay_xml_response_tag = enable_supports_apple_pay_xml_response_tag
@property
def enable_supports_google_pay_xml_response_tag(self):
"""Gets the enable_supports_google_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_supports_google_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_supports_google_pay_xml_response_tag
@enable_supports_google_pay_xml_response_tag.setter
def enable_supports_google_pay_xml_response_tag(self, enable_supports_google_pay_xml_response_tag):
"""Sets the enable_supports_google_pay_xml_response_tag of this UnassignedIntegrationSettings.
:param enable_supports_google_pay_xml_response_tag: The enable_supports_google_pay_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_supports_google_pay_xml_response_tag = enable_supports_google_pay_xml_response_tag
@property
def enable_enable3ds_xml_response_tag(self):
"""Gets the enable_enable3ds_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_enable3ds_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_enable3ds_xml_response_tag
@enable_enable3ds_xml_response_tag.setter
def enable_enable3ds_xml_response_tag(self, enable_enable3ds_xml_response_tag):
"""Sets the enable_enable3ds_xml_response_tag of this UnassignedIntegrationSettings.
:param enable_enable3ds_xml_response_tag: The enable_enable3ds_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_enable3ds_xml_response_tag = enable_enable3ds_xml_response_tag
@property
def enable_supported_cards_xml_response_tag(self):
"""Gets the enable_supported_cards_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:return: The enable_supported_cards_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:rtype: bool
"""
return self._enable_supported_cards_xml_response_tag
@enable_supported_cards_xml_response_tag.setter
def enable_supported_cards_xml_response_tag(self, enable_supported_cards_xml_response_tag):
"""Sets the enable_supported_cards_xml_response_tag of this UnassignedIntegrationSettings.
:param enable_supported_cards_xml_response_tag: The enable_supported_cards_xml_response_tag of this UnassignedIntegrationSettings. # noqa: E501
:type: bool
"""
self._enable_supported_cards_xml_response_tag = enable_supported_cards_xml_response_tag
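# Minimal usage sketch for the generated model above (swagger-codegen style;
# the attribute names come from the properties defined here, while the no-arg
# constructor is an assumption about the generated __init__):
#
#   settings = UnassignedIntegrationSettings()
#   settings.receipt_page_url = "https://example.com/receipt"
#   settings.enable_background_validation = True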
| [
"[email protected]"
] | |
64a694d6c95f4ea237880b1e4abbce5a36e03343 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/destination-weaviate/destination_weaviate/client.py | 3ba83b2a4a53a92af1f8413bd85c69ca41b056c9 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 6,094 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
import time
import uuid
from dataclasses import dataclass
from typing import Any, List, Mapping, MutableMapping
import weaviate
from .utils import generate_id, parse_id_schema, parse_vectors, stream_to_class_name
@dataclass
class BufferedObject:
id: str
properties: Mapping[str, Any]
vector: List[Any]
class_name: str
class WeaviatePartialBatchError(Exception):
pass
class Client:
def __init__(self, config: Mapping[str, Any], schema: Mapping[str, str]):
self.client = self.get_weaviate_client(config)
self.config = config
self.batch_size = int(config.get("batch_size", 100))
self.schema = schema
self.vectors = parse_vectors(config.get("vectors"))
self.id_schema = parse_id_schema(config.get("id_schema"))
self.buffered_objects: MutableMapping[str, BufferedObject] = {}
self.objects_with_error: MutableMapping[str, BufferedObject] = {}
def buffered_write_operation(self, stream_name: str, record: MutableMapping):
if self.id_schema.get(stream_name, "") in record:
id_field_name = self.id_schema.get(stream_name, "")
record_id = generate_id(record.get(id_field_name))
del record[id_field_name]
else:
if "id" in record:
record_id = generate_id(record.get("id"))
del record["id"]
# Weaviate will throw an error if you try to store a field with name _id
elif "_id" in record:
record_id = generate_id(record.get("_id"))
del record["_id"]
else:
record_id = uuid.uuid4()
record_id = str(record_id)
# TODO support nested objects instead of converting to json string when weaviate supports this
for k, v in record.items():
if self.schema[stream_name].get(k, "") == "jsonify":
record[k] = json.dumps(v)
# Handling of empty list that's not part of defined schema otherwise Weaviate throws invalid string property
if isinstance(v, list) and len(v) == 0 and k not in self.schema[stream_name]:
record[k] = ""
        # set.discard() mutates in place and returns None, so compute the
        # difference first and drop "id" explicitly
        missing_properties = set(self.schema[stream_name].keys()).difference(record.keys())
        missing_properties.discard("id")
        for prop in missing_properties:
            record[prop] = None
additional_props = set(record.keys()).difference(self.schema[stream_name].keys())
for prop in additional_props or []:
if isinstance(record[prop], dict):
record[prop] = json.dumps(record[prop])
if isinstance(record[prop], list) and len(record[prop]) > 0 and isinstance(record[prop][0], dict):
record[prop] = json.dumps(record[prop])
# Property names in Weaviate have to start with lowercase letter
record = {k[0].lower() + k[1:]: v for k, v in record.items()}
vector = None
if stream_name in self.vectors:
vector_column_name = self.vectors.get(stream_name)
vector = record.get(vector_column_name)
del record[vector_column_name]
class_name = stream_to_class_name(stream_name)
self.client.batch.add_data_object(record, class_name, record_id, vector=vector)
self.buffered_objects[record_id] = BufferedObject(record_id, record, vector, class_name)
if self.client.batch.num_objects() >= self.batch_size:
self.flush()
def flush(self, retries: int = 3):
if len(self.objects_with_error) > 0 and retries == 0:
error_msg = f"Objects had errors and retries failed as well. Object IDs: {self.objects_with_error.keys()}"
raise WeaviatePartialBatchError(error_msg)
results = self.client.batch.create_objects()
self.objects_with_error.clear()
for result in results:
errors = result.get("result", {}).get("errors", [])
if errors:
obj_id = result.get("id")
self.objects_with_error[obj_id] = self.buffered_objects.get(obj_id)
logging.info(f"Object {obj_id} had errors: {errors}. Going to retry.")
for buffered_object in self.objects_with_error.values():
self.client.batch.add_data_object(
buffered_object.properties, buffered_object.class_name, buffered_object.id, buffered_object.vector
)
if len(self.objects_with_error) > 0 and retries > 0:
logging.info("sleeping 2 seconds before retrying batch again")
time.sleep(2)
self.flush(retries - 1)
self.buffered_objects.clear()
def delete_stream_entries(self, stream_name: str):
class_name = stream_to_class_name(stream_name)
try:
original_schema = self.client.schema.get(class_name=class_name)
self.client.schema.delete_class(class_name=class_name)
logging.info(f"Deleted class {class_name}")
self.client.schema.create_class(original_schema)
logging.info(f"Recreated class {class_name}")
except weaviate.exceptions.UnexpectedStatusCodeException as e:
if e.message.startswith("Get schema! Unexpected status code: 404"):
logging.info(f"Class {class_name} did not exist.")
else:
raise e
@staticmethod
def get_weaviate_client(config: Mapping[str, Any]) -> weaviate.Client:
url, username, password = config.get("url"), config.get("username"), config.get("password")
if username and not password:
raise Exception("Password is required when username is set")
if password and not username:
raise Exception("Username is required when password is set")
if username and password:
credentials = weaviate.auth.AuthClientPassword(username, password)
return weaviate.Client(url=url, auth_client_secret=credentials)
return weaviate.Client(url=url, timeout_config=(2, 2))
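# Minimal usage sketch (assumes a reachable Weaviate instance; the config
# keys mirror the ones read above, the stream name and schema are purely
# illustrative):
#
#   client = Client(
#       config={"url": "http://localhost:8080", "batch_size": 100},
#       schema={"users": {"name": "string"}},
#   )
#   client.buffered_write_operation("users", {"id": 1, "name": "Ada"})
#   client.flush()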
| [
"[email protected]"
] | |
3e95f067fba14f5bd1ebdb04147f9f4ed532c262 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/60d6f99657e4479ab9beda33d53f774e.py | 3a8da1104c746329450661a00dc2b7bf64a87b09 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 227 | py | #test
def hey(string):
if string.isupper():
return 'Whoa, chill out!'
elif len(string) > 0 and string[-1] == '?':
return 'Sure.'
elif len(string.strip()) == 0:
return 'Fine. Be that way!'
else:
return 'Whatever.'
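# Example behaviour (quick sanity checks, not part of the exercise tests):
#
#   hey("WATCH OUT")  -> 'Whoa, chill out!'
#   hey("Is it on?")  -> 'Sure.'
#   hey("   ")        -> 'Fine. Be that way!'
#   hey("Tom-ay-to")  -> 'Whatever.'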
| [
"[email protected]"
] | |
80f8c431d1b474ac08e8b673e9393bc4a84c4003 | fed18966525169edf96e6178e36c6fb5ab5bfe5c | /easyTools/print_zh.py | d75657157b6f737a0b168fdf91a3fe9c5d45d5e0 | [] | no_license | huashuolee/borqs_stress | cef50c37b0dc0abdfcecd4f5de90925a61e53e88 | e3375b1032ec5a0dc625dc04c4924192ffc90f26 | refs/heads/master | 2021-01-19T01:37:11.626225 | 2019-07-12T13:21:25 | 2019-07-12T13:21:25 | 11,167,198 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import os
import sys
print sys.argv[1]
os.mkdir(sys.argv[1])
| [
"[email protected]"
] | |
bf64c9862aa6fd295ce0cc911835562fde0bac8f | 55fc41d645e2f2cb4e94eaeb01c21a8f36b522e3 | /data_processing/split_dataset.py | b6b410c63593ed0c4727101c19b45e3069e4d2bb | [] | no_license | andreiqv/pytorch_scale_classifier | 6c4515127ee9ad182242cc429326ed99984c2398 | 9448690ab0a2c5e9ec4c235ff85360be22572949 | refs/heads/master | 2020-04-04T17:34:27.169290 | 2018-11-08T09:24:35 | 2018-11-08T09:24:35 | 156,126,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | import os
import sys
import random
if os.path.exists('.local'):
src_dir = '/w/WORK/ineru/06_scales/_dataset/copy/'
dst_dir = '/w/WORK/ineru/06_scales/_dataset/splited/'
else:
src_dir = '/home/andrei/Data/Datasets/Scales/classifier_dataset_181018/'
dst_dir = '/home/andrei/Data/Datasets/Scales/splited/'
parts = ['train', 'valid', 'test']
def copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[1,1,1]):
src_dir = src_dir.rstrip('/')
dst_dir = dst_dir.rstrip('/')
os.system('mkdir -p {}'.format(dst_dir))
for p in parts:
os.system('mkdir -p {}'.format(dst_dir + '/' + p))
subdirs = os.listdir(src_dir)
for class_name in subdirs:
subdir = src_dir + '/' + class_name
if not os.path.isdir(subdir): continue
file_names = os.listdir(subdir)
if len(file_names) == 0:
print('{0} - empty subdir'.format(class_name))
continue
# calculate train, valid and test sizes
num_files = len(file_names)
num_valid = num_files * ratio[1] // sum(ratio)
num_test = num_files * ratio[2] // sum(ratio)
num_train = num_files - num_valid - num_test
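        # Worked example: 100 files with ratio=[16,3,1] (sum 20) gives
        # num_valid = 100*3//20 = 15, num_test = 100*1//20 = 5 and
        # num_train = 100 - 15 - 5 = 80; integer division keeps the counts
        # exact, and any rounding remainder stays in the train split.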
min_num_train = 0 # if 0, then do nothing
if min_num_train > 0:
if num_train < min_num_train:
(num_train, num_valid, num_test) = (num_files, 0, 0)
# SHUFFLE OR SORT
random.shuffle(file_names)
#file_names.sort()
files = dict()
files['train'] = file_names[ : num_train]
files['valid'] = file_names[num_train : num_train + num_valid]
files['test'] = file_names[num_train + num_valid : ]
print('[{}] - {} - {}:{}:{}'.\
format(class_name, num_files, num_train, num_valid, num_test))
#print('train:valid:test = ', len(files['train']),\
# len(files['valid']), len(files['test']))
for part in parts:
cmd = 'mkdir -p {}'.format(dst_dir + '/' + part + '/' + class_name)
os.system(cmd)
#print(cmd)
for file_name in files[part]:
src_path = subdir + '/' + file_name
dst_path = dst_dir + '/' + part + '/' + class_name + '/' + file_name
cmd = 'cp {} {}'.format(src_path, dst_path)
os.system(cmd)
#print(cmd)
if __name__ == '__main__':
copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[16,3,1])
| [
"[email protected]"
] | |
455d14cf9f53cdf563bf65094e78b103076f2743 | 7922714a4fd81acd2dac3875d2dd75a2bf24ef5e | /handlers/inlines/search.py | 57caa66a47de26c22dcbb842b488ae9e5bcde09f | [
"MIT"
] | permissive | hexatester/ut-telegram-bot | 32bf9a20ffaf82a5b6f1420d6bb041249ff93d6c | 20f6f063726913cb6d21e42538103e3498b929a7 | refs/heads/master | 2023-01-20T06:50:30.941786 | 2020-11-18T08:31:03 | 2020-11-18T08:31:03 | 290,542,370 | 0 | 0 | MIT | 2020-09-16T03:09:47 | 2020-08-26T16:02:02 | Python | UTF-8 | Python | false | false | 1,141 | py | from telegram import Update, InlineQuery, InlineQueryResult
from telegram.ext import CallbackContext
from typing import List
from core.utils.inline_query import article
from libs.rss.rss import Rss
from libs.search.search import Search
RSS = Rss()
SEARCH = Search()
EMPTY = article(
title="❌ Tidak ada hasil",
description="",
message_text="Pm @UniversitasTerbukaBot untuk mengakses layanan UT. 😁",
)
def search(update: Update, context: CallbackContext):
inline_query: InlineQuery = update.inline_query
query = inline_query.query
results_list: List[InlineQueryResult] = []
if len(query) > 0:
results_list.extend(SEARCH(query))
results_list.extend(RSS(query))
    if not results_list:
        if RSS.inline_results:
            results_list.extend(RSS.inline_results)
        else:
            results_list.append(EMPTY)
    # single answer path: the fallback branch above only fills results_list
    inline_query.answer(
        results_list, switch_pm_text="Bantuan", switch_pm_parameter="inline-help"
    )
    return -1
| [
"[email protected]"
] | |
8f8c577a98fec3fb5d6a1d25c2d0f8350c64abb4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2311/60829/306892.py | 9a645be64e8410c702a8a8169df2d59fed3ed6d4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | def dl(x):
res=""
for i in range(len(x)):
if not x[len(x)-1-i]==" ":
break
res=x[0:i+1]
return res
a=[]
b=[int(x) for x in dl(input()).split(" ")]
c=[int(x) for x in dl(input()).split(" ")]
a.append(b)
a.append(c)
aa=[[[10], [8]]]
bb=["0 4 0 20 0 12 0 "]
for i in range(len(aa)):
if aa[i]==a:
a=bb[i]
print(a) | [
"[email protected]"
] | |
c7f523807f996cae2f07692c4918cebcb18a824f | b37fdefb01d7b93a4f56a7c7cc60f9f78549de4c | /DI_Bootcamp/Week_9/Day_1/Exercise_XP/film_project_root/account_app/views.py | 15d82374b6660e2d3071afe8839fff8d9102006d | [] | no_license | AchimGoral/DI_Bootcamp | e7b13d7397ab5c9e5ad8041430c8bfbafec13c88 | 9345731503e2bb298bd3a579ffad590350f13df5 | refs/heads/main | 2023-04-18T20:06:45.631067 | 2021-05-01T08:08:45 | 2021-05-01T08:08:45 | 328,769,128 | 0 | 1 | null | 2021-01-27T14:30:09 | 2021-01-11T19:24:48 | HTML | UTF-8 | Python | false | false | 2,040 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib import messages
from .models import *
from .forms import *
def sign_up(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
# Stay logged in after signing up
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password1'],)
login(request, user)
return redirect('homepage')
else:
form = RegistrationForm()
return render(request, 'sign_up.html', {'form': form})
def login_view(request):
if request.method == "GET":
my_form = LoginForm()
return render(request, 'login.html', {'my_form': my_form})
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('homepage')
else:
messages.error(request, 'Username and/or password incorrect. Please try again')
return redirect('login')
def logout_view(request):
logout(request)
    return redirect('homepage')
def profile(request, pk):
my_profile = User.objects.get(id=pk)
return render(request, 'profile.html', {'my_profile': my_profile})
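# Hypothetical URL wiring for the views in this module, including
# profile_edit defined below (paths and names are illustrative assumptions,
# not taken from the project's actual urls.py):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('signup/', views.sign_up, name='sign_up'),
#       path('login/', views.login_view, name='login'),
#       path('logout/', views.logout_view, name='logout'),
#       path('profile/<int:pk>/', views.profile, name='profile'),
#       path('profile/edit/', views.profile_edit, name='profile_edit'),
#   ]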
def profile_edit(request):
    if request.method == "POST":
        user_form = UserChange(request.POST, instance=request.user)
        if user_form.is_valid():
            user_form.save()
            return redirect('homepage')
        # invalid form: fall through and re-render the bound form so errors show
    else:
        user_form = UserChange(instance=request.user)
    return render(request, 'edit_user.html', {'user_form': user_form}) | [
"[email protected]"
] | |
4f445597c5ac30039c0f3c3333dae8b68184c0c5 | 9c862bb7f9ac093a9bcf17d9060389dbbb8b655b | /examples/instrumentation/19_show_window_tree.py | 3f1c33ed09182131494b8863549ee7626b2aad1c | [] | no_license | fabioz/winappdbg | 24917ce29a90a08e890e8cd7d44feaad22daf0c4 | 1603870dc3fa3d2984ef23b6d77e400fb0a21b99 | refs/heads/master | 2023-08-02T07:40:10.496090 | 2020-04-22T15:18:42 | 2020-04-22T15:18:42 | 23,669,656 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# $Id$
from winappdbg import System, HexDump
def show_window_tree( window, indent = 0 ):
# Show this window's handle and caption.
# Use some ASCII art to show the layout. :)
handle = HexDump.integer( window.get_handle() )
caption = window.get_text()
line = ""
if indent > 0:
print "| " * indent
line = "| " * (indent - 1) + "|---"
else:
print "|"
if caption is not None:
line += handle + ": " + caption
else:
line += handle
print line
# Recursively show the child windows.
for child in window.get_children():
show_window_tree( child, indent + 1 )
def main():
# Create a system snaphot.
system = System()
# Get the Desktop window.
root = system.get_desktop_window()
# Now show the window tree.
show_window_tree(root)
# You can also ge the tree as a Python dictionary:
# tree = root.get_tree()
# print tree
if __name__ == '__main__':
main()
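# Illustrative output shape (handles and captions are made up; real values
# depend on the desktop session the script runs against):
#
#   |
#   0x00010010
#   |
#   |---0x000200B2: Program Manager
#   | |
#   | |---0x000300A4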
| [
"[email protected]"
] | |
dfd2e20af52b997ca2c30f3e5abe74095b8ad76b | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /UVA/531 - Compromise/generate.py | 6dee0d625c29b89113c0412c8f3c2aec4602f471 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import random;
import string;
random.seed(0);
def generate_seq(n_char = 100):
for _ in range(n_char):
print(random.choice(string.ascii_lowercase), end=' ');
print('\n#');
generate_seq();
generate_seq();
| [
"[email protected]"
] | |
98ecdbfc8fd401271a92625f86f3b760770d1b20 | 5066d38ad1a808fb6e849780f259c29e58495af0 | /ville.py | dc13449cdfb6980690cda49e09caa4ac2a19f1a4 | [] | no_license | Khopa/pyglet-taxi | 301b1075f695727f6b598fe9dd08b6e08455433f | 6d94ec9ff2c53a24089cd7f008d8a7bf8bfb8b72 | refs/heads/master | 2016-08-12T19:24:27.208530 | 2016-02-07T13:59:01 | 2016-02-07T13:59:01 | 51,248,853 | 1 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 8,884 | py | # -*- coding: cp1252 -*-
import pyglet
from pyglet.gl import *
from config import *
import random
from pieton import *
from client import *
import charge_ville
import time
import primitives as prims
import display_list as disp
ville = charge_ville.MATRICE_VILLE
class Ville:
"""
Classe pour representer la Ville
(Au moyen d'une matrice de Bloc)
"""
def __init__(self, game):
"""
Constructeur
"""
self.parent = game
self.destination_possible = []
self.matrice = []
for i,ligne in enumerate(ville):
self.matrice.append([])
for j,tile in enumerate(ligne):
                if tile == 12: # record the possible destinations
self.destination_possible.append([i,j])
self.matrice[i].append(Bloc(i,j, tile, self))
def draw(self, cam_pos = None):
"""
Fonction d'affichage
"""
#temps_affichage = time.time()
        # Frustum-culling style optimisation: only draw what lies within DISTANCE_VUE of the camera
range_min_i = int(cam_pos[0]) - DISTANCE_VUE
if range_min_i < 0 : range_min_i = 0
range_min_j = int(cam_pos[1]) - DISTANCE_VUE
if range_min_j < 0 : range_min_j = 0
range_max_i = int(cam_pos[0]) + DISTANCE_VUE
if range_max_i > len(self.matrice) + 1 : range_max_i = len(self.matrice) + 1
range_max_j = int(cam_pos[1]) + DISTANCE_VUE
if range_max_j > len(self.matrice[0]) : range_max_j = len(self.matrice) + 1
for i in range(range_min_i, range_max_i):
for j in range(range_min_j, range_max_j):
try:
self.matrice[i][j].draw(cam_pos)
except IndexError:
pass
self.gerer_pietons(cam_pos)
#print float(temps_affichage) - float(time.time())
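        # Worked example of the culling window above: with cam_pos == (10, 7)
        # and DISTANCE_VUE == 4, only blocs with 6 <= i < 14 and 3 <= j < 11
        # are drawn (clamped to the matrix bounds).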
def gerer_pietons(self, cam_pos):
"""
--> I - les pietons situé dans les blocs distants de DISTANCE_GESTION_PIETON sont supprimés
--> II - les blocs situe à DISTANCE_GESTION_PIETON-1 bloc de distance genere des pietons aléatoirement
"""
# I
range_i_max = int(cam_pos[0]) + DISTANCE_GESTION_PIETON
range_i_min = int(cam_pos[0]) - DISTANCE_GESTION_PIETON
range_j_max = int(cam_pos[1]) + DISTANCE_GESTION_PIETON
range_j_min = int(cam_pos[1]) - DISTANCE_GESTION_PIETON
        # i-row case
        for i in range(range_i_min, range_i_max):
            for j in [range_j_max, range_j_min]:
                try:
                    self.matrice[i][j].supprimerPietons()
                except IndexError:
                    pass
        # j-column case
        for j in range(range_j_min+1, range_j_max-1):
            for i in [range_i_max, range_i_min]:
                try:
                    self.matrice[i][j].supprimerPietons()
                except IndexError:
                    pass
# II
range_i_max = int(cam_pos[0]) + DISTANCE_GESTION_PIETON -1
range_i_min = int(cam_pos[0]) - DISTANCE_GESTION_PIETON +1
range_j_max = int(cam_pos[1]) + DISTANCE_GESTION_PIETON -1
range_j_min = int(cam_pos[1]) - DISTANCE_GESTION_PIETON +1
        # i-row case
for i in range(range_i_min, range_i_max):
for j in [range_j_max, range_j_min]:
try:
self.matrice[i][j].genererPietons()
except IndexError:
pass
        # j-column case
for j in range(range_j_min+1, range_j_max-1):
for i in [range_i_max, range_i_min]:
try:
self.matrice[i][j].genererPietons()
except IndexError:
pass
def getMatrice(self):
"""
Accesseur de la matrice
"""
return self.matrice
class Bloc:
"""
Un Bloc peut être un morceau de route ou un batiment, en fonction de la valeur text
Un Bloc est un element de la matrice de Ville
"""
def __init__(self, i, j, text, ville):
"""
Constructeur
"""
self.parent = ville
self.listePieton = []
self.pietonGenere = False
self.i = i
self.j = j
hauteur = random.randint(2, 15)
if text == 1:
self.mur = True
self.RANDOM_ID = random.choice([disp.ID_IMMEUBLE, disp.ID_IMMEUBLE2, disp.ID_IMMEUBLE3, disp.ID_IMMEUBLE4])
elif text in [8,9,11]:
self.mur = True
else: self.mur = False
self.p0 = (i*TAILLE_BLOC,0,j*TAILLE_BLOC)
self.p1 = (i*TAILLE_BLOC+TAILLE_BLOC,0,j*TAILLE_BLOC)
self.p2 = (i*TAILLE_BLOC+TAILLE_BLOC,0,j*TAILLE_BLOC+TAILLE_BLOC)
self.position_centrale = [self.p0[0]+TAILLE_BLOC/2,0,self.p0[2]+TAILLE_BLOC/2]
self.texture = text
self.d_fleche = 0
self.sens_fleche = 0
def supprimerPietons(self):
self.listePieton = []
self.pietonGenere = False
def genererPietons(self):
        if self.texture in [2,3] and not(self.pietonGenere): # in parks and squares
nb_pieton = random.randint(0, MAX_PIETON)
for i in range(nb_pieton):
pos = [(random.randint(self.p0[0], self.p1[0])), 0, random.randint(self.p0[2], self.p2[2])]
if random.randint(0,10) == 5 and not(self.parent.parent.vehicule.transporte_un_client):
self.listePieton.append(Client(self.parent.parent, pos))
else:
self.listePieton.append(Pieton(self.parent.parent, pos))
self.pietonGenere = True
def draw(self, cam_pos):
"""
Fonction d'affichage
"""
if self.texture == 0:
disp.afficher_bloc(disp.ID_DESSIN_ROUTEZ, self.i, self.j)
elif self.texture == 1:
disp.afficher_bloc(disp.ID_PAVE, self.i, self.j)
            # If the camera is inside the wall, do not draw it
if int(self.p0[0]/TAILLE_BLOC) == int(cam_pos[0]) and int(self.p0[2]/TAILLE_BLOC) == int(cam_pos[1]):
pass
else:
disp.afficher_bloc(self.RANDOM_ID, self.i, self.j)
elif self.texture == 2:
disp.afficher_bloc(disp.ID_PAVE, self.i, self.j)
elif self.texture == 3:
disp.afficher_bloc(disp.ID_PARC, self.i, self.j)
            # This line cannot go into the display list: the tree rendering is dynamic (it depends on the camera angle)
prims.afficherRectangle(position = self.position_centrale,\
dimensions = TAILLE_ARBRE, angleY = self.parent.parent.vehicule.angle,
texture = TEXTURE_ARBRE)
elif self.texture == 4:
disp.afficher_bloc(disp.ID_PASSAGE, self.i, self.j)
elif self.texture == 5:
disp.afficher_bloc(disp.ID_DESSIN_ROUTEX, self.i, self.j)
elif self.texture == 6:
disp.afficher_bloc(disp.ID_DESSIN_ROUTES, self.i, self.j)
elif self.texture == 7:
disp.afficher_bloc(disp.ID_PARC, self.i, self.j)
elif self.texture == 8:
disp.afficher_bloc(disp.ID_HAIE, self.i, self.j)
elif self.texture == 9:
if int(self.p0[0]/TAILLE_BLOC) == int(cam_pos[0]) and int(self.p0[2]/TAILLE_BLOC) == int(cam_pos[1]):
pass
else:
disp.afficher_bloc(disp.ID_FALAISE, self.i, self.j)
elif self.texture == 10:
disp.afficher_bloc(disp.ID_DESSIN_ROUTES, self.i, self.j)
disp.afficher_bloc(disp.ID_TUNNEL, self.i, self.j)
elif self.texture == 11:
disp.afficher_bloc(disp.ID_MAISON, self.i, self.j)
elif self.texture == 12:
disp.afficher_bloc(disp.ID_PAVE, self.i, self.j)
if self.parent.parent.vehicule.destination_client == [self.i, self.j]:
prims.afficherRectangle(position = [self.position_centrale[0],self.d_fleche, self.position_centrale[2]],\
dimensions = TAILLE_ARBRE, angleY = self.parent.parent.vehicule.angle,
texture = FLECHE2)
if self.sens_fleche == 0:
self.d_fleche += 0.1
if self.d_fleche >= 1: self.sens_fleche = 1
else:
self.d_fleche -= 0.1
if self.d_fleche <= 0: self.sens_fleche = 0
for p in self.listePieton:
p.actualiser()
p.afficher()
| [
"[email protected]"
] | |
1e4d8f144546bb2e1eeb8157dd23da79dbb06467 | 897cb969990a5ae319547fd572a262d58a1e33a8 | /JEC_Plotter/python/utilities/plot/flavor_fractions.py | a1986ca608672c79d5104191d741376aa0415eaf | [] | no_license | KIT-CMS/Excalibur | cc5a028bf6ad29a636536c3dfc0ebdc0eacfbbb7 | 8c27e2fdd7b7d5a0439f6e63be2299b16f5291c0 | refs/heads/master | 2023-07-24T05:28:08.156998 | 2023-07-17T15:29:15 | 2023-07-17T15:29:15 | 29,307,758 | 1 | 5 | null | 2023-05-24T11:41:22 | 2015-01-15T16:59:28 | Python | UTF-8 | Python | false | false | 8,593 | py | from copy import deepcopy
from ...core import (
    PlotHistograms1D,
    PlotHistograms1DFractions,
    PlotHistograms2D,
    PlotProfiles,  # needed by plot_flavors below for quantity pairs
    CutSet
)
__all__ = ["plot_flavors", "plot_flavor_fractions"]
_flavor_fraction_cuts_parton_matching = dict(
u={
'cut': CutSet(name='u',
weights=["abs(matchedgenparton1flavour)==2"],
labels=[]),
'label': r"u",
'color': 'pink'
},
d={
'cut': CutSet(name='d',
weights=["abs(matchedgenparton1flavour)==1"],
labels=[]),
'label': r"d",
'color': 'darkred'
},
ud={
'cut': CutSet(name='ud',
weights=["(abs(matchedgenparton1flavour)==2||abs(matchedgenparton1flavour)==1)"],
labels=[]),
'label': r"ud",
'color': 'red'
},
s={
'cut': CutSet(name='s',
weights=["abs(matchedgenparton1flavour)==3"],
labels=[]),
'label': r"s",
'color': 'green'
},
c={
'cut': CutSet(name='c',
weights=["abs(matchedgenparton1flavour)==4"],
labels=[]),
'label': r"c",
'color': 'violet'
},
b={
'cut': CutSet(name='b',
weights=["abs(matchedgenparton1flavour)==5"],
labels=[]),
'label': r"b",
'color': 'cornflowerblue'
},
g={
'cut': CutSet(name='g',
weights=["abs(matchedgenparton1flavour)==21"],
labels=[]),
'label': r"g",
'color': 'orange'
},
undef={
'cut': CutSet(name='undef',
weights=["abs(matchedgenparton1flavour)>900"],
labels=[]),
'label': r"undef",
'color': 'lightgray'
},
)
_flavor_fraction_cuts_miniAOD = dict(
u={
'cut': CutSet(name='u',
weights=["abs(jet1flavor)==2"],
labels=[]),
'label': r"u",
'color': 'pink'
},
d={
'cut': CutSet(name='d',
weights=["abs(jet1flavor)==1"],
labels=[]),
'label': r"d",
'color': 'darkred'
},
ud={
'cut': CutSet(name='ud',
weights=["(abs(jet1flavor)==2||abs(jet1flavor)==1)"],
labels=[]),
'label': r"ud",
'color': 'red'
},
s={
'cut': CutSet(name='s',
weights=["abs(jet1flavor)==3"],
labels=[]),
'label': r"s",
'color': 'green'
},
c={
'cut': CutSet(name='c',
weights=["abs(jet1flavor)==4"],
labels=[]),
'label': r"c",
'color': 'violet'
},
b={
'cut': CutSet(name='b',
weights=["abs(jet1flavor)==5"],
labels=[]),
'label': r"b",
'color': 'cornflowerblue'
},
g={
'cut': CutSet(name='g',
weights=["abs(jet1flavor)==21"],
labels=[]),
'label': r"g",
'color': 'orange'
},
undef={
'cut': CutSet(name='undef',
weights=["abs(jet1flavor)==0"],
labels=[]),
'label': r"undef",
'color': 'lightgray'
},
)
def _get_flavor_cuts_colors_labels(flavors, flavor_definition="miniAOD"):
"""return flavor cuts for a particular flavor definition"""
if flavor_definition == 'miniAOD':
_flavor_fraction_cuts = _flavor_fraction_cuts_miniAOD
elif flavor_definition == 'parton matching':
_flavor_fraction_cuts = _flavor_fraction_cuts_parton_matching
    else:
        raise ValueError(
            "Unknown flavor definition '{}': "
            "expected one of {}".format(flavor_definition,
                                        set(['miniAOD', 'parton matching'])))
_unknown_flavors = (set(flavors) - set(_flavor_fraction_cuts.keys()))
if _unknown_flavors:
raise ValueError(
"Unknown flavors: {}! Available: {}".format(
_unknown_flavors, set(_flavor_fraction_cuts.keys())
)
)
_cuts = []
_colors = []
_labels = []
for _flavor in flavors:
_ac = _flavor_fraction_cuts[_flavor]
_cuts.append(_ac['cut'])
_colors.append(_ac['color'])
_labels.append(_ac['label'])
return _cuts, _colors, _labels
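# Minimal usage sketch of the helper above (the flavor keys come from the
# dicts defined in this module):
#
#   cuts, colors, labels = _get_flavor_cuts_colors_labels(
#       ['ud', 'g'], flavor_definition='miniAOD')
#   # -> two CutSets, colors ['red', 'orange'], labels ['ud', 'g']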
def plot_flavors(sample,
jec_correction_string,
quantities_or_quantity_pairs,
selection_cuts,
www_folder_label,
flavors_to_include=('ud', 's', 'c', 'b', 'g', 'undef'),
flavor_definition='miniAOD',
force_n_bins=None,
stacked=False,
y_log=False):
"""Plot contributions from various jet flavors."""
_cuts, _colors, _labels = _get_flavor_cuts_colors_labels(flavors_to_include, flavor_definition=flavor_definition)
_qs = []
_qpairs = []
for _q_or_qp in quantities_or_quantity_pairs:
        if isinstance(_q_or_qp, (tuple, list)):
assert len(_q_or_qp) == 2
_qpairs.append(_q_or_qp)
else:
_qs.append(_q_or_qp)
# color each histogram by flavor
_samples = []
for _color, _label in zip(_colors, _labels):
_samples.append(deepcopy(sample))
_samples[-1]['source_label'] = _label
_samples[-1]['color'] = _color
_ph = None
if _qs:
_ph = PlotHistograms1D(
basename="flavors_{}".format(www_folder_label),
# there is one subplot per sample and cut in each plot
samples=_samples,
jec_correction_string=jec_correction_string,
additional_cuts=_cuts,
# each quantity cut generates a different plot
quantities=_qs,
# each selection cut generates a new plot
selection_cuts=selection_cuts,
stacked=stacked,
)
if force_n_bins is not None:
for _plot in _ph._plots:
_plot._basic_dict['x_bins'] = ",".join([str(force_n_bins)] + _plot._basic_dict['x_bins'].split(",")[1:])
        if y_log:
            # apply the log-scale option directly so it takes effect in make_plots()
            for _plot in _ph._plots:
                _plot._basic_dict['y_log'] = True
_ph2 = None
if _qpairs:
_ph2 = PlotProfiles(
basename="flavors_{}".format(www_folder_label),
# there is one subplot per sample and cut in each plot
samples=_samples,
jec_correction_string=jec_correction_string,
additional_cuts=_cuts,
# each quantity cut generates a different plot
quantity_pairs=_qpairs,
# each selection cut generates a new plot
selection_cuts=selection_cuts,
# show_ratio_to_first=True,
)
for _plot2D in _ph2._plots:
if _plot2D._qy in ('jet1pt_over_jet1ptraw',):
_plot2D._basic_dict['lines'] = ['1.0'] # guide to the eye
if _ph is not None:
_ph.make_plots()
if _ph2 is not None:
_ph2.make_plots()
def plot_flavor_fractions(
sample,
jec_correction_string,
quantities,
selection_cuts,
www_folder_label,
flavors_to_include=('ud', 's', 'c', 'b', 'g', 'undef'),
flavor_definition='miniAOD',
force_n_bins=None):
"""Plot flavor composition as a fraction of total. Always stacked."""
_cuts, _colors, _labels = _get_flavor_cuts_colors_labels(flavors_to_include, flavor_definition=flavor_definition)
_ph = PlotHistograms1DFractions(
basename="flavor_fractions_{}".format(www_folder_label),
# there is one subplot per sample and cut in each plot
jec_correction_string=jec_correction_string,
reference_cut_set=None,
sample=sample,
fraction_cut_sets=_cuts,
fraction_colors=_colors,
fraction_labels=_labels,
# each quantity cut generates a different plot
quantities=quantities,
# each selection cut generates a new plot
selection_cuts=selection_cuts,
y_label="Fraction of Total Events"
)
for _plot in _ph._plots:
if force_n_bins is not None:
_plot._basic_dict['x_bins'] = ",".join([str(force_n_bins)] + _plot._basic_dict['x_bins'].split(",")[1:])
return _ph
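# Hedged usage sketch (the sample, cut and correction-string values are
# placeholders; real ones come from the surrounding JEC_Plotter setup):
#
#   ph = plot_flavor_fractions(
#       sample=my_mc_sample,
#       jec_correction_string="L1L2L3",
#       quantities=["jet1pt"],
#       selection_cuts=[my_selection_cut],
#       www_folder_label="test",
#   )
#   ph.make_plots()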
| [
"[email protected]"
] | |
d1eac20d6ecb9450cba8f91e1a7e1d4e1e5741a0 | a8933adda6b90ca158096009165bf27b74a2733d | /auroracallback/index.py | 8e612d31c3268553e12c1b19be4ad251306e88d6 | [] | no_license | knighton/aurora-callback | 6c40db9c271b782ca8c14119b8937e3656980a36 | 26efc9069fcd5d48ae55bca3b06e3adf3927164e | refs/heads/master | 2020-12-18T11:53:16.516590 | 2020-01-21T15:05:41 | 2020-01-21T15:05:41 | 235,369,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | _INDEX = """
<!DOCTYPE HTML>
<head>
<style type="text/css">
html, body, #image {
width: 100%;
height: 100%;
}
body {
background: radial-gradient(
circle at center,
#000 0%,
#002 50%,
#004 65%,
#408 75%,
#824 85%,
#f40 90%,
#fb0 95%,
white 100%
);
}
</style>
</head>
<body>
<img id="image" src="/aurora.png"></img>
</body>
</html>
"""
def get_index():
return _INDEX
| [
"[email protected]"
] | |
06240216f9210c8e6d145968274d7682c2efaa25 | 5364927a0f594958ef226cd8b42120e96a970beb | /detectors/countauditor.py | 2ba6d830b4429d9f01dfd0aa9dab54dc2415fc0b | [] | no_license | psf/bpo-tracker-cpython | 883dd13f557179ee2f16e38d4f38e53c7f257a4a | 1a94f0977ca025d2baf45ef712ef87f394a59b25 | refs/heads/master | 2023-06-11T23:59:46.300683 | 2023-04-25T12:18:00 | 2023-04-25T12:18:00 | 276,213,165 | 24 | 10 | null | 2023-04-11T14:16:30 | 2020-06-30T21:32:40 | Python | UTF-8 | Python | false | false | 507 | py |
def count_nosy_msg(db, cl, nodeid, newvalues):
''' Update the counts of messages and nosy users on issue edit'''
if 'nosy' in newvalues:
newvalues['nosy_count'] = len(set(newvalues['nosy']))
if 'messages' in newvalues:
newvalues['message_count'] = len(set(newvalues['messages']))
def init(db):
# Should run after the creator and auto-assignee are added
db.issue.audit('create', count_nosy_msg, priority=120)
db.issue.audit('set', count_nosy_msg, priority=120)
| [
"devnull@localhost"
] | devnull@localhost |
00678ab8ff79facecf814370e31c6cd5fe27add6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/encodings/latin_1.py | dc74012c5ec50ada8637c3b65596d11567dc8a16 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b75503e532a27c636477396c855209ff5f3036536d2a4bede0a576c89382b60c
size 1264
| [
"[email protected]"
] | |
d9ed45e757f36c4737c4f53b459548e973a94c38 | 042b3e6553dbd61b204bdbad25e05aaeba79dde8 | /tests/ope/test_fqe.py | 2a871634bfc6235fdfc70ba63e851fba1934a267 | [
"MIT"
] | permissive | jkbjh/d3rlpy | 822e51e1c5b4ef37795aa2be089ff5a7ff18af07 | 43f0ba7e420aba077d85c897a38207f0b3ca6d17 | refs/heads/master | 2023-03-20T06:36:55.424681 | 2021-03-17T14:17:40 | 2021-03-17T14:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,859 | py | import pytest
import numpy as np
from unittest.mock import Mock
from d3rlpy.ope.fqe import FQE, DiscreteFQE
from d3rlpy.algos import DDPG, DQN
from tests.base_test import base_tester
from tests.algos.algo_test import algo_update_tester
from tests.algos.algo_test import DummyImpl
def ope_tester(ope, observation_shape, action_size=2):
# dummy impl object
impl = DummyImpl(observation_shape, action_size)
base_tester(ope, impl, observation_shape, action_size)
ope._algo.impl = impl
ope.impl = impl
# check save policy
impl.save_policy = Mock()
ope.save_policy("policy.pt", False)
impl.save_policy.assert_called_with("policy.pt", False)
# check predict
x = np.random.random((2, 3)).tolist()
ref_y = np.random.random((2, action_size)).tolist()
impl.predict_best_action = Mock(return_value=ref_y)
y = ope.predict(x)
assert y == ref_y
impl.predict_best_action.assert_called_with(x)
# check predict_value
action = np.random.random((2, action_size)).tolist()
ref_value = np.random.random((2, 3)).tolist()
impl.predict_value = Mock(return_value=ref_value)
value = ope.predict_value(x, action)
assert value == ref_value
impl.predict_value.assert_called_with(x, action, False)
# check sample_action
impl.sample_action = Mock(return_value=ref_y)
try:
y = ope.sample_action(x)
assert y == ref_y
impl.sample_action.assert_called_with(x)
except NotImplementedError:
pass
ope.impl = None
ope._algo.impl = None
@pytest.mark.parametrize("observation_shape", [(100,), (4, 84, 84)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("q_func_factory", ["mean", "qr", "iqn", "fqf"])
@pytest.mark.parametrize("scaler", [None, "min_max"])
@pytest.mark.parametrize("action_scaler", [None, "min_max"])
def test_fqe(
observation_shape, action_size, q_func_factory, scaler, action_scaler
):
algo = DDPG()
fqe = FQE(
algo=algo,
scaler=scaler,
action_scaler=action_scaler,
q_func_factory=q_func_factory,
)
ope_tester(fqe, observation_shape)
algo.create_impl(observation_shape, action_size)
algo_update_tester(fqe, observation_shape, action_size, discrete=False)
@pytest.mark.parametrize("observation_shape", [(100,), (4, 84, 84)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("q_func_factory", ["mean", "qr", "iqn", "fqf"])
@pytest.mark.parametrize("scaler", [None, "standard"])
def test_discrete_fqe(observation_shape, action_size, q_func_factory, scaler):
algo = DQN()
fqe = DiscreteFQE(algo=algo, scaler=scaler, q_func_factory=q_func_factory)
ope_tester(fqe, observation_shape)
algo.create_impl(observation_shape, action_size)
algo_update_tester(fqe, observation_shape, action_size, discrete=True)
| [
"[email protected]"
] | |
1f45f423f9b9c7a6771aa411b46fc92b4c8473ea | c4520d8327124e78a892ef5a75a38669f8cd7d92 | /venv/bin/pip3.6 | 5de7730cde95e1872365e26e4f9afc03673e919d | [] | no_license | arsh9806/GW2019PA1 | 81d62d3d33cfe3bd9e23aff909dd529b91c17035 | c3d12aed77d2810117ce741c48208edc2b6a1f34 | refs/heads/master | 2020-05-31T09:18:13.112929 | 2019-06-04T06:51:12 | 2019-06-04T06:51:12 | 190,209,074 | 2 | 0 | null | 2019-06-04T13:38:46 | 2019-06-04T13:38:46 | null | UTF-8 | Python | false | false | 412 | 6 | #!/Users/ishantkumar/PycharmProjects/GW2019PA1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
05a8f71fc5e7b421ee098845806cc55f6460df06 | 9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15 | /086 Scramble String.py | 24fb5940b4e91bad75604cd71f6ca376a0c51d99 | [
"MIT"
] | permissive | Aminaba123/LeetCode | 178ed1be0733cc7390f30e676eb47cc7f900c5b2 | cbbd4a67ab342ada2421e13f82d660b1d47d4d20 | refs/heads/master | 2020-04-20T10:40:00.424279 | 2019-01-31T08:13:58 | 2019-01-31T08:13:58 | 168,795,374 | 1 | 0 | MIT | 2019-02-02T04:50:31 | 2019-02-02T04:50:30 | null | UTF-8 | Python | false | false | 2,347 | py | """
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.
Below is one possible representation of s1 = "great":
great
/ \
gr eat
/ \ / \
g r e at
/ \
a t
To scramble the string, we may choose any non-leaf node and swap its two children.
For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".
rgeat
/ \
rg eat
/ \ / \
r g e at
/ \
a t
We say that "rgeat" is a scrambled string of "great".
Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".
rgtae
/ \
rg tae
/ \ / \
r g ta e
/ \
t a
We say that "rgtae" is a scrambled string of "great".
Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.
"""
__author__ = 'Danyang'
class Solution:
def isScramble(self, s1, s2):
"""
dfs
partition and compare
        Compare the two trees built from the two strings: A and B are
        scrambles of each other iff A's left/right subtrees are scrambles of
        B's left/right subtrees, or A's left/right subtrees are scrambles of
        B's right/left subtrees.
.....|... vs. .....|... or
...|..... vs. .....|...
:param s1:
:param s2:
:return: boolean
"""
if len(s1)!=len(s2):
return False
chars = [0 for _ in xrange(26)]
for char in s1:
chars[ord(char)-ord('a')] += 1
for char in s2:
chars[ord(char)-ord('a')] -= 1
# if filter(lambda x: x!=0, chars):
# return False
for val in chars:
if val!=0:
return False
if len(s1)==1:
return True
for i in xrange(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]) or \
self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:len(s2)-i]):
return True
return False
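    # Note: the recursion above revisits overlapping (s1, s2) subproblems; a
    # common speed-up is caching results keyed on the string pair. Sketch of
    # the idea (cache consulted before the character-count/partition logic):
    #
    #   _memo = {}
    #   def isScramble(self, s1, s2):
    #       key = (s1, s2)
    #       if key in self._memo:
    #           return self._memo[key]
    #       result = ...  # same length/char-count checks and partition loop
    #       self._memo[key] = result
    #       return result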
if __name__=="__main__":
assert Solution().isScramble("abc", "bca")==True
| [
"[email protected]"
] | |
c37b5e7c091393c55c01af84e23f3f883de3ea13 | 7ae9081aff882476ad0caa687ca41796e2035f85 | /planout/apps/accounts/migrations/0005_auto_20150301_1811.py | 176b7b2a6ed4e66694227f5ede41151cde8c9ee6 | [] | no_license | siolag161/planout | eb6b8720dfe0334d379c1040d607bb459a8e695a | f967db9636618906345132d006c2f9a597025a0f | refs/heads/master | 2020-04-14T13:17:26.011810 | 2015-03-21T11:53:48 | 2015-03-21T11:53:48 | 32,376,449 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import core.fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_basicuser_description'),
]
operations = [
migrations.AddField(
model_name='basicuser',
name='modified',
field=core.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='basicuser',
name='date_joined',
field=core.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='date joined', editable=False),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
0ceccfc0f20161b467e5f633c3340f79cb489e0b | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/kubernetesconfiguration/v20210301/source_control_configuration.py | dde54f5999b02095632c8983dd8935df94af9835 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,142 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SourceControlConfigurationArgs', 'SourceControlConfiguration']
@pulumi.input_type
class SourceControlConfigurationArgs:
def __init__(__self__, *,
cluster_name: pulumi.Input[str],
cluster_resource_name: pulumi.Input[str],
cluster_rp: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
configuration_protected_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
enable_helm_operator: Optional[pulumi.Input[bool]] = None,
helm_operator_properties: Optional[pulumi.Input['HelmOperatorPropertiesArgs']] = None,
operator_instance_name: Optional[pulumi.Input[str]] = None,
operator_namespace: Optional[pulumi.Input[str]] = None,
operator_params: Optional[pulumi.Input[str]] = None,
operator_scope: Optional[pulumi.Input[Union[str, 'OperatorScopeType']]] = None,
operator_type: Optional[pulumi.Input[Union[str, 'OperatorType']]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
source_control_configuration_name: Optional[pulumi.Input[str]] = None,
ssh_known_hosts_contents: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SourceControlConfiguration resource.
:param pulumi.Input[str] cluster_name: The name of the kubernetes cluster.
:param pulumi.Input[str] cluster_resource_name: The Kubernetes cluster resource name - either managedClusters (for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:param pulumi.Input[str] cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] configuration_protected_settings: Name-value pairs of protected configuration settings for the configuration
:param pulumi.Input[bool] enable_helm_operator: Option to enable Helm Operator for this git configuration.
:param pulumi.Input['HelmOperatorPropertiesArgs'] helm_operator_properties: Properties for Helm operator.
:param pulumi.Input[str] operator_instance_name: Instance name of the operator - identifying the specific configuration.
:param pulumi.Input[str] operator_namespace: The namespace to which this operator is installed to. Maximum of 253 lower case alphanumeric characters, hyphen and period only.
:param pulumi.Input[str] operator_params: Any Parameters for the Operator instance in string format.
:param pulumi.Input[Union[str, 'OperatorScopeType']] operator_scope: Scope at which the operator will be installed.
:param pulumi.Input[Union[str, 'OperatorType']] operator_type: Type of the operator
:param pulumi.Input[str] repository_url: Url of the SourceControl Repository.
:param pulumi.Input[str] source_control_configuration_name: Name of the Source Control Configuration.
:param pulumi.Input[str] ssh_known_hosts_contents: Base64-encoded known_hosts contents containing public SSH keys required to access private Git instances
"""
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "cluster_resource_name", cluster_resource_name)
pulumi.set(__self__, "cluster_rp", cluster_rp)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if configuration_protected_settings is not None:
pulumi.set(__self__, "configuration_protected_settings", configuration_protected_settings)
if enable_helm_operator is not None:
pulumi.set(__self__, "enable_helm_operator", enable_helm_operator)
if helm_operator_properties is not None:
pulumi.set(__self__, "helm_operator_properties", helm_operator_properties)
if operator_instance_name is not None:
pulumi.set(__self__, "operator_instance_name", operator_instance_name)
if operator_namespace is None:
operator_namespace = 'default'
if operator_namespace is not None:
pulumi.set(__self__, "operator_namespace", operator_namespace)
if operator_params is not None:
pulumi.set(__self__, "operator_params", operator_params)
if operator_scope is not None:
pulumi.set(__self__, "operator_scope", operator_scope)
if operator_type is not None:
pulumi.set(__self__, "operator_type", operator_type)
if repository_url is not None:
pulumi.set(__self__, "repository_url", repository_url)
if source_control_configuration_name is not None:
pulumi.set(__self__, "source_control_configuration_name", source_control_configuration_name)
if ssh_known_hosts_contents is not None:
pulumi.set(__self__, "ssh_known_hosts_contents", ssh_known_hosts_contents)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the kubernetes cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="clusterResourceName")
def cluster_resource_name(self) -> pulumi.Input[str]:
"""
The Kubernetes cluster resource name - either managedClusters (for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
"""
return pulumi.get(self, "cluster_resource_name")
@cluster_resource_name.setter
def cluster_resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_resource_name", value)
@property
@pulumi.getter(name="clusterRp")
def cluster_rp(self) -> pulumi.Input[str]:
"""
The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
"""
return pulumi.get(self, "cluster_rp")
@cluster_rp.setter
def cluster_rp(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_rp", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="configurationProtectedSettings")
def configuration_protected_settings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Name-value pairs of protected configuration settings for the configuration
"""
return pulumi.get(self, "configuration_protected_settings")
@configuration_protected_settings.setter
def configuration_protected_settings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "configuration_protected_settings", value)
@property
@pulumi.getter(name="enableHelmOperator")
def enable_helm_operator(self) -> Optional[pulumi.Input[bool]]:
"""
Option to enable Helm Operator for this git configuration.
"""
return pulumi.get(self, "enable_helm_operator")
@enable_helm_operator.setter
def enable_helm_operator(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_helm_operator", value)
@property
@pulumi.getter(name="helmOperatorProperties")
def helm_operator_properties(self) -> Optional[pulumi.Input['HelmOperatorPropertiesArgs']]:
"""
Properties for Helm operator.
"""
return pulumi.get(self, "helm_operator_properties")
@helm_operator_properties.setter
def helm_operator_properties(self, value: Optional[pulumi.Input['HelmOperatorPropertiesArgs']]):
pulumi.set(self, "helm_operator_properties", value)
@property
@pulumi.getter(name="operatorInstanceName")
def operator_instance_name(self) -> Optional[pulumi.Input[str]]:
"""
Instance name of the operator - identifying the specific configuration.
"""
return pulumi.get(self, "operator_instance_name")
@operator_instance_name.setter
def operator_instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator_instance_name", value)
@property
@pulumi.getter(name="operatorNamespace")
def operator_namespace(self) -> Optional[pulumi.Input[str]]:
"""
The namespace to which this operator is installed to. Maximum of 253 lower case alphanumeric characters, hyphen and period only.
"""
return pulumi.get(self, "operator_namespace")
@operator_namespace.setter
def operator_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator_namespace", value)
@property
@pulumi.getter(name="operatorParams")
def operator_params(self) -> Optional[pulumi.Input[str]]:
"""
Any Parameters for the Operator instance in string format.
"""
return pulumi.get(self, "operator_params")
@operator_params.setter
def operator_params(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator_params", value)
@property
@pulumi.getter(name="operatorScope")
def operator_scope(self) -> Optional[pulumi.Input[Union[str, 'OperatorScopeType']]]:
"""
Scope at which the operator will be installed.
"""
return pulumi.get(self, "operator_scope")
@operator_scope.setter
def operator_scope(self, value: Optional[pulumi.Input[Union[str, 'OperatorScopeType']]]):
pulumi.set(self, "operator_scope", value)
@property
@pulumi.getter(name="operatorType")
def operator_type(self) -> Optional[pulumi.Input[Union[str, 'OperatorType']]]:
"""
Type of the operator
"""
return pulumi.get(self, "operator_type")
@operator_type.setter
def operator_type(self, value: Optional[pulumi.Input[Union[str, 'OperatorType']]]):
pulumi.set(self, "operator_type", value)
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> Optional[pulumi.Input[str]]:
"""
Url of the SourceControl Repository.
"""
return pulumi.get(self, "repository_url")
@repository_url.setter
def repository_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_url", value)
@property
@pulumi.getter(name="sourceControlConfigurationName")
def source_control_configuration_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Source Control Configuration.
"""
return pulumi.get(self, "source_control_configuration_name")
@source_control_configuration_name.setter
def source_control_configuration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_control_configuration_name", value)
@property
@pulumi.getter(name="sshKnownHostsContents")
def ssh_known_hosts_contents(self) -> Optional[pulumi.Input[str]]:
"""
Base64-encoded known_hosts contents containing public SSH keys required to access private Git instances
"""
return pulumi.get(self, "ssh_known_hosts_contents")
@ssh_known_hosts_contents.setter
def ssh_known_hosts_contents(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssh_known_hosts_contents", value)
class SourceControlConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
cluster_resource_name: Optional[pulumi.Input[str]] = None,
cluster_rp: Optional[pulumi.Input[str]] = None,
configuration_protected_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
enable_helm_operator: Optional[pulumi.Input[bool]] = None,
helm_operator_properties: Optional[pulumi.Input[pulumi.InputType['HelmOperatorPropertiesArgs']]] = None,
operator_instance_name: Optional[pulumi.Input[str]] = None,
operator_namespace: Optional[pulumi.Input[str]] = None,
operator_params: Optional[pulumi.Input[str]] = None,
operator_scope: Optional[pulumi.Input[Union[str, 'OperatorScopeType']]] = None,
operator_type: Optional[pulumi.Input[Union[str, 'OperatorType']]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source_control_configuration_name: Optional[pulumi.Input[str]] = None,
ssh_known_hosts_contents: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The SourceControl Configuration object returned in Get & Put response.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_name: The name of the kubernetes cluster.
:param pulumi.Input[str] cluster_resource_name: The Kubernetes cluster resource name - either managedClusters (for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:param pulumi.Input[str] cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] configuration_protected_settings: Name-value pairs of protected configuration settings for the configuration
:param pulumi.Input[bool] enable_helm_operator: Option to enable Helm Operator for this git configuration.
:param pulumi.Input[pulumi.InputType['HelmOperatorPropertiesArgs']] helm_operator_properties: Properties for Helm operator.
:param pulumi.Input[str] operator_instance_name: Instance name of the operator - identifying the specific configuration.
        :param pulumi.Input[str] operator_namespace: The namespace to which this operator is installed. Maximum of 253 lower case alphanumeric characters, hyphen and period only.
:param pulumi.Input[str] operator_params: Any Parameters for the Operator instance in string format.
:param pulumi.Input[Union[str, 'OperatorScopeType']] operator_scope: Scope at which the operator will be installed.
:param pulumi.Input[Union[str, 'OperatorType']] operator_type: Type of the operator
:param pulumi.Input[str] repository_url: Url of the SourceControl Repository.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] source_control_configuration_name: Name of the Source Control Configuration.
:param pulumi.Input[str] ssh_known_hosts_contents: Base64-encoded known_hosts contents containing public SSH keys required to access private Git instances
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SourceControlConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The SourceControl Configuration object returned in Get & Put response.
:param str resource_name: The name of the resource.
:param SourceControlConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SourceControlConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
cluster_resource_name: Optional[pulumi.Input[str]] = None,
cluster_rp: Optional[pulumi.Input[str]] = None,
configuration_protected_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
enable_helm_operator: Optional[pulumi.Input[bool]] = None,
helm_operator_properties: Optional[pulumi.Input[pulumi.InputType['HelmOperatorPropertiesArgs']]] = None,
operator_instance_name: Optional[pulumi.Input[str]] = None,
operator_namespace: Optional[pulumi.Input[str]] = None,
operator_params: Optional[pulumi.Input[str]] = None,
operator_scope: Optional[pulumi.Input[Union[str, 'OperatorScopeType']]] = None,
operator_type: Optional[pulumi.Input[Union[str, 'OperatorType']]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source_control_configuration_name: Optional[pulumi.Input[str]] = None,
ssh_known_hosts_contents: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SourceControlConfigurationArgs.__new__(SourceControlConfigurationArgs)
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
if cluster_resource_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_resource_name'")
__props__.__dict__["cluster_resource_name"] = cluster_resource_name
if cluster_rp is None and not opts.urn:
raise TypeError("Missing required property 'cluster_rp'")
__props__.__dict__["cluster_rp"] = cluster_rp
__props__.__dict__["configuration_protected_settings"] = configuration_protected_settings
__props__.__dict__["enable_helm_operator"] = enable_helm_operator
__props__.__dict__["helm_operator_properties"] = helm_operator_properties
__props__.__dict__["operator_instance_name"] = operator_instance_name
if operator_namespace is None:
operator_namespace = 'default'
__props__.__dict__["operator_namespace"] = operator_namespace
__props__.__dict__["operator_params"] = operator_params
__props__.__dict__["operator_scope"] = operator_scope
__props__.__dict__["operator_type"] = operator_type
__props__.__dict__["repository_url"] = repository_url
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["source_control_configuration_name"] = source_control_configuration_name
__props__.__dict__["ssh_known_hosts_contents"] = ssh_known_hosts_contents
__props__.__dict__["compliance_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["repository_public_key"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:kubernetesconfiguration:SourceControlConfiguration"), pulumi.Alias(type_="azure-native:kubernetesconfiguration/v20191101preview:SourceControlConfiguration"), pulumi.Alias(type_="azure-native:kubernetesconfiguration/v20200701preview:SourceControlConfiguration"), pulumi.Alias(type_="azure-native:kubernetesconfiguration/v20201001preview:SourceControlConfiguration"), pulumi.Alias(type_="azure-native:kubernetesconfiguration/v20210501preview:SourceControlConfiguration"), pulumi.Alias(type_="azure-native:kubernetesconfiguration/v20211101preview:SourceControlConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SourceControlConfiguration, __self__).__init__(
'azure-native:kubernetesconfiguration/v20210301:SourceControlConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SourceControlConfiguration':
"""
Get an existing SourceControlConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SourceControlConfigurationArgs.__new__(SourceControlConfigurationArgs)
__props__.__dict__["compliance_status"] = None
__props__.__dict__["configuration_protected_settings"] = None
__props__.__dict__["enable_helm_operator"] = None
__props__.__dict__["helm_operator_properties"] = None
__props__.__dict__["name"] = None
__props__.__dict__["operator_instance_name"] = None
__props__.__dict__["operator_namespace"] = None
__props__.__dict__["operator_params"] = None
__props__.__dict__["operator_scope"] = None
__props__.__dict__["operator_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["repository_public_key"] = None
__props__.__dict__["repository_url"] = None
__props__.__dict__["ssh_known_hosts_contents"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return SourceControlConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> pulumi.Output['outputs.ComplianceStatusResponse']:
"""
Compliance Status of the Configuration
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="configurationProtectedSettings")
def configuration_protected_settings(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Name-value pairs of protected configuration settings for the configuration
"""
return pulumi.get(self, "configuration_protected_settings")
@property
@pulumi.getter(name="enableHelmOperator")
def enable_helm_operator(self) -> pulumi.Output[Optional[bool]]:
"""
Option to enable Helm Operator for this git configuration.
"""
return pulumi.get(self, "enable_helm_operator")
@property
@pulumi.getter(name="helmOperatorProperties")
def helm_operator_properties(self) -> pulumi.Output[Optional['outputs.HelmOperatorPropertiesResponse']]:
"""
Properties for Helm operator.
"""
return pulumi.get(self, "helm_operator_properties")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operatorInstanceName")
def operator_instance_name(self) -> pulumi.Output[Optional[str]]:
"""
Instance name of the operator - identifying the specific configuration.
"""
return pulumi.get(self, "operator_instance_name")
@property
@pulumi.getter(name="operatorNamespace")
def operator_namespace(self) -> pulumi.Output[Optional[str]]:
"""
        The namespace to which this operator is installed. Maximum of 253 lower case alphanumeric characters, hyphen and period only.
"""
return pulumi.get(self, "operator_namespace")
@property
@pulumi.getter(name="operatorParams")
def operator_params(self) -> pulumi.Output[Optional[str]]:
"""
Any Parameters for the Operator instance in string format.
"""
return pulumi.get(self, "operator_params")
@property
@pulumi.getter(name="operatorScope")
def operator_scope(self) -> pulumi.Output[Optional[str]]:
"""
Scope at which the operator will be installed.
"""
return pulumi.get(self, "operator_scope")
@property
@pulumi.getter(name="operatorType")
def operator_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of the operator
"""
return pulumi.get(self, "operator_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource provider.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="repositoryPublicKey")
def repository_public_key(self) -> pulumi.Output[str]:
"""
Public Key associated with this SourceControl configuration (either generated within the cluster or provided by the user).
"""
return pulumi.get(self, "repository_public_key")
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> pulumi.Output[Optional[str]]:
"""
Url of the SourceControl Repository.
"""
return pulumi.get(self, "repository_url")
@property
@pulumi.getter(name="sshKnownHostsContents")
def ssh_known_hosts_contents(self) -> pulumi.Output[Optional[str]]:
"""
Base64-encoded known_hosts contents containing public SSH keys required to access private Git instances
"""
return pulumi.get(self, "ssh_known_hosts_contents")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Top level metadata https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
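# Minimal usage sketch (a hedged example, not part of the generated SDK; the
# resource names and values below are illustrative placeholders, and the Python
# package path is an assumption inferred from the resource type token above):
#   import pulumi
#   from pulumi_azure_native.kubernetesconfiguration.v20210301 import SourceControlConfiguration
#   config = SourceControlConfiguration(
#       "flux-config",
#       cluster_name="my-aks-cluster",
#       cluster_resource_name="managedClusters",
#       cluster_rp="Microsoft.ContainerService",
#       resource_group_name="my-resource-group",
#       repository_url="https://github.com/example/gitops-repo",
#       operator_namespace="flux")
#   pulumi.export("compliance", config.compliance_status)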
| [
"[email protected]"
] | |
deec298358d4449942d8f95f300d77c1da85a33b | 1a3d6caf89e5b51a33627458ae7c0bbb00efdc1d | /src/gluonts/torch/model/deep_npts/__init__.py | e664774be4903e7274f0dcb979a150dd03d6169c | [
"Apache-2.0"
] | permissive | zoolhasson/gluon-ts | e9ff8e4ead4d040d9f8fa8e9db5f07473cb396ed | 3dfc0af66b68e3971032a6bd0f75cd216988acd6 | refs/heads/master | 2023-01-25T01:52:57.126499 | 2023-01-13T17:50:38 | 2023-01-13T17:50:38 | 241,743,126 | 0 | 1 | Apache-2.0 | 2020-08-06T16:53:11 | 2020-02-19T22:45:54 | Python | UTF-8 | Python | false | false | 911 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from ._estimator import DeepNPTSEstimator
from ._network import (
DeepNPTSNetwork,
DeepNPTSMultiStepPredictor,
DeepNPTSNetworkDiscrete,
DeepNPTSNetworkSmooth,
)
__all__ = [
"DeepNPTSEstimator",
"DeepNPTSNetwork",
"DeepNPTSMultiStepPredictor",
"DeepNPTSNetworkDiscrete",
"DeepNPTSNetworkSmooth",
]
| [
"[email protected]"
] | |
32ed9575258f7991c5a3e8769bf12f728676802c | dcc193058602f3cdd5ad9ab1cf8ae24d5ffbae28 | /king_phisher/job.py | ede8906855f3f766dab2c8194a79a268602183ea | [
"BSD-3-Clause"
] | permissive | udibott/king-phisher | 0ce6dd7636476fcd7c4e2d16fee58a6f910390cb | a61998daa70d07db6da9c23bac54032c5561c20e | refs/heads/master | 2021-01-16T00:31:34.477399 | 2015-02-19T21:12:28 | 2015-02-19T21:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,103 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/job.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import threading
import time
import uuid
__version__ = '0.1'
__all__ = ['JobManager', 'JobRequestDelete']
def normalize_job_id(job_id):
"""
Convert a value to a job id.
:param job_id: Value to convert.
:type job_id: int, str
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not isinstance(job_id, uuid.UUID):
job_id = uuid.UUID(job_id)
return job_id
class JobRequestDelete(object):
"""
An instance of this class can be returned by a job callback to request
that the job be deleted and not executed again.
"""
pass
class JobRun(threading.Thread):
def __init__(self, callback, args):
super(JobRun, self).__init__()
self.daemon = False
self.callback = callback
self.callback_args = args
self.request_delete = False
self.exception = None
self.reaped = False
def run(self):
try:
result = self.callback(*self.callback_args)
if isinstance(result, JobRequestDelete):
self.request_delete = True
except Exception as error:
self.exception = error
return
# Job Dictionary Details:
# last_run: datetime.datetime
# run_every: datetime.timedelta
# job: None or JobRun instance
# callback: function
# parameters: list of parameters to be passed to the callback function
# enabled: boolean if false do not run the job
# tolerate_exceptions: boolean if true this job will run again after a failure
# run_count: number of times the job has been ran
# expiration: number of times to run a job, datetime.timedelta instance or None
class JobManager(object):
"""
This class provides a threaded job manager for periodically executing
arbitrary functions in an asynchronous fashion.
"""
def __init__(self, use_utc=True):
"""
:param bool use_utc: Whether or not to use UTC time internally.
"""
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True
self._jobs = {}
self._thread_running = threading.Event()
self._thread_shutdown = threading.Event()
self._thread_shutdown.set()
self._job_lock = threading.RLock()
self.use_utc = use_utc
self.logger = logging.getLogger(self.__class__.__name__)
def _job_execute(self, job_id):
self._job_lock.acquire()
job_desc = self._jobs[job_id]
job_desc['last_run'] = self.now()
job_desc['run_count'] += 1
self.logger.debug('executing job with id: ' + str(job_id) + ' and callback function: ' + job_desc['callback'].__name__)
job_desc['job'] = JobRun(job_desc['callback'], job_desc['parameters'])
job_desc['job'].start()
self._job_lock.release()
def _run(self):
self.logger.info('the job manager has been started')
self._thread_running.set()
self._thread_shutdown.clear()
self._job_lock.acquire()
while self._thread_running.is_set():
self._job_lock.release()
time.sleep(1)
self._job_lock.acquire()
if not self._thread_running.is_set():
break
# reap jobs
jobs_for_removal = set()
for job_id, job_desc in self._jobs.items():
job_obj = job_desc['job']
if job_obj.is_alive() or job_obj.reaped:
continue
if job_obj.exception != None:
if job_desc['tolerate_exceptions'] == False:
self.logger.error('job ' + str(job_id) + ' encountered an error and is not set to tolerate exceptions')
jobs_for_removal.add(job_id)
else:
self.logger.warning('job ' + str(job_id) + ' encountered exception: ' + job_obj.exception.__class__.__name__)
if isinstance(job_desc['expiration'], int):
if job_desc['expiration'] <= 0:
jobs_for_removal.add(job_id)
else:
job_desc['expiration'] -= 1
elif isinstance(job_desc['expiration'], datetime.datetime):
if self.now_is_after(job_desc['expiration']):
jobs_for_removal.add(job_id)
if job_obj.request_delete:
jobs_for_removal.add(job_id)
job_obj.reaped = True
for job_id in jobs_for_removal:
self.job_delete(job_id)
# sow jobs
for job_id, job_desc in self._jobs.items():
if job_desc['last_run'] != None and self.now_is_before(job_desc['last_run'] + job_desc['run_every']):
continue
if job_desc['job'].is_alive():
continue
if not job_desc['job'].reaped:
continue
if not job_desc['enabled']:
continue
self._job_execute(job_id)
self._job_lock.release()
self._thread_shutdown.set()
def now(self):
"""
Return a :py:class:`datetime.datetime` instance representing the current time.
:rtype: :py:class:`datetime.datetime`
"""
if self.use_utc:
return datetime.datetime.utcnow()
else:
return datetime.datetime.now()
def now_is_after(self, dt):
"""
        Check whether the current time is after the datetime instance
        described in dt, i.e. whether dt is already in the past.
:param dt: Value to compare.
:type dt: :py:class:`datetime.datetime`
:rtype: bool
"""
return bool(dt <= self.now())
def now_is_before(self, dt):
"""
        Check whether the current time is before the datetime instance
        described in dt, i.e. whether dt is still in the future.
:param dt: Value to compare.
:type dt: :py:class:`datetime.datetime`
:rtype: bool
"""
return bool(dt >= self.now())
def start(self):
"""
Start the JobManager thread.
"""
if self._thread_running.is_set():
raise RuntimeError('the JobManager has already been started')
return self._thread.start()
def stop(self):
"""
Stop the JobManager thread.
"""
self.logger.debug('stopping the job manager')
self._thread_running.clear()
self._thread_shutdown.wait()
self._job_lock.acquire()
self.logger.debug('waiting on ' + str(len(self._jobs)) + ' job threads')
for job_desc in self._jobs.values():
if job_desc['job'] == None:
continue
if not job_desc['job'].is_alive():
continue
job_desc['job'].join()
self._thread.join()
self._job_lock.release()
self.logger.info('the job manager has been stopped')
return
def job_run(self, callback, parameters=None):
"""
Add a job and run it once immediately.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, 1)
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = False
job_desc['run_count'] = 0
job_desc['expiration'] = 0
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
self._job_execute(job_id)
return job_id
def job_add(self, callback, parameters=None, hours=0, minutes=0, seconds=0, tolerate_exceptions=True, expiration=None):
"""
Add a job to the job manager.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:param int hours: Number of hours to sleep between running the callback.
:param int minutes: Number of minutes to sleep between running the callback.
:param int seconds: Number of seconds to sleep between running the callback.
        :param bool tolerate_exceptions: Whether to continue running a job after it has thrown an exception.
:param expiration: When to expire and remove the job. If an integer
is provided, the job will be executed that many times. If a
datetime or timedelta instance is provided, then the job will
be removed after the specified time.
:type expiration: int, :py:class:`datetime.timedelta`, :py:class:`datetime.datetime`
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, ((hours * 60 * 60) + (minutes * 60) + seconds))
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = tolerate_exceptions
job_desc['run_count'] = 0
if isinstance(expiration, int):
job_desc['expiration'] = expiration
elif isinstance(expiration, datetime.timedelta):
job_desc['expiration'] = self.now() + expiration
elif isinstance(expiration, datetime.datetime):
job_desc['expiration'] = expiration
else:
job_desc['expiration'] = None
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
return job_id
def job_count(self):
"""
Return the number of jobs.
:return: The number of jobs.
:rtype: int
"""
return len(self._jobs)
def job_count_enabled(self):
"""
Return the number of enabled jobs.
:return: The number of jobs that are enabled.
:rtype: int
"""
enabled = 0
for job_desc in self._jobs.values():
if job_desc['enabled']:
enabled += 1
return enabled
def job_enable(self, job_id):
"""
Enable a job.
:param job_id: Job identifier to enable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = True
def job_disable(self, job_id):
"""
Disable a job. Disabled jobs will not be executed.
:param job_id: Job identifier to disable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = False
def job_delete(self, job_id, wait=True):
"""
Delete a job.
:param job_id: Job identifier to delete.
:type job_id: :py:class:`uuid.UUID`
:param bool wait: If the job is currently running, wait for it to complete before deleting it.
"""
job_id = normalize_job_id(job_id)
self.logger.info('deleting job with id: ' + str(job_id) + ' and callback function: ' + self._jobs[job_id]['callback'].__name__)
job_desc = self._jobs[job_id]
with self._job_lock:
job_desc['enabled'] = False
if wait and self.job_is_running(job_id):
job_desc['job'].join()
del self._jobs[job_id]
def job_exists(self, job_id):
"""
Check if a job identifier exists.
:param job_id: Job identifier to check.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
return job_id in self._jobs
def job_is_enabled(self, job_id):
"""
Check if a job is enabled.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
job_desc = self._jobs[job_id]
return job_desc['enabled']
def job_is_running(self, job_id):
"""
Check if a job is currently running. False is returned if the job does
not exist.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
if not job_id in self._jobs:
return False
job_desc = self._jobs[job_id]
if job_desc['job']:
return job_desc['job'].is_alive()
return False
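# Minimal usage sketch (not part of the original module; the callback and the
# timing values below are illustrative):
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    def tick(message):
        print(message)
    manager = JobManager()
    manager.start()
    # give the manager thread a moment to set its running flag
    time.sleep(0.5)
    # run tick('tick') every 2 seconds, expiring after a few runs
    job_id = manager.job_add(tick, parameters='tick', seconds=2, expiration=3)
    time.sleep(8)
    manager.stop()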
| [
"[email protected]"
] | |
9f827b5cd072b3c5a7b8abb08cbeb1c57976822f | b3ac12dfbb8fa74500b406a0907337011d4aac72 | /goldcoin/cmds/units.py | f39f52b9ed6ece8e4515e68efda51a35c69354ac | [
"Apache-2.0"
] | permissive | chia-os/goldcoin-blockchain | ab62add5396b7734c11d3c37c41776994489d5e7 | 5c294688dbbe995ae1d4422803f6fcf3e1cc6077 | refs/heads/main | 2023-08-11T23:58:53.617051 | 2021-09-12T15:33:26 | 2021-09-12T15:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from typing import Dict
# The rest of the codebase uses mojos everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
"goldcoin": 10 ** 12, # 1 goldcoin (ozt) is 1,000,000,000,000 mojo (1 trillion)
"mojo:": 1,
"colouredcoin": 10 ** 3, # 1 coloured coin is 1000 colouredcoin mojos
}
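# Example conversion (illustrative): a user-facing amount to mojos and back.
#   >>> int(2.5 * units["goldcoin"])
#   2500000000000
#   >>> 2500000000000 / units["goldcoin"]
#   2.5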
| [
"[email protected]"
] | |
605166acc000057f4f8e1a72739b30cd9d77d644 | 17fe4529fd2772b7d046f039bde140768634d028 | /misc/samples/unittest_sample_fixture.py | ec183aa51203926248509bf02996e096d24dc86e | [] | no_license | namesuqi/tapir | b9c21f30bf781eec314f0ae4f57c232f167e4734 | a5d4e9bb45d8cbf7e41d42d9006b43b753f3ecf1 | refs/heads/master | 2020-03-07T04:16:45.213561 | 2018-03-29T08:34:46 | 2018-03-29T08:34:46 | 127,261,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | # coding=utf-8
# author: zengyuetian
import unittest
def setUpModule():
print("setUpModule >>>")
def tearDownModule():
print("tearDownModule >>>")
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("setUpClass for Test1 >>")
@classmethod
def tearDownClass(cls):
print("tearDownClass for Test1 >>")
def setUp(self):
print("setUp for Test1 >")
def tearDown(self):
print("tearDown for Test1 >")
def testCase1(self):
print("testCase1 for Test1")
def testCase2(self):
print("testCase2 for Test1")
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("setUpClass for Test2 >>")
@classmethod
def tearDownClass(cls):
print("tearDownClass for Test2 >>")
def setUp(self):
print("setUp for Test2 >")
def tearDown(self):
print("tearDown for Test2 >")
def testCase1(self):
print("testCase1 for Test2")
def testCase2(self):
print("testCase2 for Test2")
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
79d06d973f4350530acd4a498fc14d7d9edb3e00 | 124b35ccbae76ba33b9044071a056b9109752283 | /Understanding_Concepts/viz/IntegratedGradientsTF/integrated_gradients_tf.py | d6198201ac70bf6560adfe7d8e5fd6aa4984b345 | [] | no_license | anilmaddu/Daily-Neural-Network-Practice-2 | 94bc78fe4a5a429f5ba911bae5f231f3d8246f61 | 748de55c1a17eae9f65d7ea08d6b2b3fc156b212 | refs/heads/master | 2023-03-08T22:04:45.535964 | 2019-03-15T23:10:35 | 2019-03-15T23:10:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | #################################################################
# Implementation of Integrated Gradients function in Tensorflow #
# Naozumi Hiranuma ([email protected]) #
#################################################################
import tensorflow as tf
import numpy as np
# INPUT: tensor of samples to explain
# OUTPUT: interpolated: linearly interpolated samples between input samples and references.
# stepsize: stepsizes between samples and references
# reference: a placeholder tensor for optionally specifying reference values.
def linear_interpolation(sample, num_steps=50):
    # Construct reference values if not available.
reference = tf.placeholder_with_default(tf.zeros_like(sample), shape=sample.get_shape())
# Expand sample and reference
sample_ = tf.stack([sample for _ in range(num_steps)])
reference_ = tf.stack([reference for _ in range(num_steps)])
# Get difference between sample and reference
dif = sample_ - reference_
stepsize = tf.divide(dif, num_steps)
# Get multipliers
multiplier = tf.divide(tf.stack([tf.ones_like(sample)*i for i in range(num_steps)]), num_steps)
    interpolated_dif = tf.multiply(dif, multiplier)
# Get parameters for reshaping
_shape = [-1] + [int(s) for s in sample.get_shape()[1:]]
perm = [1, 0]+[i for i in range(2,len(sample_.get_shape()))]
# Reshape
    interpolated = tf.reshape(reference_ + interpolated_dif, shape=_shape)
    stepsize = tf.reshape(stepsize, shape=_shape)
    return interpolated, stepsize, reference
# INPUT: samples: linearly interpolated samples between input samples and references. output of linear_interpolation()
#        stepsizes: output of linear_interpolation()
# _output: output tensor to be explained. It needs to be connected to samples.
# OUTPUT: explanations: A list of tensors with explanation values.
def build_ig(samples, stepsizes, _output, num_steps=50):
grads = tf.gradients(ys=_output, xs=samples)
flag = False
if not isinstance(samples, list):
samples = [samples]
stepsizes = [stepsizes]
        flag = True
# Estimate riemann sum
output = []
for i in range(len(samples)):
s = stepsizes[i]
g = grads[i]
riemann = tf.multiply(s, g)
        riemann = tf.reshape(riemann, shape=[num_steps, -1] + [int(dim) for dim in s.get_shape()[1:]])
explanation = tf.reduce_sum(riemann, axis=0)
output.append(explanation)
# Return the values.
if flag:
return output[0]
else:
return output
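# Minimal wiring sketch (a hedged example; `model_fn` and `target_class` are
# assumed user-supplied, following the TF1 graph-mode style used above):
#   sample = tf.placeholder(tf.float32, shape=[None, 28, 28])
#   interpolated, stepsizes, reference = linear_interpolation(sample, num_steps=50)
#   logits = model_fn(interpolated)
#   explanations = build_ig(interpolated, stepsizes, logits[:, target_class], num_steps=50)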
# -- end code -- | [
"[email protected]"
] | |
e5f8dd86564f6f2ac9a03aeef761b298c102eb92 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /gH3QMvF3czMDjENkk_9.py | 19552a338cba87d2d304d1d2bbfb9850243e1af0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | """
Create a function that takes a list and string. The function should remove the
letters in the string from the list, and return the list.
### Examples
remove_letters(["s", "t", "r", "i", "n", "g", "w"], "string") ➞ ["w"]
remove_letters(["b", "b", "l", "l", "g", "n", "o", "a", "w"], "balloon") ➞ ["b", "g", "w"]
remove_letters(["d", "b", "t", "e", "a", "i"], "edabit") ➞ []
### Notes
* If number of times a letter appears in the list is greater than the number of times the letter appears in the string, the extra letters should be left behind (see example #2).
* If all the letters in the list are used in the string, the function should return an empty list (see example #3).
"""
def remove_letters(letters, word):
l = letters
for i in word:
if i in l:
l.remove(i)
return l
| [
"[email protected]"
] | |
4728042cbeb6201f811568fdf61bb553ebc3dfae | bd19334c4698932a708afce4bcc208c7d9a3616b | /Q41.py | 8f99ea1ed698ca838b80d8808108f180dd96c636 | [] | no_license | Timothy-py/100-PythonChallenges | b9607cfc5fd27992321d6638a046f2f335f6e05d | f64a1b923a555268f4db38af04dcd354885aa231 | refs/heads/master | 2023-05-31T09:48:31.130970 | 2023-05-23T22:45:04 | 2023-05-23T22:45:04 | 208,028,848 | 2 | 1 | null | 2023-05-23T22:39:06 | 2019-09-12T10:50:43 | Python | UTF-8 | Python | false | false | 82 | py | # Pleas raise a RuntimeError exception.
raise RuntimeError("something is fishy")
| [
"[email protected]"
] | |
26f18c303e12dd1ea296568f3185d5b1df7582fe | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/op_cmp_int-106.py | 89011902bfe43fdb6bd7bee90efed2d33564d626 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | x:int = 42
y:int = 7
print(x == y)
print(x != y)
print(x < y)
print(x <= y)
print(x > y)
print(x >= y)
print(x == x)
print(x != x)
print(x < x)
print(x <= x)
print(x > x)
print(x >= x)
| [
"[email protected]"
] | |
cc9411b7251704073d70f510559e49b20473e415 | 4e30d990963870478ed248567e432795f519e1cc | /tests/models/validators/v3_1_patch_1/jsd_df4fb303a3e5661ba12058f18b225af.py | f31472450dc722d87e16a1a2c2c919e92e4c5463 | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 8,158 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine getNetworkAccessConditions data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorDf4Fb303A3E5661Ba12058F18B225Af(object):
"""getNetworkAccessConditions request schema definition."""
def __init__(self):
super(JSONSchemaValidatorDf4Fb303A3E5661Ba12058F18B225Af, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"items": {
"properties": {
"attributeName": {
"type": "string"
},
"attributeValue": {
"type": "string"
},
"children": {
"items": {
"properties": {
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"type": "object"
},
"description":
{
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"dictionaryValue": {
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"name": {
"type": "string"
},
"operator": {
"enum": [
"contains",
"endsWith",
"equals",
"greaterOrEquals",
"greaterThan",
"in",
"ipEquals",
"ipGreaterThan",
"ipLessThan",
"ipNotEquals",
"lessOrEquals",
"lessThan",
"matches",
"notContains",
"notEndsWith",
"notEquals",
"notIn",
"notStartsWith",
"startsWith"
],
"type": "string"
},
"weekDays": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
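# Minimal usage sketch (the payload below is an illustrative stub that merely
# satisfies the schema's required "response" and "version" fields):
#   validator = JSONSchemaValidatorDf4Fb303A3E5661Ba12058F18B225Af()
#   validator.validate({"response": [], "version": "1.0"})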
| [
"[email protected]"
] | |
df9e1ed2fbda2454efd1784b026e5a2e8aa25d2a | 66d339399671f9520e88d79b7118b6670f6a40a2 | /CheckWeb/Checkapp/apps.py | 4ee45b7130ebc5a7e7d5ea707111249ce199c855 | [
"MIT"
] | permissive | Tarpelite/OJ_research | 038ba1b3a5d8add01642cddd45b59722144ac110 | 5c23591a50e755dac800dfaedb561290ce35fc5b | refs/heads/master | 2020-06-07T10:52:17.059468 | 2019-06-21T10:47:56 | 2019-06-21T10:47:56 | 193,003,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class CheckappConfig(AppConfig):
name = 'Checkapp'
| [
"[email protected]"
] | |
6076c919a7fc64e1832cdfff14fd936313f6f605 | 3fd7adb56bf78d2a5c71a216d0ac8bc53485b034 | /tensorflow_data/position_ctrl_action5r3_rel/conf.py | 968527a0c3070d794fdb27d5931531bfada19c90 | [] | no_license | anair13/lsdc | 6d1675e493f183f467cab0bfe9b79a4f70231e4e | 7760636bea24ca0231b4f99e3b5e8290c89b9ff5 | refs/heads/master | 2021-01-19T08:02:15.613362 | 2017-05-12T17:13:54 | 2017-05-12T17:13:54 | 87,596,344 | 0 | 0 | null | 2017-04-08T00:18:55 | 2017-04-08T00:18:55 | null | UTF-8 | Python | false | false | 1,872 | py | import os
current_dir = os.path.dirname(os.path.realpath(__file__))
# tf record data location:
DATA_DIR = '/'.join(str.split(current_dir, '/')[:-2]) + '/pushing_data/position_control_a5r3rel/train'
# local output directory
OUT_DIR = current_dir + '/modeldata'
from video_prediction.prediction_model_downsized_lesslayer import construct_model
configuration = {
'experiment_name': 'position_rel',
'data_dir': DATA_DIR, # 'directory containing data.' ,
'output_dir': OUT_DIR, #'directory for model checkpoints.' ,
'current_dir': current_dir, #'directory for writing summary.' ,
'num_iterations': 50000, #'number of training iterations.' ,
'pretrained_model': '', # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 15, # 'sequence length, including context frames.' ,
'skip_frame': 1, # 'use ever i-th frame to increase prediction horizon' ,
'context_frames': 2, # of frames before predictions.' ,
'use_state': 1, #'Whether or not to give the state+action to the model' ,
'model': 'DNA', #'model architecture to use - CDNA, DNA, or STP' ,
'num_masks': 1, # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
'schedsamp_k': 900.0, # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
'train_val_split': 0.95, #'The percentage of files to use for the training set vs. the validation set.' ,
'batch_size': 32, #'batch size for training' ,
'learning_rate': 0.001, #'the base learning rate of the generator' ,
    'visualize': '', #'load model from which to generate visualizations'
'downsize': construct_model, #'create downsized model'
'file_visual': '', # datafile used for making visualizations
'penal_last_only': False # penalize only the last state, to get sharper predictions
}
| [
"[email protected]"
] | |
75ccc26a4c4472390ed15c91ff1250d21f8742ba | 9bb521d515a2401b69df797efed11b04e04401a7 | /tests/runtests-herd.py | 6b8ab527d581912293ea513b8d1152d11ea11811 | [
"BSD-3-Clause"
] | permissive | risent/django-redis | be512f1bf6c51b8e238e2fa8b1eec5073c03916e | 46bfd076c197846035e3f31348748d464ace74d0 | refs/heads/master | 2021-01-14T14:10:59.664982 | 2015-06-11T20:15:28 | 2015-06-11T20:15:28 | 37,450,021 | 0 | 0 | null | 2015-06-15T07:25:20 | 2015-06-15T07:25:20 | null | UTF-8 | Python | false | false | 311 | py | # -*- coding: utf-8 -*-
import os, sys
sys.path.insert(0, '..')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_sqlite_herd")
if __name__ == "__main__":
from django.core.management import execute_from_command_line
args = sys.argv
args.insert(1, "test")
execute_from_command_line(args)
| [
"[email protected]"
] | |
98a5ba2fce68657fdaed702892ee3ed449bf727e | 3e862ce90e7f17c1f1c586aad20bda6c4fc6cbd4 | /home/management/commands/load_initial_data.py | 19443015cfb4110723cc564ebfbfb35c06d46937 | [] | no_license | crowdbotics-users/kailashacrowdboticscom-kai-638 | 621fc891f449a843e0334f4443462f78d1a1d5b6 | e3753824bbd240c64eeadde9671438cc77a8dc0b | refs/heads/master | 2020-04-09T19:42:14.674638 | 2018-12-05T17:00:39 | 2018-12-05T17:00:39 | 160,551,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">image-to-text-converter-211</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = 'image-to-text-converter-211'
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = 'Load initial data to db'
def handle(self, *args, **options):
load_initial_data()
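# Usage sketch: invoked like any Django management command, e.g.
#   python manage.py load_initial_data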
| [
"[email protected]"
] | |
bc898e40424cb1cafb5b4b23ba444477869ae983 | 5c1531b47fb4dc4d7e5998d44f7200bf1786b12b | /__UNSORTED/130_surrounded_regions/surrounded_regions_TLE.py | 3c923ea0a7709fbedcb124df62b7253ab7f96642 | [] | no_license | Web-Dev-Collaborative/Leetcode-JS-PY-MD | d1f560051aad1896a80eccdd4b4fbb389e7033e3 | 675b94fa5da8d40f0ea79efe6d3ef1393221425f | refs/heads/master | 2023-09-01T22:30:32.313793 | 2021-10-26T02:17:03 | 2021-10-26T02:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | class Solution:
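    # Note: this is deliberately the TLE (Time Limit Exceeded) variant -- the
    # `(x, y) in self.area` membership test in explore() is an O(n) scan over
    # a list inside the recursion; a set would make the check O(1).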
# @param {character[][]} board
# @return {void} Do not return anything, modify board in-place instead.
def solve(self, board):
if not board:
return
lx = len(board)
ly = len(board[0])
for x in range(lx):
for y in range(ly):
if board[x][y] == "O":
self.area = []
if self.explore(board, x, y):
for xx, yy in self.area:
board[xx][yy] = "X"
def explore(self, board, x, y):
if board[x][y] != "O":
return True
if x == 0 or x == len(board) - 1 or y == 0 or y == len(board[0]) - 1:
return False
if (x, y) in self.area:
return True
self.area.append((x, y))
return (
self.explore(board, x, y + 1)
and self.explore(board, x + 1, y)
and self.explore(board, x - 1, y)
and self.explore(board, x, y - 1)
)
| [
"[email protected]"
] | |
8e204756e205394482650c812c5b994b021ff48c | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.2/programming/libs/geoip/actions.py | 8ea47d4a7aaa461b4099e2a3b64026df8fc2b019 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006,2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "GeoIP-%s" % get.srcVERSION()
def setup():
autotools.configure("--enable-shared \
--disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README", "TODO")
| [
"[email protected]"
] | |
564d007f30314f626af2a6f9ebbfe6aa75131c69 | 1c4a19c0d1953280f79193f30ad8c4759e3aff58 | /ansys/dpf/core/operators/math/conjugate.py | ddeae28fea91fca0f6c68e3c561790131c01251f | [
"MIT"
] | permissive | hoangxuyenle/DPF-Core | d02c843b678560f12715ea90dc8c9764b3bffc99 | a404dd290c7b3ee75463b2487cafb8bf48468691 | refs/heads/master | 2023-06-15T15:27:02.597938 | 2021-06-22T15:19:04 | 2021-06-22T15:19:04 | 381,611,135 | 0 | 0 | MIT | 2021-06-30T07:18:30 | 2021-06-30T07:18:30 | null | UTF-8 | Python | false | false | 4,859 | py | """
conjugate
=========
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "math" category
"""
class conjugate(Operator):
"""Computes element-wise conjugate of field containers containing complex fields.
available inputs:
- fields_container (FieldsContainer)
available outputs:
- fields_container (FieldsContainer)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.conjugate()
>>> # Make input connections
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.conjugate(fields_container=my_fields_container)
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()"""
def __init__(self, fields_container=None, config=None, server=None):
super().__init__(name="conjugate", config = config, server = server)
self._inputs = InputsConjugate(self)
self._outputs = OutputsConjugate(self)
if fields_container !=None:
self.inputs.fields_container.connect(fields_container)
@staticmethod
def _spec():
spec = Specification(description="""Computes element-wise conjugate of field containers containing complex fields.""",
map_input_pin_spec={
0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")},
map_output_pin_spec={
0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")})
return spec
@staticmethod
def default_config():
return Operator.default_config(name = "conjugate")
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsConjugate
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsConjugate
"""
return super().outputs
#internal name: conjugate
#scripting name: conjugate
class InputsConjugate(_Inputs):
"""Intermediate class used to connect user inputs to conjugate operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.conjugate()
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
"""
def __init__(self, op: Operator):
super().__init__(conjugate._spec().inputs, op)
self._fields_container = Input(conjugate._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator
Parameters
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.conjugate()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> #or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
class OutputsConjugate(_Outputs):
"""Intermediate class used to get outputs from conjugate operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.conjugate()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(conjugate._spec().outputs, op)
self._fields_container = Output(conjugate._spec().output_pin(0), 0, op)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.conjugate()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
return self._fields_container
| [
"[email protected]"
] | |
76223c165e5e9ac07147392a1c676096c926a704 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/debug/cli/ui_factory.pyi | b43ca70100629b7c956effd95df1bc66726070c7 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | pyi | # Stubs for tensorflow.python.debug.cli.ui_factory (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any as Any, Optional as Optional
SUPPORTED_UI_TYPES: Any
def get_ui(ui_type: Any, on_ui_exit: Optional[Any] = ..., available_ui_types: Optional[Any] = ..., config: Optional[Any] = ...): ...
| [
"[email protected]"
] | |
a3102fcc5d0e0bfb6ee0b1bf3111b652fc63dcb7 | 07fa9a51d737d0a1fbe217b1a6a956abbef4ef87 | /pytorchvideo/layers/accelerator/mobile_cpu/conv_helper.py | 9d9d7c228c92e93dca3ca889ed62c9c8350ab955 | [
"Apache-2.0"
] | permissive | xchani/pytorchvideo | 2b6decf3a1076b9256745f0ae81d86e2f43e14a7 | 16f2abf2f8aa174915316007622bbb260215dee8 | refs/heads/main | 2023-08-27T16:36:23.346066 | 2021-11-11T11:40:56 | 2021-11-11T11:40:56 | 414,095,913 | 0 | 0 | Apache-2.0 | 2021-10-06T06:32:33 | 2021-10-06T06:32:32 | null | UTF-8 | Python | false | false | 21,774 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains helper classes for building conv3d efficient blocks.
The helper classes are intended to be instantiated inside efficient block,
not to be used by user to build network.
"""
from copy import deepcopy
from typing import Tuple
import torch
import torch.nn as nn
class _Reshape(nn.Module):
"""
Helper class to implement data reshape as a module.
Args:
reshape_size (tuple): size of data after reshape.
"""
def __init__(
self,
reshape_size: Tuple,
):
super().__init__()
self.reshape_size = reshape_size
def forward(self, x):
return torch.reshape(x, self.reshape_size)
class _SkipConnectMul(nn.Module):
"""
Helper class to implement skip multiplication.
Args:
layer (nn.Module): layer for skip multiplication. With input x, _SkipConnectMul
implements layer(x)*x.
"""
def __init__(
self,
layer: nn.Module,
):
super().__init__()
self.layer = layer
self.mul_func = nn.quantized.FloatFunctional()
def forward(self, x):
return self.mul_func.mul(x, self.layer(x))
class _Conv3dTemporalKernel3Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with temporal kernel of 3 into equivalent conv2ds.
In conv3d with temporal kernel 3 and input I, for output temporal index of t (O[:,:,t,:,:]),
the conv can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t:t+3,:,:])
= conv2d_0(I[:,:,t,:,:]) + conv2d_1(I[:,:,t+1,:,:]) + conv2d_2(I[:,:,t+2,:,:])
If bias is considered:
O[:,:,t,:,:] = conv3d_w_bias(I[:,:,t:t+3,:,:])
= conv2d_0_wo_bias(I[:,:,t,:,:])
+ conv2d_1_w_bias(I[:,:,t+1,:,:]) + conv2d_2_wo_bias(I[:,:,t+2,:,:])
The input Conv3d also needs zero padding of size 1 in temporal dimension.
"""
def __init__(
self,
conv3d_in: nn.Conv3d,
input_THW_tuple: Tuple,
):
"""
Args:
conv3d_in (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
input_THW_tuple (tuple): input THW size for conv3d_in during forward.
"""
super().__init__()
assert conv3d_in.padding[0] == 1, (
"_Conv3dTemporalKernel3Eq only support temporal padding of 1, "
f"but got {conv3d_in.padding[0]}"
)
assert conv3d_in.padding_mode == "zeros", (
"_Conv3dTemporalKernel3Eq only support zero padding, "
f"but got {conv3d_in.padding_mode}"
)
self._input_THW_tuple = input_THW_tuple
padding_2d = conv3d_in.padding[1:]
in_channels = conv3d_in.in_channels
out_channels = conv3d_in.out_channels
kernel_size = conv3d_in.kernel_size[1:]
groups = conv3d_in.groups
stride_2d = conv3d_in.stride[1:]
# Create 3 conv2d to emulate conv3d.
if (
self._input_THW_tuple[0] > 1
): # Those two conv2d are needed only when temporal input > 1.
self._conv2d_3_3_0 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=False,
)
self._conv2d_3_3_2 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=False,
)
self._conv2d_3_3_1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=padding_2d,
stride=stride_2d,
groups=groups,
bias=(conv3d_in.bias is not None),
)
state_dict = conv3d_in.state_dict()
state_dict_1 = deepcopy(state_dict)
state_dict_1["weight"] = state_dict["weight"][:, :, 1]
self._conv2d_3_3_1.load_state_dict(state_dict_1)
if self._input_THW_tuple[0] > 1:
state_dict_0 = deepcopy(state_dict)
state_dict_0["weight"] = state_dict["weight"][:, :, 0]
if conv3d_in.bias is not None:
"""
Don't need bias for other conv2d instances to avoid duplicated addition of bias.
"""
state_dict_0.pop("bias")
self._conv2d_3_3_0.load_state_dict(state_dict_0)
state_dict_2 = deepcopy(state_dict)
state_dict_2["weight"] = state_dict["weight"][:, :, 2]
if conv3d_in.bias is not None:
state_dict_2.pop("bias")
self._conv2d_3_3_2.load_state_dict(state_dict_2)
self._add_funcs = nn.ModuleList(
[
nn.quantized.FloatFunctional()
for _ in range(2 * (self._input_THW_tuple[0] - 1))
]
)
self._cat_func = nn.quantized.FloatFunctional()
def forward(self, x):
"""
Use three conv2d to emulate conv3d.
This forward assumes zero padding of size 1 in temporal dimension.
"""
if self._input_THW_tuple[0] > 1:
out_tensor_list = []
"""
First output plane in temporal dimension,
conv2d_3_3_0 is skipped due to zero padding.
"""
cur_tensor = (
self._add_funcs[0]
.add(self._conv2d_3_3_1(x[:, :, 0]), self._conv2d_3_3_2(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
for idx in range(2, self._input_THW_tuple[0]):
cur_tensor = (
self._add_funcs[2 * idx - 3]
.add(
self._add_funcs[2 * idx - 2].add(
self._conv2d_3_3_0(x[:, :, idx - 2]),
self._conv2d_3_3_1(x[:, :, idx - 1]),
),
self._conv2d_3_3_2(x[:, :, idx]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
"""
Last output plane in temporal domain, conv2d_3_3_2 is skipped due to zero padding.
"""
cur_tensor = (
self._add_funcs[-1]
.add(self._conv2d_3_3_0(x[:, :, -2]), self._conv2d_3_3_1(x[:, :, -1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
return self._cat_func.cat(out_tensor_list, 2)
else: # Degenerated to simple conv2d
return self._conv2d_3_3_1(x[:, :, 0]).unsqueeze(2)
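# --- Illustrative check (added; not part of the original file). A minimal sketch of
# how the helper above is used: the decomposed module should reproduce the Conv3d
# output for a temporal kernel of 3 with temporal stride 1 and temporal padding 1.
# All sizes below are arbitrary example values.
def _demo_conv3d_temporal_kernel3_eq():
    conv3d = nn.Conv3d(4, 8, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    eq = _Conv3dTemporalKernel3Eq(conv3d, input_THW_tuple=(5, 16, 16))
    x = torch.rand(2, 4, 5, 16, 16)
    # Outputs should match up to floating-point error.
    assert torch.allclose(conv3d(x), eq(x), atol=1e-5)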
class _Conv3dTemporalKernel5Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with kernel size of (5, k, k) into equivalent conv2ds.
In such conv3d and input I, for output temporal index of t (O[:,:,t,:,:]), the conv
can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t:t+5,:,:])
= conv2d_0(I[:,:,t,:,:]) + conv2d_1(I[:,:,t+1,:,:]) + conv2d_2(I[:,:,t+2,:,:])
+ conv2d_3(I[:,:,t+3,:,:]) + conv2d_4(I[:,:,t+4,:,:])
If bias is considered:
    O[:,:,t,:,:] = conv3d_w_bias(I[:,:,t:t+5,:,:])
    = conv2d_0_wo_bias(I[:,:,t,:,:])
    + conv2d_1_wo_bias(I[:,:,t+1,:,:]) + conv2d_2_w_bias(I[:,:,t+2,:,:])
    + conv2d_3_wo_bias(I[:,:,t+3,:,:]) + conv2d_4_wo_bias(I[:,:,t+4,:,:])
The input Conv3d also needs zero padding of size 2 in temporal dimension at begin and end.
"""
def __init__(
self,
conv3d_in: nn.Conv3d,
thw_shape: Tuple[int, int, int],
):
"""
Args:
conv3d_in (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
thw_shape (tuple): input THW size for conv3d_in during forward.
"""
super().__init__()
assert conv3d_in.padding[0] == 2, (
"_Conv3dTemporalKernel5Eq only support temporal padding of 2, "
f"but got {conv3d_in.padding[0]}"
)
assert conv3d_in.padding_mode == "zeros", (
"_Conv3dTemporalKernel5Eq only support zero padding, "
f"but got {conv3d_in.padding_mode}"
)
self._thw_shape = thw_shape
padding_2d = conv3d_in.padding[1:]
in_channels = conv3d_in.in_channels
out_channels = conv3d_in.out_channels
kernel_size = conv3d_in.kernel_size[1:]
groups = conv3d_in.groups
stride_2d = conv3d_in.stride[1:]
        # Create 5 conv2d to emulate conv3d.
t, h, w = self._thw_shape
args_dict = {
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"padding": padding_2d,
"stride": stride_2d,
"groups": groups,
}
for iter_idx in range(5):
if iter_idx != 2:
if t > 1: # Those four conv2d are needed only when temporal input > 1.
self.add_module(
f"_conv2d_{iter_idx}", nn.Conv2d(**args_dict, bias=False)
)
else: # _conv2d_2 is needed for all circumstances.
self.add_module(
f"_conv2d_{iter_idx}",
nn.Conv2d(**args_dict, bias=(conv3d_in.bias is not None)),
)
# State dict for _conv2d_2
original_state_dict = conv3d_in.state_dict()
state_dict_to_load = deepcopy(original_state_dict)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 2]
self._conv2d_2.load_state_dict(state_dict_to_load)
if t > 1:
if conv3d_in.bias is not None:
# Don't need bias for other conv2d instances to avoid duplicated
# addition of bias.
state_dict_to_load.pop("bias")
# State dict for _conv2d_0, _conv2d_1, _conv2d_3, _conv2d_4
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 0]
self._conv2d_0.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 1]
self._conv2d_1.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 3]
self._conv2d_3.load_state_dict(state_dict_to_load)
state_dict_to_load["weight"] = original_state_dict["weight"][:, :, 4]
self._conv2d_4.load_state_dict(state_dict_to_load)
# Elementwise add are needed in forward function, use nn.quantized.FloatFunctional()
# for better quantization support. One convolution needs at most 4 elementwise adds
# without zero padding; for boundary planes fewer elementwise adds are needed.
# See forward() for more details.
self._add_funcs = nn.ModuleList(
[nn.quantized.FloatFunctional() for _ in range(4 * t - 6)]
)
self._cat_func = nn.quantized.FloatFunctional()
def forward(self, x):
"""
        Use five conv2d to emulate conv3d.
Args:
x (torch.Tensor): 5D tensor of (B, C, T, H, W)
"""
t, h, w = self._thw_shape
out_tensor_list = []
if (
t == 1
): # Degenerated to simple conv2d, but make sure output still has T dimension
return self._conv2d_2(x[:, :, 0]).unsqueeze(2)
elif t == 2:
            # out_tensor_list[0]: _conv2d_0, _conv2d_1 and _conv2d_4 are
            # applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: _conv2d_0, _conv2d_3 and _conv2d_4 are
            # applied to zero padding.
cur_tensor = (
self._add_funcs[1]
.add(self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1]))
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
elif t == 3:
            # out_tensor_list[0]: _conv2d_0, _conv2d_1 are applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(
self._add_funcs[1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: _conv2d_0, _conv2d_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[2]
.add(
self._add_funcs[3].add(
self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1])
),
self._conv2d_3(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[2]: _conv2d_3, _conv2d_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[4]
.add(
self._add_funcs[5].add(
self._conv2d_0(x[:, :, 0]), self._conv2d_1(x[:, :, 1])
),
self._conv2d_2(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
elif t == 4:
            # out_tensor_list[0]: _conv2d_0, _conv2d_1 are applied to zero padding.
cur_tensor = (
self._add_funcs[0]
.add(
self._add_funcs[1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: _conv2d_0 is applied to zero padding.
cur_tensor = (
self._add_funcs[2]
.add(
self._add_funcs[3].add(
self._add_funcs[4].add(
self._conv2d_1(x[:, :, 0]),
self._conv2d_2(x[:, :, 1]),
),
self._conv2d_3(x[:, :, 2]),
),
self._conv2d_4(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[2]: _conv2d_4 is applied to zero padding.
cur_tensor = (
self._add_funcs[5]
.add(
self._add_funcs[6].add(
self._add_funcs[7].add(
self._conv2d_0(x[:, :, 0]),
self._conv2d_1(x[:, :, 1]),
),
self._conv2d_2(x[:, :, 2]),
),
self._conv2d_3(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
            # out_tensor_list[3]: _conv2d_3, _conv2d_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[8]
.add(
self._add_funcs[9].add(
self._conv2d_0(x[:, :, 1]), self._conv2d_1(x[:, :, 2])
),
self._conv2d_2(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
else: # t >= 5
            # out_tensor_list[0]: _conv2d_0, _conv2d_1 are applied to zero padding.
add_func_idx_base = 0
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
),
self._conv2d_4(x[:, :, 2]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 2
            # out_tensor_list[1]: _conv2d_0 is applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._conv2d_1(x[:, :, 0]),
self._conv2d_2(x[:, :, 1]),
),
self._conv2d_3(x[:, :, 2]),
),
self._conv2d_4(x[:, :, 3]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 3
# out_tensor_list[2:-2]: zero padding has no effect.
for idx in range(4, t):
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._add_funcs[add_func_idx_base + 3].add(
self._conv2d_0(x[:, :, idx - 4]),
self._conv2d_1(x[:, :, idx - 3]),
),
self._conv2d_2(x[:, :, idx - 2]),
),
self._conv2d_3(x[:, :, idx - 1]),
),
self._conv2d_4(x[:, :, idx]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 4
            # out_tensor_list[-2]: _conv2d_4 is applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._add_funcs[add_func_idx_base + 2].add(
self._conv2d_0(x[:, :, -4]),
self._conv2d_1(x[:, :, -3]),
),
self._conv2d_2(x[:, :, -2]),
),
self._conv2d_3(x[:, :, -1]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
add_func_idx_base += 3
            # out_tensor_list[-1]: _conv2d_3, _conv2d_4 are applied to zero padding.
cur_tensor = (
self._add_funcs[add_func_idx_base]
.add(
self._add_funcs[add_func_idx_base + 1].add(
self._conv2d_0(x[:, :, -3]),
self._conv2d_1(x[:, :, -2]),
),
self._conv2d_2(x[:, :, -1]),
)
.unsqueeze(2)
)
out_tensor_list.append(cur_tensor)
return self._cat_func.cat(out_tensor_list, 2)
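# --- Illustrative check (added; not part of the original file). A minimal sketch for
# the kernel-5 decomposition above, assuming temporal padding 2 and temporal stride 1;
# all sizes below are arbitrary example values.
def _demo_conv3d_temporal_kernel5_decomposed():
    conv3d = nn.Conv3d(3, 6, kernel_size=(5, 3, 3), padding=(2, 1, 1))
    eq = _Conv3dTemporalKernel5Decomposed(conv3d, thw_shape=(6, 8, 8))
    x = torch.rand(1, 3, 6, 8, 8)
    # Five per-plane conv2d passes plus elementwise adds emulate the conv3d.
    assert torch.allclose(conv3d(x), eq(x), atol=1e-5)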
class _Conv3dTemporalKernel1Decomposed(nn.Module):
"""
Helper class for decomposing conv3d with temporal kernel of 1 into conv2d on
multiple temporal planes.
In conv3d with temporal kernel 1 and input I, for output temporal index of t (O[:,:,t,:,:]),
the conv can be expressed as:
O[:,:,t,:,:] = conv3d(I[:,:,t,:,:])
= conv2d(I[:,:,t,:,:])
The full output can be obtained by concat O[:,:,t,:,:] for t in 0...T,
where T is the length of I in temporal dimension.
"""
def __init__(
self,
conv3d_eq: nn.Conv3d,
input_THW_tuple: Tuple,
):
"""
Args:
conv3d_eq (nn.Module): input nn.Conv3d module to be converted
into equivalent conv2d.
input_THW_tuple (tuple): input THW size for conv3d_eq during forward.
"""
super().__init__()
# create equivalent conv2d module
in_channels = conv3d_eq.in_channels
out_channels = conv3d_eq.out_channels
bias_flag = conv3d_eq.bias is not None
self.conv2d_eq = nn.Conv2d(
in_channels,
out_channels,
kernel_size=(conv3d_eq.kernel_size[1], conv3d_eq.kernel_size[2]),
stride=(conv3d_eq.stride[1], conv3d_eq.stride[2]),
groups=conv3d_eq.groups,
bias=bias_flag,
padding=(conv3d_eq.padding[1], conv3d_eq.padding[2]),
dilation=(conv3d_eq.dilation[1], conv3d_eq.dilation[2]),
)
state_dict = conv3d_eq.state_dict()
state_dict["weight"] = state_dict["weight"].squeeze(2)
self.conv2d_eq.load_state_dict(state_dict)
self.input_THW_tuple = input_THW_tuple
def forward(self, x):
out_tensor_list = []
for idx in range(self.input_THW_tuple[0]):
cur_tensor = self.conv2d_eq(x[:, :, idx]).unsqueeze(2)
out_tensor_list.append(cur_tensor)
return torch.cat(out_tensor_list, 2)
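# --- Illustrative check (added; not part of the original file). With a temporal
# kernel of 1, the conv3d is just the same conv2d applied independently to every
# temporal plane; all sizes below are arbitrary example values.
def _demo_conv3d_temporal_kernel1_decomposed():
    conv3d = nn.Conv3d(4, 8, kernel_size=(1, 3, 3), padding=(0, 1, 1))
    eq = _Conv3dTemporalKernel1Decomposed(conv3d, input_THW_tuple=(4, 8, 8))
    x = torch.rand(2, 4, 4, 8, 8)
    assert torch.allclose(conv3d(x), eq(x), atol=1e-5)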
# ==== file: apps/api/migrations/0015_auto_20230210_1801.py (repo: alexmon1989/uma) ====
# Generated by Django 3.2.12 on 2023-02-10 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0014_auto_20211213_1900'),
]
operations = [
migrations.AddField(
model_name='opendata',
name='files_path',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
# ==== file: wxpython/project/panels/WorkExperience.py (repo: sxnys/mypython) ====
# -*- coding: utf-8 -*-
__author__ = 'Sxn'
__date__ = '2017/5/22 19:09'
from . import StudyExperience
from extra import JsonIO
class TabPanel(StudyExperience.TabPanel):
def __init__(self, parent):
        StudyExperience.TabPanel.__init__(self, parent, tabName=u'Work Experience', instructText=u'Including academic part-time positions', numLimit=10, editInfo=[u'Start/End Date', u'Employer', u'Position/Title'], colSize=[250, 250, 250], childOrder=2)
def addToJsonDict(self):
JsonIO.working_exp = []
for i in xrange(self.gridRow):
tmp = {}
tmp['start_end_date'] = self.infoGrid.GetCellValue(i, 0)
tmp['working_dep'] = self.infoGrid.GetCellValue(i, 1)
tmp['job'] = self.infoGrid.GetCellValue(i, 2)
            JsonIO.working_exp.append(tmp)
# ==== file: moodledata/vpl_data/63/usersdata/230/28042/submittedfiles/swamee.py (repo: rafaelperazzo/programacao-web) ====
# -*- coding: utf-8 -*-
import math
#START YOUR CODE HERE
f = float(input('Enter the value of f: '))
L = float(input('Enter the value of L: '))
Q = float(input('Enter the value of Q: '))
DeltaH = float(input('Enter the value of DeltaH: '))
v = float(input('Enter the value of v: '))
g = 9.81
E = 0.000002
D = ((8*f*L*(Q**2))/((math.pi**2)*g*DeltaH))**0.2
Rey = (4*Q)/(math.pi*D*v)
# Swamee-Jain: k = 0.25 / [log10(E/(3.7*D) + 5.74/Rey**0.9)]**2
# (log10 must wrap the whole sum, and E is divided by 3.7*D)
k = (0.25)/(math.log10((E/(3.7*D))+(5.74/(Rey**0.9))))**2
print ('%.4f' % D)
print ('%.4f' % Rey)
print ('%.4f' % k)
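# --- Illustrative refactor (added; not part of the original script): the corrected
# Swamee-Jain friction factor from above as a reusable function; the function name
# is only an example.
def swamee_jain_friction_factor(E, D, Rey):
    # f = 0.25 / [log10(E/(3.7*D) + 5.74/Rey**0.9)]**2
    return 0.25 / (math.log10(E / (3.7 * D) + 5.74 / (Rey ** 0.9))) ** 2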
# ==== file: LeetCode/binary_serach_tree.py (repo: YuanXianguo/Python-Interview-Master) ====
from tree import Tree
class Node(object):
    """Tree node."""
def __init__(self, val=None):
self.val = val
self.left = None
self.right = None
class BinarySearchTree(Tree):
    """A binary search tree."""
def __init__(self, node=None):
super().__init__(node)
    def insert(self, val):
        """Insert a value into the BST."""
node = Node(val)
if not self.root:
self.root = node
else:
if val < self.root.val:
self.root.left = BinarySearchTree(
self.root.left).insert(val)
elif val > self.root.val:
self.root.right = BinarySearchTree(
self.root.right).insert(val)
return self.root
    def find(self, val):
        """Recursively search for a value."""
        if not self.root:
            return "not found"
        if val < self.root.val:
            return BinarySearchTree(self.root.left).find(val)
        elif val > self.root.val:
            return BinarySearchTree(self.root.right).find(val)
        else:  # found it
            return self.root
    def find2(self, val):
        """Iteratively search for a value."""
        root = self.root
        while root:
            if val < root.val:
                root = root.left
            elif val > root.val:
                root = root.right
            else:
                return root
        return "not found"
    def find_min(self):
        """Recursively find the minimum, which always sits at the end of the leftmost branch."""
        if not self.root:
            return "not found"
        if not self.root.left:
            return self.root  # the minimum has no left subtree
        else:
            return BinarySearchTree(self.root.left).find_min()
    def find_max(self):
        """Iteratively find the maximum, which always sits at the end of the rightmost branch."""
        root = self.root
        if not root:
            return "not found"
        while root.right:
            root = root.right
        return root
    def delete(self, val):
        """Each recursive delete returns the subtree that remains after deletion."""
        if not self.root:
            return "delete failed"
        elif val < self.root.val:
            self.root.left = BinarySearchTree(
                self.root.left).delete(val)
        elif val > self.root.val:
            self.root.right = BinarySearchTree(
                self.root.right).delete(val)
        else:  # this is the node to delete
            # both subtrees are non-empty
            if self.root.left and self.root.right:
                # find the minimum of the right subtree (the maximum of the
                # left subtree would work just as well)
                right_min = BinarySearchTree(self.root.right).find_min()
                # copy that minimum into the node being deleted
                self.root.val = right_min.val
                # remove the minimum from the right subtree; pass its value,
                # not the node itself
                self.root.right = BinarySearchTree(
                    self.root.right).delete(right_min.val)
            else:  # the node has one child or none
                if not self.root.left:
                    self.root = self.root.right
                elif not self.root.right:
                    self.root = self.root.left
        return self.root
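# --- Illustrative check (added; not part of the original file): deleting a node with
# two children replaces its value with the minimum of its right subtree.
def _demo_delete_two_children():
    bt = BinarySearchTree()
    for v in [5, 3, 8, 2, 4, 7, 9]:
        bt.insert(v)
    bt.delete(5)  # 5 has two children; 7 (min of its right subtree) takes its place
    assert bt.root.val == 7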
if __name__ == '__main__':
bt = BinarySearchTree()
for i in range(10):
bt.insert(i)
print(bt.find_min().val)
print(bt.find_max().val)
print(bt.find(10))
bt.postorder()
print("")
bt.delete(9)
print(bt.find_max().val)
bt.inorder()
# ==== file: cn/edgedetection/03Sample.py (repo: Jasonandy/Python-X, license: Apache-2.0) ====
"""
Edge detection example
https://blog.csdn.net/HuangZhang_123/article/details/80511270
"""
import cv2
import numpy as np
def show_image(image_path):
    """
    Show contour-detection results for an image.
    :param image_path: path of the input image
    :return: None
    """
    img = cv2.pyrDown(cv2.imread(image_path, cv2.IMREAD_UNCHANGED))
    # threshold binarizes the image; img.copy() keeps the original untouched,
    # and cv2.THRESH_BINARY selects plain binary thresholding
    ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)
    # findContours extracts the contours in the image
    # first argument: the thresholded image
    # retrieval mode: cv2.RETR_EXTERNAL keeps only the outermost contours,
    # while cv2.RETR_TREE would return the full contour hierarchy
    # last argument: the contour-approximation method
    # returns the contours and their hierarchy
    contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        # Contour-drawing method 1:
        # boundingRect computes the upright bounding box; x, y is the corner, w, h the size
        x, y, w, h = cv2.boundingRect(c)
        # draw the rectangle on img; (x, y) and (x + w, y + h) are the corners,
        # (0, 255, 0) the BGR color, 2 the line thickness
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Contour-drawing method 2:
        # find the minimum-area (rotated) rectangle
        rect = cv2.minAreaRect(c)
        # compute the corner points of that rectangle
        box = cv2.boxPoints(rect)
        # normalize the coordinates to integers
        box = np.int0(box)
        # draw the rotated rectangle
        cv2.drawContours(img, [box], 0, (0, 0, 255), 3)
        # Contour-drawing method 3:
        # compute the center and radius of the minimum enclosing circle
        (x, y), radius = cv2.minEnclosingCircle(c)
        # normalize to integers
        center = (int(x), int(y))
        radius = int(radius)
        # draw the circle
        img = cv2.circle(img, center, radius, (0, 255, 0), 2)
    # Contour-drawing method 4:
    # outline all contours with blue lines
    cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
    # show the image
    cv2.imshow("contours", img)
    cv2.waitKey()
    cv2.destroyAllWindows()
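# --- Illustrative variant (added; not part of the original file): the same contour
# pass, but skipping tiny contours by area before drawing. The 100-pixel threshold
# is an arbitrary example value.
def draw_large_contours(img, contours, min_area=100):
    for c in contours:
        # ignore contours whose enclosed area is below the threshold
        if cv2.contourArea(c) < min_area:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return img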
def run():
# image_path = "media/13.jpg"
# image_path = "media/lena/lena.jpg"
image_path = "media/sample/sample.png"
show_image(image_path)
if __name__ == '__main__':
run()
# ==== file: manage.py (repo: chifeng111/Appointment) ====
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Appointment.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
# ==== file: kubernetes/client/models/v1_job_list.py (repo: ouccema/client-python, license: Apache-2.0) ====
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1JobList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, items=None, metadata=None):
"""
V1JobList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'items': 'list[V1Job]',
'metadata': 'UnversionedListMeta'
}
self.attribute_map = {
'items': 'items',
'metadata': 'metadata'
}
self._items = items
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1JobList.
Items is the list of Job.
:return: The items of this V1JobList.
:rtype: list[V1Job]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1JobList.
Items is the list of Job.
:param items: The items of this V1JobList.
:type: list[V1Job]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def metadata(self):
"""
Gets the metadata of this V1JobList.
Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1JobList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1JobList.
Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1JobList.
:type: UnversionedListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
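# --- Illustrative usage (added; not part of the generated file): the model is a
# plain data holder whose to_dict() recursively serializes nested models.
def _demo_v1_job_list():
    job_list = V1JobList(items=[], metadata=None)
    assert job_list.to_dict() == {'items': [], 'metadata': None}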
# ==== file: ML/confusion-matrix/split_two.py (repo: MartinThoma/algorithms) ====
#!/usr/bin/env python
"""Split the classes into two equal-sized groups to maximize accuracy."""
import json
import os
import random
import numpy as np
random.seed(0)
import logging
import sys
from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def calculate_split_accuracy(cm):
"""
Calculate the accuracy of the adjusted classifier.
The adjusted classifier is built by joining the first n/2 classes into one
group and the rest into another group.
"""
n = len(cm)
first = int(n / 2)
cm_small = np.zeros((2, 2))
for i in range(n):
class_i = int(i < first)
for j in range(n):
class_j = int(j < first)
cm_small[class_i][class_j] += cm[i][j]
return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())
def calculate_split_error(cm):
"""Calculate the error of 2 group split."""
return 1.0 - calculate_split_accuracy(cm)
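# --- Illustrative check (added; not part of the original file): for a 2x2 confusion
# matrix the two "groups" are the two classes themselves, so the split accuracy is
# the ordinary accuracy (the group index is inverted, which does not change the sum).
def _demo_split_accuracy():
    cm = np.array([[8, 2],
                   [1, 9]])
    assert abs(calculate_split_accuracy(cm) - 17.0 / 20.0) < 1e-12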
def simulated_annealing(current_cm,
current_perm=None,
score=calculate_split_error,
steps=2 * 10**5,
temp=100.0,
cooling_factor=0.99,
deterministic=False):
"""
Optimize current_cm by randomly swapping elements.
Parameters
----------
current_cm : numpy array
current_perm : None or iterable, optional (default: None)
    steps : int, optional (default: 2 * 10**5)
temp : float > 0.0, optional (default: 100.0)
Temperature
cooling_factor: float in (0, 1), optional (default: 0.99)
"""
assert temp > 0
assert cooling_factor > 0
assert cooling_factor < 1
n = len(current_cm)
if current_perm is None:
current_perm = list(range(n))
current_perm = np.array(current_perm)
# Debugging code
perm_exp = np.zeros((n, n), dtype=np.int)
for i in range(n):
for j in range(n):
perm_exp[i][j] = j
current_cm = apply_permutation(current_cm, current_perm)
perm_exp_current = apply_permutation(perm_exp, current_perm)
logging.debug(perm_exp_current[0])
print("apply permutation %s" % str(current_perm))
current_score = score(current_cm)
best_perm = current_perm
best_cm = current_cm
best_score = current_score
print("## Starting Score: {:0.2f}%".format(current_score * 100))
for step in range(steps):
tmp = np.array(current_cm, copy=True)
split_part = int(n / 2) - 1
i = random.randint(0, split_part)
j = random.randint(split_part + 1, n - 1)
perm = swap_1d(current_perm.copy(), i, j)
tmp = swap(tmp, i, j)
# tmp = apply_permutation(tmp, perm)
tmp_score = score(tmp)
if deterministic:
chance = 1.0
else:
chance = random.random()
        temp *= cooling_factor
hot_prob = min(1, np.exp(-(tmp_score - current_score) / temp))
if chance <= hot_prob:
if best_score > tmp_score: # Minimize the score
best_perm = perm
best_cm = tmp
best_score = tmp_score
current_score = tmp_score
perm_exp_current = swap(perm_exp_current, i, j)
print(list(perm_exp_current[0]))
current_cm = tmp
logging.info(("Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, "
"step=%i)"),
(current_score * 100),
(best_score * 100),
(hot_prob * 100),
step)
return {'cm': best_cm, 'perm': list(perm_exp_current[0])}
def main(cm_file, perm_file, steps, labels_file):
"""Orchestrate."""
# Load confusion matrix
with open(cm_file) as f:
cm = json.load(f)
cm = np.array(cm)
# Load permutation
if os.path.isfile(perm_file):
print("loaded %s" % perm_file)
with open(perm_file) as data_file:
perm = json.load(data_file)
else:
        perm = list(range(len(cm)))
        random.shuffle(perm)
print("Score without perm: {:0.2f}%".format(calculate_split_error(cm) * 100))
result = simulated_annealing(cm, perm,
score=calculate_split_error,
deterministic=True,
steps=steps)
# First recursive step
# split_i = int(len(cm) / 2)
# cm = result['cm'][:split_i, :split_i]
# perm = list(range(split_i))
# result = simulated_annealing(cm, perm,
# score=calculate_split_error,
# deterministic=True,
# steps=steps)
print("Score: {}".format(calculate_split_error(result['cm'])))
print("Perm: {}".format(list(result['perm'])))
# Load labels
if os.path.isfile(labels_file):
with open(labels_file) as f:
symbols = json.load(f)
else:
symbols = read_symbols()
print("Symbols: {}".format([symbols[i] for i in result['perm']]))
plot_cm(result['cm'], zero_diagonal=True)
def get_parser():
"""Get parser object for script xy.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--cm",
dest="cm_file",
help=("path of a json file with a confusion matrix"),
metavar="cm.json",
default='confusion-matrix.json')
parser.add_argument("--perm",
dest="perm_file",
help=("path of a json file with a permutation to "
"start with"),
metavar="perm.json",
default="")
parser.add_argument("--labels",
dest="labels_file",
help=("path of a json file with a list of label "
"names"),
metavar="labels.json",
default="")
parser.add_argument("-n",
dest="n",
default=4 * 10**5,
type=int,
help="number of steps to iterate")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.cm_file, args.perm_file, args.n, args.labels_file)
# ==== file: sources/scipy-scipy-414c1ab/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py (repo: georgiee/lip-sync-lpc) ====
#!/usr/bin/env python
# Created by: Robert Cimrman, 05.12.2005
"""Benchmarks for umfpack module"""
from optparse import OptionParser
import time
import urllib
import gzip
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg.dsolve.umfpack as um
import scipy.linalg as nla
defaultURL = 'http://www.cise.ufl.edu/research/sparse/HBformat/'
usage = """%%prog [options] <matrix file name> [<matrix file name>, ...]
<matrix file name> can be a local or distant (gzipped) file
default url is:
%s
supported formats are:
triplet .. [nRow, nCol, nItem] followed by 'nItem' * [ir, ic, value]
hb .. Harwell-Boeing format N/A
""" % defaultURL
##
# 05.12.2005, c
def read_triplet( fd ):
nRow, nCol = map( int, fd.readline().split() )
nItem = int( fd.readline() )
ij = np.zeros( (nItem,2), np.int32 )
val = np.zeros( (nItem,), np.float64 )
for ii, row in enumerate( fd.readlines() ):
aux = row.split()
ij[ii] = int( aux[0] ), int( aux[1] )
val[ii] = float( aux[2] )
mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem )
return mtx
##
# 06.12.2005, c
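# NOTE (added): read_triplet2 below relies on the legacy scipy `io.read_array`
# helper, which is not imported here and was removed from SciPy long ago; it is
# kept only for historical reference.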
def read_triplet2( fd ):
nRow, nCol = map( int, fd.readline().split() )
nItem = int( fd.readline() )
ij, val = io.read_array( fd,
columns = [(0,1), (2,)],
atype = (np.int32, np.float64),
rowsize = nItem )
mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem )
return mtx
formatMap = {'triplet' : read_triplet}
##
# 05.12.2005, c
def readMatrix( matrixName, options ):
if options.default_url:
matrixName = defaultURL + matrixName
print 'url:', matrixName
if matrixName[:7] == 'http://':
fileName, status = urllib.urlretrieve( matrixName )
## print status
else:
fileName = matrixName
print 'file:', fileName
try:
readMatrix = formatMap[options.format]
except:
raise ValueError('unsupported format: %s' % options.format)
print 'format:', options.format
print 'reading...'
if fileName.endswith('.gz'):
fd = gzip.open( fileName )
else:
fd = open( fileName )
mtx = readMatrix( fd )
fd.close()
print 'ok'
return mtx
##
# 05.12.2005, c
def main():
parser = OptionParser( usage = usage )
parser.add_option( "-c", "--compare",
action = "store_true", dest = "compare",
default = False,
help = "compare with default scipy.sparse solver [default: %default]" )
parser.add_option( "-p", "--plot",
action = "store_true", dest = "plot",
default = False,
help = "plot time statistics [default: %default]" )
parser.add_option( "-d", "--default-url",
action = "store_true", dest = "default_url",
default = False,
help = "use default url [default: %default]" )
parser.add_option( "-f", "--format", type = type( '' ),
dest = "format", default = 'triplet',
help = "matrix format [default: %default]" )
(options, args) = parser.parse_args()
if (len( args ) >= 1):
matrixNames = args;
else:
parser.print_help(),
return
sizes, nnzs, times, errors = [], [], [], []
legends = ['umfpack', 'sparse.solve']
for ii, matrixName in enumerate( matrixNames ):
print '*' * 50
mtx = readMatrix( matrixName, options )
sizes.append( mtx.shape )
nnzs.append( mtx.nnz )
tts = np.zeros( (2,), dtype = np.double )
times.append( tts )
err = np.zeros( (2,2), dtype = np.double )
errors.append( err )
print 'size : %s (%d nnz)' % (mtx.shape, mtx.nnz)
sol0 = np.ones( (mtx.shape[0],), dtype = np.double )
rhs = mtx * sol0
umfpack = um.UmfpackContext()
tt = time.clock()
sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True )
tts[0] = time.clock() - tt
print "umfpack : %.2f s" % tts[0]
error = mtx * sol - rhs
err[0,0] = nla.norm( error )
print '||Ax-b|| :', err[0,0]
error = sol0 - sol
err[0,1] = nla.norm( error )
print '||x - x_{exact}|| :', err[0,1]
if options.compare:
tt = time.clock()
sol = sp.solve( mtx, rhs )
tts[1] = time.clock() - tt
print "sparse.solve : %.2f s" % tts[1]
error = mtx * sol - rhs
err[1,0] = nla.norm( error )
print '||Ax-b|| :', err[1,0]
error = sol0 - sol
err[1,1] = nla.norm( error )
print '||x - x_{exact}|| :', err[1,1]
if options.plot:
try:
import pylab
except ImportError:
raise ImportError("could not import pylab")
times = np.array( times )
print times
pylab.plot( times[:,0], 'b-o' )
if options.compare:
pylab.plot( times[:,1], 'r-s' )
else:
del legends[1]
print legends
ax = pylab.axis()
y2 = 0.5 * (ax[3] - ax[2])
xrng = range( len( nnzs ) )
for ii in xrng:
yy = y2 + 0.4 * (ax[3] - ax[2])\
* np.sin( ii * 2 * np.pi / (len( xrng ) - 1) )
if options.compare:
pylab.text( ii+0.02, yy,
'%s\n%.2e err_umf\n%.2e err_sp'
% (sizes[ii], np.sum( errors[ii][0,:] ),
np.sum( errors[ii][1,:] )) )
else:
pylab.text( ii+0.02, yy,
'%s\n%.2e err_umf'
% (sizes[ii], np.sum( errors[ii][0,:] )) )
pylab.plot( [ii, ii], [ax[2], ax[3]], 'k:' )
pylab.xticks( xrng, ['%d' % (nnzs[ii] ) for ii in xrng] )
pylab.xlabel( 'nnz' )
pylab.ylabel( 'time [s]' )
pylab.legend( legends )
pylab.axis( [ax[0] - 0.05, ax[1] + 1, ax[2], ax[3]] )
pylab.show()
if __name__ == '__main__':
main()
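# --- Illustrative equivalent (added; not part of the original benchmark): in current
# SciPy the umfpack solve above maps to scipy.sparse.linalg.spsolve, which delegates
# to UMFPACK when scikit-umfpack is installed. A minimal sketch:
def solve_with_spsolve( mtx, rhs ):
    from scipy.sparse.linalg import spsolve
    return spsolve( mtx, rhs )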
# ==== file: tracpro/groups/tests/test_middleware.py (repo: caktus/tracpro, license: BSD-3-Clause) ====
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from tracpro.test import factories
from tracpro.test.cases import TracProTest
from ..middleware import UserRegionsMiddleware
from ..models import Region
class TestUserRegionsMiddleware(TracProTest):
def setUp(self):
super(TestUserRegionsMiddleware, self).setUp()
self.middleware = UserRegionsMiddleware()
self.org = factories.Org()
self.user = factories.User()
def get_request(self, **kwargs):
request_kwargs = {'HTTP_HOST': "{}.testserver".format(self.org.subdomain)}
request = RequestFactory().get("/", **request_kwargs)
for key, value in kwargs.items():
setattr(request, key, value)
return request
def make_regions(self):
"""Create a collection of nested regions."""
self.region_uganda = factories.Region(
org=self.org, name="Uganda")
self.region_kampala = factories.Region(
org=self.org, name="Kampala", parent=self.region_uganda)
self.region_makerere = factories.Region(
org=self.org, name="Makerere", parent=self.region_kampala)
self.region_entebbe = factories.Region(
org=self.org, name="Entebbe", parent=self.region_uganda)
self.region_kenya = factories.Region(
org=self.org, name="Kenya")
self.region_nairobi = factories.Region(
org=self.org, name="Nairobi", parent=self.region_kenya)
self.region_mombasa = factories.Region(
org=self.org, name="Mombasa", parent=self.region_kenya)
self.region_inactive = factories.Region(
org=self.org, name="Inactive", parent=self.region_nairobi,
is_active=False)
return Region.get_all(self.org)
def test_variables_set(self):
"""Middleware should set several commonly-used region variables."""
request = self.get_request(user=self.user, org=self.org, session={})
self.middleware.process_request(request)
self.assertTrue(hasattr(request, 'region'))
self.assertTrue(hasattr(request, 'include_subregions'))
self.assertTrue(hasattr(request, 'user_regions'))
self.assertTrue(hasattr(request, 'data_regions'))
def test_user_regions__unauthenticated(self):
"""User regions should be set to null for unauthenticated users."""
request = self.get_request(user=AnonymousUser(), org=self.org)
self.middleware.set_user_regions(request)
self.assertIsNone(request.user_regions)
def test_user_regions__no_org(self):
"""User regions should be set to null for non-org views."""
request = self.get_request(user=self.user, org=None)
self.middleware.set_user_regions(request)
self.assertIsNone(request.user_regions)
def test_user_regions(self):
"""User regions should be set to the value of get_all_regions."""
self.make_regions()
self.region_kenya.users.add(self.user)
request = self.get_request(user=self.user, org=self.org)
self.middleware.set_user_regions(request)
self.assertEqual(
set(request.user_regions),
set([self.region_kenya, self.region_nairobi, self.region_mombasa]))
def test_include_subregions__default(self):
"""If key is not in the session, should default to True."""
request = self.get_request(session={})
self.middleware.set_include_subregions(request)
self.assertTrue(request.include_subregions)
def test_include_subregions__yes(self):
"""include_subregions should be retrieved from the session."""
request = self.get_request(session={'include_subregions': True})
self.middleware.set_include_subregions(request)
self.assertTrue(request.include_subregions)
def test_include_subregions__no(self):
"""include_subregions should be retrieved from the session."""
request = self.get_request(session={'include_subregions': False})
self.middleware.set_include_subregions(request)
self.assertFalse(request.include_subregions)
def test_data_regions__no_region(self):
"""If there is no current region, data_regions should be None."""
request = self.get_request(user=self.user, region=None)
self.middleware.set_data_regions(request)
self.assertIsNone(request.data_regions)
def test_data_regions__include_subregions(self):
"""Include all subregions user has access to if include_subregions is True."""
self.make_regions()
user_regions = Region.objects.filter(pk__in=(
self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
request = self.get_request(
user=self.user, region=self.region_kenya, include_subregions=True,
user_regions=user_regions)
self.middleware.set_data_regions(request)
self.assertEqual(
set(request.data_regions),
set([self.region_kenya, self.region_nairobi]))
def test_data_regions__exclude_subregions(self):
"""Include only the current region if include_subregions is False."""
self.make_regions()
user_regions = Region.objects.filter(pk__in=(
self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
request = self.get_request(
user=self.user, region=self.region_kenya, include_subregions=False,
user_regions=user_regions)
self.middleware.set_data_regions(request)
self.assertEqual(
set(request.data_regions),
set([self.region_kenya]))
def test_region__unauthenticated(self):
"""Current region should be None for an unauthenticated user."""
request = self.get_request(user=AnonymousUser(), org=self.org)
self.middleware.set_region(request)
self.assertIsNone(request.region)
def test_region__no_org(self):
"""Current region should be None if there is no current org."""
request = self.get_request(user=self.user, org=None)
self.middleware.set_region(request)
self.assertIsNone(request.region)
def test_region__not_set__admin(self):
"""If region_id is not in the session, admin will see All Regions."""
self.make_regions()
self.org.administrators.add(self.user)
user_regions = Region.objects.filter(pk__in=(
self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
request = self.get_request(
user=self.user, org=self.org, session={}, user_regions=user_regions)
self.middleware.set_region(request)
self.assertIsNone(request.region)
def test_region__not_set(self):
"""If region_id is not in the session, user will see first of their regions."""
self.make_regions()
user_regions = Region.objects.filter(pk=self.region_kenya.pk)
request = self.get_request(
user=self.user, org=self.org, session={}, user_regions=user_regions)
self.middleware.set_region(request)
self.assertEqual(request.region, self.region_kenya)
def test_region__not_in_user_regions(self):
"""If region is not in user regions, return the first of the user's regions."""
self.make_regions()
user_regions = Region.objects.filter(pk=self.region_kenya.pk)
request = self.get_request(
user=self.user, org=self.org, session={'region_id': self.region_nairobi.pk},
user_regions=user_regions)
self.middleware.set_region(request)
self.assertEqual(request.region, self.region_kenya)
def test_region(self):
self.make_regions()
user_regions = Region.objects.filter(pk=self.region_kenya.pk)
request = self.get_request(
user=self.user, org=self.org, session={'region_id': self.region_kenya.pk},
user_regions=user_regions)
self.middleware.set_region(request)
self.assertEqual(request.region, self.region_kenya)
# ==== file: raylab/policy/modules/model/stochastic/single.py (repo: GapData/raylab, license: MIT) ====
"""NN modules for stochastic dynamics estimation."""
from dataclasses import dataclass
from typing import List
from typing import Tuple
import torch
import torch.nn as nn
from gym.spaces import Box
from torch import Tensor
import raylab.torch.nn as nnx
import raylab.torch.nn.distributions as ptd
from raylab.policy.modules.networks.mlp import StateActionMLP
from raylab.utils.types import TensorDict
SampleLogp = Tuple[Tensor, Tensor]
class StochasticModel(nn.Module):
"""Represents a stochastic model as a conditional distribution module."""
def __init__(
self, params_module: nn.Module, dist_module: ptd.ConditionalDistribution
):
super().__init__()
self.params = params_module
self.dist = dist_module
def forward(self, obs, action) -> TensorDict: # pylint:disable=arguments-differ
return self.params(obs, action)
@torch.jit.export
def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched. Returns a (sample, log_prob)
pair.
"""
return self.dist.sample(params, sample_shape)
@torch.jit.export
def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched. Returns a (rsample, log_prob) pair.
"""
return self.dist.rsample(params, sample_shape)
@torch.jit.export
def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor:
"""
Returns the log probability density/mass function evaluated at `next_obs`.
"""
return self.dist.log_prob(next_obs, params)
@torch.jit.export
def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor:
"""Returns the cumulative density/mass function evaluated at `next_obs`."""
return self.dist.cdf(next_obs, params)
@torch.jit.export
def icdf(self, prob, params: TensorDict) -> Tensor:
"""Returns the inverse cumulative density/mass function evaluated at `prob`."""
return self.dist.icdf(prob, params)
@torch.jit.export
def entropy(self, params: TensorDict) -> Tensor:
"""Returns entropy of distribution."""
return self.dist.entropy(params)
@torch.jit.export
def perplexity(self, params: TensorDict) -> Tensor:
"""Returns perplexity of distribution."""
return self.dist.perplexity(params)
@torch.jit.export
def reproduce(self, next_obs, params: TensorDict) -> SampleLogp:
"""Produce a reparametrized sample with the same value as `next_obs`."""
return self.dist.reproduce(next_obs, params)
@torch.jit.export
def deterministic(self, params: TensorDict) -> SampleLogp:
"""
Generates a deterministic sample or batch of samples if the distribution
parameters are batched. Returns a (rsample, log_prob) pair.
"""
return self.dist.deterministic(params)
class ResidualMixin:
"""Overrides StochasticModel interface to model state transition residuals."""
# pylint:disable=missing-function-docstring,not-callable
def forward(self, obs: Tensor, action: Tensor) -> TensorDict:
params = self.params(obs, action)
params["obs"] = obs
return params
@torch.jit.export
def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
res, log_prob = self.dist.sample(params, sample_shape)
return params["obs"] + res, log_prob
@torch.jit.export
def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp:
res, log_prob = self.dist.rsample(params, sample_shape)
return params["obs"] + res, log_prob
@torch.jit.export
def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor:
return self.dist.log_prob(next_obs - params["obs"], params)
@torch.jit.export
def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor:
return self.dist.cdf(next_obs - params["obs"], params)
@torch.jit.export
def icdf(self, prob, params: TensorDict) -> Tensor:
residual = self.dist.icdf(prob, params)
return params["obs"] + residual
@torch.jit.export
def reproduce(self, next_obs, params: TensorDict) -> SampleLogp:
sample_, log_prob_ = self.dist.reproduce(next_obs - params["obs"], params)
return params["obs"] + sample_, log_prob_
@torch.jit.export
def deterministic(self, params: TensorDict) -> SampleLogp:
sample, log_prob = self.dist.deterministic(params)
return params["obs"] + sample, log_prob
class DynamicsParams(nn.Module):
"""Neural network mapping state-action pairs to distribution parameters.
Args:
encoder: Module mapping state-action pairs to 1D features
params: Module mapping 1D features to distribution parameters
"""
def __init__(self, encoder: nn.Module, params: nn.Module):
super().__init__()
self.encoder = encoder
self.params = params
def forward(self, obs, actions): # pylint:disable=arguments-differ
return self.params(self.encoder(obs, actions))
@dataclass
class MLPModelSpec(StateActionMLP.spec_cls):
"""Specifications for stochastic mlp model network.
Inherits parameters from `StateActionMLP.spec_cls`.
Args:
units: Number of units in each hidden layer
activation: Nonlinearity following each linear layer
delay_action: Whether to apply an initial preprocessing layer on the
observation before concatenating the action to the input.
standard_scaler: Whether to transform the inputs of the NN using a
standard scaling procedure (subtract mean and divide by stddev). The
transformation mean and stddev should be fitted during training and
used for both training and evaluation.
fix_logvar_bounds: Whether to use fixed or dynamically adjusted
bounds for the log-scale outputs of the network.
input_dependent_scale: Whether to parameterize the Gaussian standard
deviation as a function of the state and action
"""
fix_logvar_bounds: bool = True
input_dependent_scale: bool = True
class MLPModel(StochasticModel):
"""Stochastic model with multilayer perceptron state-action encoder.
Attributes:
params: NN module mapping obs-act pairs to obs dist params
dist: NN module implementing the distribution API
encoder: NN module used in `params` to map obs-act pairs to vector
embeddings
"""
spec_cls = MLPModelSpec
def __init__(self, obs_space: Box, action_space: Box, spec: MLPModelSpec):
encoder = StateActionMLP(obs_space, action_space, spec)
params = nnx.NormalParams(
encoder.out_features,
obs_space.shape[0],
input_dependent_scale=spec.input_dependent_scale,
bound_parameters=not spec.fix_logvar_bounds,
)
if spec.fix_logvar_bounds:
params.max_logvar.fill_(2)
params.min_logvar.fill_(-20)
params = DynamicsParams(encoder, params)
dist = ptd.Independent(ptd.Normal(), reinterpreted_batch_ndims=1)
super().__init__(params, dist)
# Can only assign modules and parameters after calling nn.Module.__init__
self.encoder = encoder
def initialize_parameters(self, initializer_spec: dict):
"""Initialize all encoder parameters.
Args:
initializer_spec: Dictionary with mandatory `name` key corresponding
to the initializer function name in `torch.nn.init` and optional
keyword arguments.
"""
self.encoder.initialize_parameters(initializer_spec)
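# --- Illustrative usage (added; not part of the original file). A minimal sketch,
# assuming all inherited spec fields have defaults; shapes are arbitrary examples.
def _demo_mlp_model():
    obs_space = Box(low=-1.0, high=1.0, shape=(3,))
    action_space = Box(low=-1.0, high=1.0, shape=(2,))
    model = MLPModel(obs_space, action_space, MLPModel.spec_cls())
    obs, act = torch.randn(7, 3), torch.randn(7, 2)
    params = model(obs, act)  # dict of distribution parameters
    next_obs, logp = model.rsample(params)  # reparameterized next-state sample
    assert next_obs.shape == (7, 3) and logp.shape == (7,)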
class ResidualMLPModel(ResidualMixin, MLPModel):
"""Residual stochastic multilayer perceptron model."""
# ==== file: analysisflow.py (repo: arizzi/nail) ====
from .nail import *
flow = SampleProcessing("")
flow.SetAlias("n(.*)", "\\1.size()", defaultPersistency=True)
flow.SetAlias(
    "(.*)_p4", "{TLorentzVector ret; ret.SetPtEtaPhiM(\\1_pt,\\1_eta,\\1_phi,\\1_mass); return ret;}", defaultPersistency=False)
# SubCollection action
flow.SetAlias("SelectedMuon_(.*)([\.*\])", "Muon_\1[SelectedMuon[\2]]")
# cuts value should not be hardcoded below but rather being declared here so that scans and optimizations are possible
flow.DefaultConfig(muIsoCut=0.13, muIdCut=3, muPtCut=25)
# Higgs to mumu reconstruction
# Maps to plain RDF VecOps
flow.DefineCollAttr("Muon_id", "Muon_tightId*3+Muon_looseId")
# this should generate some kind of wrapper/ref that can be used as the parent collection
flow.SubCollection("SelectedMuon", "Muon",
sel="Muon_iso < muIsoCut && Muon_id > muIdCut && Muon_pt > muPtCut")
flow.Filter("twoOppositeSignMuons",
"nSelectedMuon==2 && SelectedMuon_charge[0]*SelectedMuon_charge[1] < 0")
# p4 should be handled somehow ... any syntax is ok such as p4(SelectedMuon[0]) or _p4 or .p4 etc..
flow.Define("Higgs", "p4at(SelectedMuon,0)+p4at(SelectedMuon,1)",
requires=["twoOppositeSignMuons"])
# the following could work
# define p4at(x,y) ROOT::Math::PtEtaPhiMVector(x##_pt[y] , x##_eta[y], x##_phi[y], x##_mass[y])
# define p4(x) ROOT::Math::PtEtaPhiMVector(x##_pt , x##_eta, x##_phi, x##_mass)
# VBF Jets kinematics
flow.DefaultConfig(jetPtCut=25)
flow.SubCollection("SelectedJet", "Jet",
"Jet_pt > jetPtCut && (Jet_muonIdx1 == -1 || Muon_iso[Jet_muonIdx1] > muIsoCut || Muon_id[Jet_muonIdx1] > 0")
flow.Filter("twoJets", "nSelectedJet>=2")
flow.Define("Qjet1", "SelectedJet[0].p4()", requires=["twoJets"])
flow.Define("Qjet2", "SelectedJet[1].p4()", requires=["twoJets"])
flow.Define("qq", "Qjet1+Qjet2")
flow.Define("Mqq", "qq.M()")
flow.Define("qq_pt", "qq.Pt()")
flow.Define("qqDeltaEta", "TMath::Abs(Qjet1.Eta()-Qjet2.Eta())")
flow.Define("qqDeltaPhi", "TMath::Abs(Qjet1.DeltaPhi(Qjet2))")
# QQ vs ll kinematic
flow.Define(
"ll_ystar", "Higgs.Rapidity() - (Qjet1.Rapidity() + Qjet2.Rapidity())")
flow.Define(
"ll_zstar", " TMath::Abs( ll_ystar/ (Qjet1.Rapidity()-Qjet2.Rapidity() )) ")
flow.Define("DeltaEtaQQSum",
"TMath::Abs(Qjet1.Eta()) + TMath::Abs(Qjet2.Eta())")
flow.Define("PhiZQ1", "TMath::Abs(Higgs.DeltaPhi(Qjet1))")
flow.Define("PhiZQ2", "TMath::Abs(Higgs.DeltaPhi(Qjet2))")
flow.Define("EtaHQ1", "TMath::Abs(Higgs.Eta() - Qjet1.Eta())")
flow.Define("EtaHQ2", "TMath::Abs(Higgs.Eta() - Qjet2.Eta())")
flow.Define("DeltaRelQQ", "(Qjet1+Qjet2).Pt()/( Qjet1.Pt()+Qjet2.Pt())")
flow.Define(
"Rpt", "(Qjet1+Qjet2+ Higgs).Pt()/( Qjet1.Pt()+Qjet2.Pt() + Higgs.Pt())")
flow.DefaultConfig(higgsMassWindowWidth=15, mQQcut=400, nominalHMass=125.03)
flow.Filter("MassWindow", "abs(Higgs_m-nominalHMass)<higgsMassWindowWidth")
flow.Filter("SideBand", "! MassWindow")
flow.Filter("VBFRegion", "Mqq > mQQcut")
flow.Filter("SignalRegion", "VBFRegion && MassWindow")
# flow.Trainable("SBClassifier","evalMVA",["Higgs_pt","Higgs_m","Mqq","Rpt","DeltaRelQQ"],splitMode="TripleMVA",requires="VBFRegion")
print((flow.NeededInputs()))
# flow.AddSystematic("MuScaleUp","Muon_pt","Muon_pt*1.01") #name, target, replacement
# flow.AddSystematic("HMassUncertainityUp","nominalHMass","125.1") #name, target, replacement
# flow.OptimizationScan("MuPtCutScan","muPtCut","30") #name, target, replacement
#from samples import background,signal,data
# ==== file: partitioned code/dhrec/autohistoryinfer.py (repo: seferlab/DHREC) ====
import networkx as nx
import sys
sys.path.append("./lib")
import os
import gzip
import cPickle
import random
import myutilities as myutil
import itertools
from InputOutput import InputOutput
from Trace import Trace
from Pbs import Pbs
import HistoryInferRunner
import HistOtherAlgos
def genOtherConfigFile(configfile,graphfile,algo,algoparams,snapshotfile,resultfile,smodel,trunfolder,inter):
"""generates config file for other algos
Args:
configfile:
config parameters:
"""
with gzip.open(configfile,"wb") as file:
cPickle.dump(graphfile,file)
cPickle.dump(algo,file)
cPickle.dump(algoparams,file)
cPickle.dump(snapshotfile,file)
cPickle.dump(resultfile,file)
cPickle.dump(smodel,file)
cPickle.dump(trunfolder,file)
cPickle.dump(inter,file)
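# --- Illustrative reader (added; not part of the original pipeline): fields must be
# unpickled in exactly the order they were dumped above.
def readOtherConfigFile(configfile):
    with gzip.open(configfile,"rb") as infile:
        graphfile = cPickle.load(infile)
        algo = cPickle.load(infile)
        algoparams = cPickle.load(infile)
        snapshotfile = cPickle.load(infile)
        resultfile = cPickle.load(infile)
        smodel = cPickle.load(infile)
        trunfolder = cPickle.load(infile)
        inter = cPickle.load(infile)
    return (graphfile,algo,algoparams,snapshotfile,resultfile,smodel,trunfolder,inter)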
def genHistoryConfigFile(configfile,vararr):
"""generates history config file
Args:
configfile:
vararr:
"""
with open(configfile,"w") as file:
for var in vararr.keys():
if var == "dists":
for key in vararr[var].keys():
if key in [Trace.S2I, Trace.I2R, Trace.E2I, Trace.I2S, Trace.S2E]:
paramstr = " ".join([str(item) for item in vararr[var][key][1]])
file.write("{0}: {1} {2}\n".format(key,vararr[var][key][0],paramstr))
elif key == Trace.SPROB:
file.write("{0}: {1}\n".format(key,vararr[var][key]))
else:
file.write("{0}: {1}\n".format(var,vararr[var]))
def getAlgoBlocks(prob,inter,infermode,smodel):
if prob == "dis" and inter == "bound":
if infermode == "Spreader":
algoblocks = [("MatroidSub",{"method":"search","ensemble":False}),("MatroidSub",{"method":"search","ensemble":True,"enscount":2}),("MatroidSub",{"method":"search","ensemble":True,"enscount":5})]
            algoblocks2 = [("second-logconcave-internal",{"ensemble":False,"approx":"reliability"}),("second-arbitrary-internal",{"ensemble":False,"method":"greedy"})]
            algoblocks2.append(("second-arbitrary-internal",{"ensemble":False,"method":"pipage"}))
algoblocks.extend(algoblocks2)
otheralgos = [("RumorCentrality",{})]
algoblocks.extend(otheralgos)
elif infermode == "History":
algoblocks = [("MatroidSub",{"method":"search","ensemble":False})]
#algoblocks = [("second-logconcave",{"ensemble":False,"approx":"reliability"}),("second-arbitrary",{"ensemble":False,"method":"greedy"})]
#algoblocks = [("second-arbitrary",{"ensemble":False,"method":"pipage"})]
#algoblocks = [("MatroidSub",{"method":"search","ensemble":False})]
#algoblocks = [("Qsapmin",{"ensemble":False,"approx":"reliability"})]
#algoblocks = [("Qsapmax",{"ensemble":False,"method":"pipage"})]
#algoblocks.extend(algoblocks2)
#indeblocks = [("Independent",{"ensemble":False})]
#algoblocks.extend(indeblocks)
elif prob == "dis" and inter == None:
if infermode == "Spreader":
algoblocks = [("GreedySubCoverSingle",{"iter":None,"objmethod":"log","ensemble":False}),("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":True,"enscount":2})]
#,("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":True,"enscount":3})
#algoblocks = [("FracCover",{"iter":None, "objmethod":"log","ensemble":False,"roundmethod":"random"})]
#algoblocks = []
if smodel in ["si","sir"]:
appralgos = [("Pcdsvc",{"ensemble":False}),("Pcdsvc",{"ensemble":True,"enscount":2})]
appralgos2 = [("Pcvc",{"ensemble":False}),("Pcvc",{"ensemble":True,"enscount":2})]
algoblocks.extend(appralgos)
algoblocks.extend(appralgos2)
elif smodel == "seir":
appralgos = [("MinCut",{"ensemble":False}),("MinCut",{"ensemble":True,"enscount":2})]
algoblocks.extend(appralgos)
#algoblocks = []
#otheralgos = [("NetSleuth",{}),("RumorCentrality",{}),("KEffectors",{})]
#otheralgos = [("NetSleuth",{})]
otheralgos = [("RumorCentrality",{})]
algoblocks.extend(otheralgos)
elif infermode == "History":
algoblocks = [("GreedySubCoverSingle",{"iter":None, "objmethod":"log","ensemble":False})]
if smodel in ["si","sir"]:
appralgos = [("Pcdsvc",{"ensemble":False}),("Pcvc",{"ensemble":False})]
elif smodel == "seir":
appralgos = [("MinCut",{"ensemble":False})]
algoblocks.extend(appralgos)
indeblocks = [("GreedyForward",{"ensemble":False})]
algoblocks.extend(indeblocks)
elif prob == "cont" and inter == "bound":
algoblocks = [("greedy",{})]
elif prob == "cont" and inter == None:
algoblocks = [("greedy",{})]
return algoblocks
def returnAlgoStr(algo,algoparams):
return "-".join([algo] + [str(item) for item in algoparams.values()])
def genMainFolders(graphtraceinput,runresultinput,configinput,sideinput):
(graphfolderpref,tracepref,tracefolderpref,newtracepref,newtracefolderpref) = graphtraceinput
(runpref,runfolderpref,resultpref,resultfolderpref) = runresultinput
(configpref,configfolderpref,pbsfolder) = configinput
(smodel,prob,evol,realdata,inter) = sideinput
graphfolder = "{0}/{1}_{2}_{3}".format(graphfolderpref,realdata,evol,"graphs")
tracefolder = "{0}/{1}_{2}_{3}_{4}_{5}_{6}".format(tracefolderpref,tracepref,realdata,evol,smodel,"edge",prob)
newtracefolder = "{0}/{1}_{2}_{3}_{4}_{5}_{6}".format(newtracefolderpref,newtracepref,realdata,evol,smodel,prob,inter)
configfolder = "{0}/{1}_{2}_{3}_{4}_{5}_{6}".format(configfolderpref,configpref,realdata,evol,smodel,prob,inter)
resultfolder = "{0}/{1}_{2}_{3}_{4}_{5}_{6}".format(resultfolderpref,resultpref,realdata,evol,smodel,prob,inter)
runfolder = "{0}/{1}_{2}_{3}_{4}_{5}_{6}".format(runfolderpref,runpref,realdata,evol,smodel,prob,inter)
[os.makedirs(folder) for folder in [pbsfolder,configfolder,resultfolder,runfolder] if not os.path.exists(folder)]
return graphfolder,tracefolder,newtracefolder,configfolder,resultfolder,runfolder
def returnPathInfo(graphinput,traceinput):
"""returns path info
Args:
graphinput:
traceinput:
Returns:
path2info:
"""
(graphfolder,realdata,evol,prob,smodel,filesamplecount,inter) = graphinput
(tracefolder,fraccons,samplenoisestr,startcount) = traceinput
path2info={}
if realdata == "real" and evol == "static":
for filename in myutil.listfiles(graphfolder):
if filename.split("-")[0:2] != [prob,smodel]:
continue
filepath ="{0}/{1}".format(graphfolder,filename)
if inter == None and (filename.find("weibull") != -1 or filename.find("rayleigh") != -1 or filename.find("powerlaw") != -1):
continue
#if filename.find("sn") != -1:
# if filename.find("sn1") == -1:
# continue
#if filename.find("grid") != -1:
# continue
filetracefolder = "{0}/{1}/{2}/{3}".format(tracefolder,filename,samplenoisestr,startcount)
assert os.path.exists(filetracefolder)
G = InputOutput.readGraphAndParams(filepath)
minicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["min"]))
maxicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["max"]))
sentfraccons = {Trace.INFECTED: {"min":minicount, "max":maxicount}}
while True:
filem = Trace.getRandomTraceFile(filetracefolder,sentfraccons)
if filem not in ["0.plain","1.plain"]:
continue
tracefile = "{0}/{1}".format(filetracefolder,filem)
break
#tracefile = "{0}/{1}".format(filetracefolder,Trace.getRandomTraceFile(filetracefolder,sentfraccons))
path2info[filepath] = (filename,tracefile)
elif realdata == "syn" and evol == "static":
for filename in myutil.listfiles(graphfolder):
filepath = "{0}/{1}".format(graphfolder,filename)
if filename.split("-")[0:2] != [prob,smodel]:
continue
if inter == None and (filename.find("weibull") != -1 or filename.find("rayleigh") != -1 or filename.find("powerlaw") != -1):
continue
innum = int(filename.split("_")[-1].replace(".edgelist",""))
if innum > 1:
continue
if smodel == "si":
if filename.find("sprob_1.0_1.0") != -1 and (filename.find("expo_0.2_1.0") != -1 or filename.find("expo_0.5_0.5") != -1 or filename.find("expo_0.1_0.5") != -1):
pass
else:
continue
elif smodel == "sir":
if filename.find("sprob_1.0_1.0") != -1 and filename.find("s2i_expo_0.2_1.0") != -1 and filename.find("i2r_expo_0.8_1.0") != -1:
pass
else:
continue
elif smodel == "seir":
if filename.find("sprob_1.0_1.0") != -1 and filename.find("s2e_expo_0.5_2.0") != -1 and filename.find("e2i_expo_0.5_2.0") != -1 and filename.find("i2r_expo_0.5_2.0") != -1:
pass
else:
continue
filetracefolder = "{0}/{1}/{2}/{3}".format(tracefolder,filename,samplenoisestr,startcount)
assert os.path.exists(filetracefolder)
G = InputOutput.readGraphAndParams(filepath)
minicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["min"]))
maxicount = int(round(G.number_of_nodes() * fraccons[Trace.INFECTED]["max"]))
sentfraccons = {Trace.INFECTED: {"min":minicount, "max":maxicount}}
while True:
filem = Trace.getRandomTraceFile(filetracefolder,sentfraccons)
if filem not in ["0.plain","1.plain"]:
continue
tracefile = "{0}/{1}".format(filetracefolder,filem)
break
#tracefile = "{0}/{1}".format(filetracefolder,Trace.getRandomTraceFile(filetracefolder,sentfraccons))
path2info[filepath] = (filename,tracefile)
return path2info
def runOtherAlgos(algofields,tracefields,otherfields):
"""runs other algos
Args:
algofields:
tracefields:
otherfields:
Returns:
"""
return
algo,algoparams,runfolder = algofields
noise,noisetype,timefrac,timecount,realdata,tracefile,tracestr,newtracefolder,tracefolder = tracefields
complete,completeupto,path,graphfolder,configfolder,resultfolder = otherfields
algostr = returnAlgoStr(algo,algoparams)
parameterstr = "frac{0}-count{1}".format(timefrac,timecount)
extensionstr = "-".join(path.replace("./","").split("/")[1:])
tresultfolder = "{0}/{1}/{2}/{3}/{4}/{5}".format(resultfolder,extensionstr,tracestr,parameterstr,infermode,algostr)
if not os.path.exists(tresultfolder):
os.makedirs(tresultfolder)
indices = set([-1] + [int(myfile.replace(".hist","")) for myfile in myutil.listfiles(tresultfolder)])
if complete and len(indices) >= completeupto + 1:
return
resultfile = "{0}/{1}.hist".format(tresultfolder,max(indices)+1)
trunfolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(runfolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
if not os.path.exists(trunfolder):
os.makedirs(trunfolder)
tnewtracefolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(newtracefolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
if not os.path.exists(tnewtracefolder):
os.makedirs(tnewtracefolder)
snapshotfile = "{0}/infer.snapshot".format(tnewtracefolder)
G = InputOutput.readGraphAndParams(path)
assert timecount == 1
if infermode == "History":
fracpoints = [(timefrac*index)/timecount for index in xrange(1,timecount+1)]
elif infermode == "Spreader":
fracpoints = [timefrac]
for index in xrange(1,timecount):
interval = (1.0-timefrac)/(timecount-1)
fracpoints.append(timefrac+(index*interval))
assert max(fracpoints) <= 1.00000001
trace = InputOutput.readPlainTrace(tracefile,prob)
maxtime = Trace.getMaxTraceTime(trace)
obstimes = sorted(list(set([int(round(frac*maxtime)) for frac in fracpoints])))
if 0 in obstimes:
obstimes.remove(0)
if len(obstimes) == 0:
return
curstates = [Trace.trace2Snapshot(trace,obstime,smodel,G) for obstime in obstimes]
InputOutput.writeSnapshot(curstates,infermode,inter,smodel,obstimes,snapshotfile)
configpath = "{0}/config_{1}_{2}_{3}_{4}_{5}-{6}.config".format(configfolder,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
genOtherConfigFile(configpath,path,algo,algoparams,snapshotfile,resultfile,smodel,trunfolder,inter)
code = "python HistOtherAlgos.py {0}".format(configpath)
#os.system(code)
#return
    pool = "pool2"  # both branches of the former random choice used the same pool
pbsfilename = "{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}-{8}-{9}-{10}.pbs".format(realdata,evol,smodel,prob,inter,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
Pbs.submitPbs(code,pbsfolder,pbsfilename,pool)
def runMyAlgos(algofields,tracefields,otherfields):
"""runs my algos
Args:
algofields:
tracefields:
otherfields:
Returns:
"""
algo,algoparams,infermode,inter,runfolder = algofields
noise,noisetype,timefrac,timecount,prob,smodel,evol,realdata,tracefile,tracestr,newtracefolder,tracefolder = tracefields
complete,completeupto,path,printscore,graphfolder,configfolder,resultfolder = otherfields
algostr = returnAlgoStr(algo,algoparams)
parameterstr = "frac{0}-count{1}".format(timefrac,timecount)
extensionstr = "-".join(path.replace("./","").split("/")[1:])
tresultfolder = "{0}/{1}/{2}/{3}/{4}/{5}".format(resultfolder,extensionstr,tracestr,parameterstr,infermode,algostr)
if not os.path.exists(tresultfolder):
os.makedirs(tresultfolder)
indices = set([-1] + [int(myfile.replace(".hist","")) for myfile in myutil.listfiles(tresultfolder)])
if complete and len(indices) >= completeupto + 1:
return
resultfile = "{0}/{1}.hist".format(tresultfolder,max(indices)+1)
trunfolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(runfolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
if not os.path.exists(trunfolder):
os.makedirs(trunfolder)
tnewtracefolder = "{0}/{1}/{2}/{3}/{4}/{5}/{6}".format(newtracefolder,extensionstr,tracestr,parameterstr,infermode,algostr,max(indices)+1)
if not os.path.exists(tnewtracefolder):
os.makedirs(tnewtracefolder)
snapshotfile = "{0}/infer.snapshot".format(tnewtracefolder)
G = InputOutput.readGraphAndParams(path)
if infermode == "History":
fracpoints = [(timefrac*index)/timecount for index in xrange(1,timecount+1)]
elif infermode == "Spreader":
fracpoints = [timefrac]
for index in xrange(1,timecount):
interval = (1.0-timefrac)/(timecount-1)
fracpoints.append(timefrac+(index*interval))
assert max(fracpoints) <= 1.00000001
trace = InputOutput.readPlainTrace(tracefile,prob)
maxtime = Trace.getMaxTraceTime(trace)
obstimes = sorted(list(set([int(round(frac*maxtime)) for frac in fracpoints])))
if 0 in obstimes:
obstimes.remove(0)
if len(obstimes) == 0:
return
print "observed times"
print obstimes
#if max(obstimes) < 3:
# return
curstates = [Trace.trace2Snapshot(trace,obstime,smodel,G) for obstime in obstimes]
InputOutput.writeSnapshot(curstates,infermode,inter,smodel,obstimes,snapshotfile)
vararr = {"graphfile": path, "dist": prob, "runfolder":trunfolder}
if inter == None:
vararr["scoretimes"] = " ".join([str(time) for time in obstimes])
for item in ["snapshotfile","noise","noisetype","smodel","algo","inter","infermode","evol","printscore","resultfile","tracefile"]:
exec('vararr["{0}"] = {0}'.format(item)) in locals(),globals()
for param in algoparams.keys():
vararr[param] = algoparams[param]
configpath = "{0}/config_{1}_{2}_{3}_{4}_{5}-{6}.config".format(configfolder,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
genHistoryConfigFile(configpath,vararr)
code = "python HistoryInfer.py {0}".format(configpath)
    os.system(code)
    return  # run locally; the PBS submission below is skipped
    pool = "pool2"  # both branches of the former random choice used the same pool
pbsfilename = "{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}-{8}-{9}-{10}.pbs".format(realdata,evol,smodel,prob,inter,extensionstr,infermode,algostr,parameterstr,tracestr,max(indices)+1)
Pbs.submitPbs(code,pbsfolder,pbsfilename,pool)
def assignParams():
"""assign params
Args:
Returns:
paramdict:
"""
paramdict = {}
paramdict["newtracepref"] = "tracesnapshots"
paramdict["tracepref"] = "traces"
paramdict["configpref"] = "histconfig"
paramdict["runpref"] = "run"
paramdict["resultpref"] = "result"
paramdict["resultfolderpref"] = "."
paramdict["runfolderpref"] = "."
paramdict["tracefolderpref"] = "."
paramdict["newtracefolderpref"] = "."
paramdict["graphfolderpref"] = "."
paramdict["configfolderpref"] = "."
paramdict["pbsfolder"] = "pbsfolder"
paramdict["realdata"] = "syn"
paramdict["evol"] = "static"
paramdict["infermode"] = "History" #"History" #"Spreader"
paramdict["smodel"] = "si" #"seir","sir","sis", "samd", "mamd"
paramdict["prob"] = "dis" #"dis"
paramdict["inter"] = None #"bound"
paramdict["startcounts"] = [1,2,3,5,7,10] #[1,2,3,4,5,6,7,8,9,10] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
paramdict["maxtimefracs"] = [0.15,0.26,0.34,0.4,0.51,0.6,0.76,0.9]
paramdict["mintimefracs"] = [0.1,0.15,0.2,0.26,0.34,0.4,0.45,0.51,0.55,0.6,0.65,0.7,0.76,0.8,0.85,0.9,0.95]
paramdict["timecounts"] = [3,5] #[1,2,3,4,5,6,7] #[1,2,3,4,5,10] #[1,2,3,4,5] #[1,2,3,4,5,10] #[1,2,3,4,5] #[1,2,3,4,5,6,7,10]
paramdict["noises"] = [0.0] #[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
paramdict["noisetype"] = "StateChange" #Normal noise
paramdict["filesamplecount"] = 10 #for syndPlainTrace(tracefile,prob)
paramdict["complete"] = True
paramdict["completeupto"] = 2
paramdict["fraccons"] = {Trace.INFECTED: {"min":0.00001, "max":1.01}}
paramdict["printscore"] = None #"KenTauCorrelation" #"Graph,rand" #"KenTauCorrelation" #"KenTauCorrelation" #"Hausdorff"
return paramdict
if __name__ == "__main__":
paramdict = assignParams()
for var in paramdict.keys():
if type(paramdict[var]) == type(""):
exec('{0}="{1}"'.format(var,paramdict[var]))
else:
exec('{0}={1}'.format(var,paramdict[var]))
graphtraceinput = [graphfolderpref,tracepref,tracefolderpref,newtracepref,newtracefolderpref]
runresultinput = [runpref,runfolderpref,resultpref,resultfolderpref]
configinput = [configpref,configfolderpref,pbsfolder]
sideinput = [smodel,prob,evol,realdata,inter]
(graphfolder,tracefolder,newtracefolder,configfolder,resultfolder,runfolder) = genMainFolders(graphtraceinput,runresultinput,configinput,sideinput)
mainparamlist = list(itertools.product(startcounts,noises))
random.shuffle(mainparamlist)
for startcount,noise in mainparamlist:
samplenoisestr = "{0}-{1}-{2}".format(noisetype,noise,0)
graphinput = [graphfolder,realdata,evol,prob,smodel,filesamplecount,inter]
traceinput = [tracefolder,fraccons,samplenoisestr,startcount]
path2info = returnPathInfo(graphinput,traceinput)
algoblocks = getAlgoBlocks(prob,inter,infermode,smodel)
if infermode == "Spreader":
paramlist = list(itertools.product(algoblocks,mintimefracs,timecounts))
elif infermode == "History":
paramlist = list(itertools.product(algoblocks,maxtimefracs,timecounts))
for path in path2info.keys():
Gname,tracefile = path2info[path]
tracestr = "-".join(tracefile.split("/")[-3:])
for algoblock,timefrac,timecount in paramlist:
algo,algoparams = algoblock
if algo in ["NetSleuth","RumorCentrality","KEffectors"]:
assert infermode == "Spreader" and prob == "dis"
if timecount > 1:
continue
algofields = [algo,algoparams,runfolder]
tracefields = [noise,noisetype,timefrac,timecount,realdata,tracefile,tracestr,newtracefolder,tracefolder]
otherfields = [complete,completeupto,path,graphfolder,configfolder,resultfolder]
runOtherAlgos(algofields,tracefields,otherfields)
else:
algofields = [algo,algoparams,infermode,inter,runfolder]
tracefields = [noise,noisetype,timefrac,timecount,prob,smodel,evol,realdata,tracefile,tracestr,newtracefolder,tracefolder]
otherfields = [complete,completeupto,path,printscore,graphfolder,configfolder,resultfolder]
runMyAlgos(algofields,tracefields,otherfields)
| [
"[email protected]"
] | |
c43f3fc077de3a17d699c5cb4c8416a0f23c88d5 | 315450354c6ddeda9269ffa4c96750783963d629 | /CMSSW_7_0_4/src/TotemDQMLite/GUI/scripts/.svn/text-base/reco_template_T1_cfg.py.svn-base | f60ffa0be9c54729e058dea21046f4747c66c5f4 | [] | no_license | elizamelo/CMSTOTEMSim | e5928d49edb32cbfeae0aedfcf7bd3131211627e | b415e0ff0dad101be5e5de1def59c5894d7ca3e8 | refs/heads/master | 2021-05-01T01:31:38.139992 | 2017-09-12T17:07:12 | 2017-09-12T17:07:12 | 76,041,270 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | import FWCore.ParameterSet.Config as cms
process = cms.Process("recoT1")
# Specify the maximum events to simulate
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
# Configure whether you want detailed or simple log information.
# LoggerMax -- detailed log output including: errors.log, warnings.log, infos.log, debugs.log
# LoggerMin -- simple log info output to the standard output (e.g. screen)
process.load("Configuration.TotemCommon.LoggerMin_cfi")
# RawDataSource
process.load('TotemRawData.Readers.RawDataSource_cfi')
#process.source.fileNames.append('/project/gruppo1/totem/IP5_2015/Data/run_EVB-wn10_9261.000.vmeb')
process.source.fileNames.append('$input_file')
# Raw to digi conversion
process.load('TotemCondFormats.DAQInformation.DAQMappingSourceXML_cfi')
process.DAQMappingSourceXML.mappingFileNames.append('TotemCondFormats/DAQInformation/data/t1_all_run2.xml')
process.DAQMappingSourceXML.maskFileNames.append('TotemCondFormats/DAQInformation/test/T1DeadChannelsList_9255_onlyStrips.xml')
# Random number generator service
process.load("Configuration.TotemCommon.RandomNumbers_cfi")
################## STEP 1process.Raw2DigiProducer*process.TriggerBits
process.load('TotemRawData.RawToDigi.Raw2DigiProducer_cfi')
process.load("RecoTotemT1T2.T1MakeCluster.T1MakeCluster_cfi")
process.t1cluster.T1DigiVfatCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")
process.t1cluster.ActivateDeadChannels = cms.bool(True)
process.load("RecoTotemT1T2.T1RecHit.T1RecHit_cfi")
process.t1rechit.T1DigiWireCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")
process.load("RecoTotemT1T2.T1RoadProducer.T1RoadProducer_cfi")
process.t1roads.Alignment = cms.bool(True)
process.load("RecoTotemT1T2.T1TrackProducer2.T1TrackProducer2_cfi")
# Configure the output module (save the result in a file)
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('$output_file'),
outputCommands = cms.untracked.vstring('keep *')
)
process.path = cms.Path(
process.Raw2DigiProducer
*process.t1cluster
*process.t1rechit
# *process.t1roads
# *process.t1tracks2
)
process.outpath = cms.EndPath(process.output)
| [
"[email protected]"
] | ||
b4e990d93bfd4a2916201a75c53557884579150a | f62fd455e593a7ad203a5c268e23129473d968b6 | /python-watcherclient-1.0.0/watcherclient/osc/plugin.py | 5885de3b4873319fa9e70d64baded41315c15e6c | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 1,854 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_API_VERSION = '1'
API_VERSION_OPTION = 'os_infra_optim_api_version'
API_NAME = 'infra-optim'
API_VERSIONS = {
'1': 'watcherclient.v1.client.Client',
}
def make_client(instance):
"""Returns an infra-optim service client."""
infraoptim_client_class = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating infraoptim client: %s', infraoptim_client_class)
client = infraoptim_client_class(
os_watcher_api_version=instance._api_version[API_NAME],
session=instance.session,
region_name=instance._region_name,
)
return client
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument('--os-infra-optim-api-version',
metavar='<infra-optim-api-version>',
default=utils.env(
'OS_INFRA_OPTIM_API_VERSION',
default=DEFAULT_API_VERSION),
help=('Watcher API version, default=' +
DEFAULT_API_VERSION +
' (Env: OS_INFRA_OPTIM_API_VERSION)'))
return parser
| [
"[email protected]"
] | |
b13aec275da4151009697accac9711e4949a705d | 76f549c062600a0c713315a9a7361ebb111178f8 | /Taller/Preguntas/PrimeraPregunta.py | 329f447c04d5a3317beecceb811b7df4bb35473d | [] | no_license | jorszs/AI | f612f26537fc3563dd2837c8f67801f091f7e3a0 | 05a839e6e115e7c6c9378e84d5ac7f50afe2870d | refs/heads/master | 2020-03-11T17:57:32.555978 | 2018-06-07T01:48:36 | 2018-06-07T01:48:36 | 130,162,558 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py |
import urllib3
def getNameNodes():
    # reads links.csv (skipping the header row) and groups the listed file
    # names by the prefix before the first dot, e.g. {"a280": ["a280.tsp", ...]}
    i = 0
    res = {}
    archivo = open('links.csv', 'rt')
    for linea in archivo:
        k = linea.replace(' ', '').replace('\n', '')
        if i > 0:  # skip the CSV header line
            j = k.split('.')
            if j[0] in res:
                res[j[0]].append(k)
            else:
                res[j[0]] = [k]
        i += 1
    archivo.close()
    return res
def getDataWeb(url):
    # downloads the given url and returns the raw response body
    http = urllib3.PoolManager()
    r = http.request('GET', url)
    return r.data
def makeArchivos(archivos):
    # downloads every file of every instance from the TSPLIB mirror and
    # writes it, one field per line, into the archivos/ folder
    base = 'http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/'
    for k, v in archivos.items():
        for e in v:
            data = str(getDataWeb(base + e))
            a = data.replace('\\n', ',')
            #b = a.replace('\\', '')
            j = a.split(',')
            if len(e.split('.')) > 2:
                # captures the optimum (files such as "<name>.opt.tour")
                f = open('archivos/' + k + '.opt' + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
            else:
                f = open('archivos/' + k + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
if __name__ == "__main__":
archivos = getNameNodes()
#print(archivos)
makeArchivos(archivos)
| [
"[email protected]"
] | |
f514f0c972565ebfc8022902b1abcc0fa242ca14 | 9d07335de5a17453bf8ae290d70993d7b20dddcd | /.history/dice_20210223203524.py | a9e39059915547dad83aa8fbce4c3cc0fedfd011 | [] | no_license | wh-debug/Matplotlib | 8d12291cd4135b3b42c185e6700f22c627ddb046 | b4f5bf63d977620f799d953c67b262c75344a1cb | refs/heads/master | 2023-03-14T10:09:33.602492 | 2021-02-23T13:51:21 | 2021-02-23T13:51:21 | 340,374,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | '''
Author: your name
Date: 2021-02-23 20:07:30
LastEditTime: 2021-02-23 20:35:24
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Matplotlib\dice.py
'''
from make_plotly import Die
import matplotlib.pyplot as plt
x_values = [1, 2, 3, 4, 5, 6]
y_values = []
die = Die()
#todo create an empty list; the roll results are stored in it
results = []
for roll_num in range(1000):
result = die.roll()
results.append(result)
frequencies = []
for value in range(1, die.num_sides+1):
    frequency = results.count(value) #todo counts how many times this face value occurs in the results list
frequencies.append(frequency)
print(frequencies)
| [
"[email protected]"
] | |
3b9283807f9a633e9ca03ea36b3db90607bb9388 | 5063587053951fc1dc558c657d06e0b99187baf5 | /electrumx/server/controller.py | 6c449b6927f07497c4a7bd4b643a57b2177d5729 | [
"MIT"
] | permissive | Japangeek/electrumx | 04cbd7f793afe9fa2dff8adad8e7900f4a80b279 | a4ea34c6fb9bc887afb19779bde107d97006d8b7 | refs/heads/master | 2020-05-15T11:46:52.339001 | 2019-04-18T13:29:37 | 2019-04-18T13:29:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | py | # Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
from asyncio import Event
from aiorpcx import _version as aiorpcx_version, TaskGroup
import electrumx
from electrumx.lib.server_base import ServerBase
from electrumx.lib.util import version_string
from electrumx.server.db import DB
from electrumx.server.mempool import MemPool, MemPoolAPI
from electrumx.server.session import SessionManager
class Notifications(object):
# hashX notifications come from two sources: new blocks and
# mempool refreshes.
#
# A user with a pending transaction is notified after the block it
# gets in is processed. Block processing can take an extended
# time, and the prefetcher might poll the daemon after the mempool
# code in any case. In such cases the transaction will not be in
# the mempool after the mempool refresh. We want to avoid
# notifying clients twice - for the mempool refresh and when the
# block is done. This object handles that logic by deferring
# notifications appropriately.
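    # A short illustrative sketch of this behaviour follows the class
    # definition below.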
def __init__(self):
self._touched_mp = {}
self._touched_bp = {}
self._highest_block = -1
async def _maybe_notify(self):
tmp, tbp = self._touched_mp, self._touched_bp
common = set(tmp).intersection(tbp)
if common:
height = max(common)
elif tmp and max(tmp) == self._highest_block:
height = self._highest_block
else:
# Either we are processing a block and waiting for it to
# come in, or we have not yet had a mempool update for the
# new block height
return
touched = tmp.pop(height)
for old in [h for h in tmp if h <= height]:
del tmp[old]
for old in [h for h in tbp if h <= height]:
touched.update(tbp.pop(old))
await self.notify(height, touched)
async def notify(self, height, touched):
pass
async def start(self, height, notify_func):
self._highest_block = height
self.notify = notify_func
await self.notify(height, set())
async def on_mempool(self, touched, height):
self._touched_mp[height] = touched
await self._maybe_notify()
async def on_block(self, touched, height):
self._touched_bp[height] = touched
self._highest_block = height
await self._maybe_notify()
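# --- illustrative sketch (not part of the original module) ------------------
# A minimal, hypothetical use of Notifications showing the deferral above:
# a mempool refresh that already sees block 5 arrives while block 5 is still
# being processed, so clients are notified exactly once, after on_block():
#
#     import asyncio
#
#     async def demo():
#         n = Notifications()
#
#         async def notify(height, touched):
#             print(height, touched)
#
#         await n.start(4, notify)        # prints "4 set()"
#         await n.on_mempool({b'a'}, 5)   # deferred: block 5 not processed yet
#         await n.on_block({b'b'}, 5)     # prints "5 {b'a', b'b'}"
#
#     asyncio.run(demo())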
class Controller(ServerBase):
    '''Manages server initialisation and shutdown.
Servers are started once the mempool is synced after the block
processor first catches up with the daemon.
'''
async def serve(self, shutdown_event):
'''Start the RPC server and wait for the mempool to synchronize. Then
start serving external clients.
'''
if not (0, 15, 0) <= aiorpcx_version < (0, 16):
raise RuntimeError('aiorpcX version 0.15.x is required')
env = self.env
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
self.logger.info(f'software version: {electrumx.version}')
self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
self.logger.info(f'event loop policy: {env.loop_policy}')
self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')
notifications = Notifications()
Daemon = env.coin.DAEMON
BlockProcessor = env.coin.BLOCK_PROCESSOR
daemon = Daemon(env.coin, env.daemon_url)
db = DB(env)
bp = BlockProcessor(env, db, daemon, notifications)
# Set notifications up to implement the MemPoolAPI
def get_db_height():
return db.db_height
notifications.height = daemon.height
notifications.db_height = get_db_height
notifications.cached_height = daemon.cached_height
notifications.mempool_hashes = daemon.mempool_hashes
notifications.raw_transactions = daemon.getrawtransactions
notifications.lookup_utxos = db.lookup_utxos
MemPoolAPI.register(Notifications)
mempool = MemPool(env.coin, notifications)
session_mgr = SessionManager(env, db, bp, daemon, mempool,
shutdown_event)
# Test daemon authentication, and also ensure it has a cached
# height. Do this before entering the task group.
await daemon.height()
caught_up_event = Event()
mempool_event = Event()
async def wait_for_catchup():
await caught_up_event.wait()
await group.spawn(db.populate_header_merkle_cache())
await group.spawn(mempool.keep_synchronized(mempool_event))
async with TaskGroup() as group:
await group.spawn(session_mgr.serve(notifications, mempool_event))
await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
await group.spawn(wait_for_catchup())
| [
"[email protected]"
] | |
6c201191527104f2d328b58b2ba84caec9c846d3 | a5ea93395d8d762caefd129648b2e954754afb00 | /examples/6_p_scale_test_Yokoo_Pt.py | fc618a74a4e14089082439c5476fe6df9f86e0e2 | [
"Apache-2.0"
] | permissive | SHDShim/pytheos | 4295e233dd089d0c9c66218a127d3f099f1d36df | bb86e0ff345efcffb04f08182c09b06b3c54930e | refs/heads/master | 2023-03-16T23:23:56.840071 | 2023-03-11T03:13:23 | 2023-03-11T03:13:23 | 93,273,486 | 7 | 6 | Apache-2.0 | 2019-11-18T13:11:46 | 2017-06-03T20:54:46 | Python | UTF-8 | Python | false | false | 1,369 | py |
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('cat', '0Source_Citation.txt')
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib notebook # for interactive
# For high dpi displays.
# In[3]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
# # 0. General note
# This example compares pressure calculated from `pytheos` and original publication for the platinum scale by Yokoo 2009.
# # 1. Global setup
# In[4]:
import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos
# # 3. Compare
# In[5]:
eta = np.linspace(1., 0.60, 21)
print(eta)
# In[6]:
yokoo_pt = eos.platinum.Yokoo2009()
# In[7]:
yokoo_pt.print_equations()
# In[8]:
yokoo_pt.print_equations()
# In[9]:
yokoo_pt.print_parameters()
# In[10]:
v0 = 60.37930856339099
# In[11]:
yokoo_pt.three_r
# In[12]:
v = v0 * (eta)
temp = 3000.
# In[13]:
p = yokoo_pt.cal_p(v, temp * np.ones_like(v))
# <img src='./tables/Yokoo_Pt.png'>
# In[14]:
print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
print("{0: .3f} {1: .2f}".format(eta_i, p_i))
# It is alarming that even the 300 K isotherm does not match the table value. The difference is 1%.
# In[15]:
v = yokoo_pt.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
| [
"[email protected]"
] | |
8aad654f743a97284e6607a741abc184b41bf200 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20181014/example_pycomment/pycomment.py | 4604d5e1df1248b710f77c3ea0471b933a54d818 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 6,165 | py | import sys
import contextlib
from io import StringIO
from lib2to3 import pytree
from lib2to3 import pygram
from lib2to3.pgen2 import driver
from lib2to3.pgen2 import token
from lib2to3.pgen2.parse import ParseError
from lib2to3.fixer_util import Assign, Name, Newline
# utf8 's PUA(https://en.wikipedia.org/wiki/Private_Use_Areas)
SEP = "\U000F0000"
SEP_MARKER = "ZZ{}ZZ".format(SEP)
COMMENT_MARKER = "# =>"
STDOUT_HEADER_MARKER = "# -- stdout --------------------"
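# Illustrative example of what this module does (hypothetical input file):
# a line whose comment starts with COMMENT_MARKER, e.g.
#
#     x = 1 + 1  # =>
#
# is rewritten to "_ = 1 + 1" plus a print of repr(_) wrapped in SEP_MARKER,
# so after run() the source is re-emitted with the evaluated value inline:
#
#     x = 1 + 1  # => 2
#
# and anything the executed script wrote to stdout is appended under
# STDOUT_HEADER_MARKER as "# >>" lines.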
default_driver = driver.Driver(pygram.python_grammar_no_print_statement, convert=pytree.convert)
def parse_string(code, parser_driver=default_driver, *, debug=True):
return parser_driver.parse_string(code, debug=debug)
def parse_file(filename, parser_driver=default_driver, *, debug=True):
try:
return parser_driver.parse_file(filename, debug=debug)
except ParseError as e:
if "bad input:" not in repr(e): # work around
raise
with open(filename) as rf:
body = rf.read()
return parse_string(body + "\n", parser_driver=parser_driver, debug=debug)
def node_name(node):
# Nodes with values < 256 are tokens. Values >= 256 are grammar symbols.
if node.type < 256:
return token.tok_name[node.type]
else:
return pygram.python_grammar.number2symbol[node.type]
type_repr = pytree.type_repr
class PyTreeVisitor:
def visit(self, node):
method = 'visit_{0}'.format(node_name(node))
if hasattr(self, method):
# Found a specific visitor for this node
if getattr(self, method)(node):
return
elif hasattr(node, "value"): # Leaf
self.default_leaf_visit(node)
else:
self.default_node_visit(node)
def default_node_visit(self, node):
for child in node.children:
self.visit(child)
def default_leaf_visit(self, leaf):
pass
def transform_string(source: str):
t = parse_string(source)
return transform(t)
def transform_file(fname: str):
with open(fname) as rf:
return transform_string(rf.read())
def transform(node):
t = Transformer()
t.transform(node)
return node
class Transformer(PyTreeVisitor):
marker = COMMENT_MARKER
def visit_NEWLINE(self, node):
if node.prefix.lstrip().startswith(self.marker):
# MEMO: <expr> -> _ = <expr>
target = node
while True:
parent = target.parent
if parent is None:
return
if type_repr(target.parent.type) == "simple_stmt":
break
target = parent
eol = target # target is Leaf("\n]")
target = eol.prev_sibling
cloned = target.clone()
cloned.parent = None
assigned = Assign(Name("_"), cloned)
assigned.prefix = target.prefix
target.replace(assigned)
# MEMO: adding print(SEP_MARKER, _, SEP_MARKER, sep="\n")
this_stmt = eol.parent
print_stmt = this_stmt.clone()
print_stmt.children = []
print_stmt.append_child(
Name(
"print({ms!r}, repr(_), {me!r}, sep='')".format(
ms="{}{}:".format(SEP_MARKER, node.get_lineno()), me=SEP_MARKER
)
)
)
print_stmt.prefix = assigned.prefix
# xxx: for first line
if not print_stmt.prefix:
prev_line = assigned.parent.prev_sibling
if prev_line.type == token.INDENT:
print_stmt.prefix = prev_line.value
print_stmt.append_child(Newline())
for i, stmt in enumerate(this_stmt.parent.children):
if stmt == this_stmt:
this_stmt.parent.insert_child(i + 1, print_stmt)
break
transform = PyTreeVisitor.visit
def run(sourcefile, out=sys.stdout):
o = StringIO()
with contextlib.redirect_stdout(o):
exec(str(transform_file(sourcefile)))
result_map = {}
stdout_outputs = []
for line in o.getvalue().splitlines():
if line.startswith(SEP_MARKER) and line.endswith(SEP_MARKER):
line = line.strip(SEP_MARKER)
lineno, line = line.split(":", 2)
result_map[lineno] = line
else:
stdout_outputs.append(line)
i = 0
with open(sourcefile) as rf:
import re
rx = re.compile(COMMENT_MARKER + ".*$")
for lineno, line in enumerate(rf, 1):
if line.rstrip() == STDOUT_HEADER_MARKER:
break
m = rx.search(line)
k = str(lineno)
if m is None or k not in result_map:
print(line, end="", file=out)
else:
print(line[:m.start()] + COMMENT_MARKER, result_map[k], file=out)
i += 1
if stdout_outputs:
print(STDOUT_HEADER_MARKER, file=out)
for line in stdout_outputs:
print("# >>", line, file=out)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("sourcefile")
parser.add_argument("--inplace", action="store_true")
parser.add_argument("--show-only", action="store_true")
args = parser.parse_args()
if args.show_only:
print(str(transform_file(args.sourcefile)))
from prestring.python.parse import dump_tree
dump_tree(transform_file(args.sourcefile))
elif not args.inplace:
run(args.sourcefile)
else:
import tempfile
import os
import shutil
name = None
try:
with tempfile.NamedTemporaryFile("w", delete=False) as wf:
name = wf.name
run(args.sourcefile, out=wf)
print("replace: {} -> {}".format(name, args.sourcefile), file=sys.stderr)
shutil.move(name, args.sourcefile)
except Exception:
if os.path.exists(name):
os.unlink(name)
raise
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
221ab1ad77324845627959d14968b0eed0e8e187 | f66016b962e105898ea14982e229bd44f66f32a2 | /settings.py | c142f11377dc1d70c39b3451e46f9a4f2ab30a36 | [
"MIT"
] | permissive | DerThorsten/pc | d3ceace388dd3460c0133e97b7fba0fde8d1e811 | 41d7474ceff8de7b95be5d4fbc42a40e89799e34 | refs/heads/master | 2021-01-12T10:41:47.797694 | 2016-11-10T21:59:35 | 2016-11-10T21:59:35 | 72,621,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,216 | py | from collections import OrderedDict
import h5py
from features import registerdFeatureOperators
class Settings(object):
def __init__(self, settingsDict, predictionSettingsDict=None):
self.settingsDict = settingsDict
self.featureBlockShape = tuple(self.settingsDict["setup"]["blockShape"])
if predictionSettingsDict is not None:
self.featureBlockShape = tuple(predictionSettingsDict['setup']["blockShape"])
self.numberOfClasses = self.settingsDict["setup"]["nClasses"]
self.predictionSettingsDict = predictionSettingsDict
print(self.settingsDict['setup'])
self.useBlockF = self.settingsDict['setup'].get("useBlock", None)
#self.useBlockF = self.settingsDict['setup']['useBlock']
assert self.useBlockF is not None
def useTrainingBlock(self, blockIndex, blockBegin, blockEnd):
if self.useBlockF is not None:
            return self.useBlockF(blockIndex=blockIndex, blockBegin=blockBegin, blockEnd=blockEnd)
else:
return True
def trainingInstancesNames(self):
setup = self.settingsDict["setup"]
return setup['trainingDataNames']
def predictionInstancesNames(self):
return self.predictionSettingsDict['predictionInput'].keys()
def trainignInstancesDataDicts(self):
setup = self.settingsDict["setup"]
trainingDataNames = setup['trainingDataNames']
trainingInstancesSettings = [ ]
for trainingDataName in trainingDataNames:
s = self.settingsDict["trainingData"][trainingDataName]
s['name'] = trainingDataName
trainingInstancesSettings.append(s)
return trainingInstancesSettings
def predictionInstancesDataDicts(self):
assert self.predictionSettingsDict is not None
d = self.predictionSettingsDict['predictionInput']
dicts = []
for key in d.keys():
ddict = d[key]
ddict['name'] = key
dicts.append(ddict)
return dicts
def featureSetttingsList(self):
return self.settingsDict["setup"]["featureSettings"]
def getLabelsH5Path(self, instanceName):
trainingInstanceDataDict = self.settingsDict["trainingData"][instanceName]
f,d = trainingInstanceDataDict['labels']
return f,d
def getDataH5Dsets(self, instanceDataDict, openH5Files):
dataH5Dsets = OrderedDict()
for featureSettings in self.featureSetttingsList():
inputFileName = featureSettings['name']
print(" ","inputFile:",inputFileName)
# get the h5filename
dataDict = instanceDataDict['data']
f,d = dataDict[inputFileName]['file']
h5File = h5py.File(f,'r')
dset = h5File[d]
# dsets
dataH5Dsets[inputFileName] = dset
            # remember all opened files
openH5Files.append(h5File)
return dataH5Dsets, openH5Files
def getFeatureOperators(self):
dataH5Dsets = OrderedDict()
outerList = []
maxHaloList = []
#print("fs0",self.featureSetttingsList()[0])
#print("fs1",self.featureSetttingsList()[0])
for featureSettings in self.featureSetttingsList():
inputFileName = featureSettings['name']
#print("features for",inputFileName)
featureOperatorsSettingsList = featureSettings["features"]
innerList = []
maxHalo = (0,0,0)
for featureOperatorSettings in featureOperatorsSettingsList:
#print(featureOperatorSettings)
fOpName = featureOperatorSettings['type']
fOpKwargs = featureOperatorSettings['kwargs']
fOpCls = registerdFeatureOperators[fOpName]
fOp = fOpCls(**fOpKwargs)
halo = fOp.halo()
                maxHalo = [max(aa, bb) for aa, bb in zip(halo, maxHalo)]  # materialise so it stays a list under Python 3
innerList.append(fOp)
outerList.append(innerList)
maxHaloList.append(maxHalo)
return outerList,maxHaloList
| [
"[email protected]"
] | |
c31a0e73d6e975d7fadf3b697bac94aa6dd6b066 | 3c06dc187183b5f78dbe24d38f7a3556b7cc9975 | /Python/LC51_NQueens.py | 24f5ce90b79b47c78b3abc2259af3dc85e7f029c | [] | no_license | wondershow/CodingTraining | 071812ffd34850ce0417b95a91ac39a983fca92d | 0250c3764b6e68dfe339afe8ee047e16c45db4e0 | refs/heads/master | 2021-07-02T22:18:46.774286 | 2021-06-30T14:08:54 | 2021-06-30T14:08:54 | 77,458,117 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
def generate_board(pos, n):
res = []
for i in range(n):
line = ["."] * n
line[pos[i]] = "Q"
res.append("".join(line))
return res
        # occupied columns, "\"-diagonals keyed by (row - col) and
        # "/"-diagonals keyed by (row + col)
        cols, diagonal, anti_diagonal = set(), set(), set()
        def dfs(res, row, n, cur):
            nonlocal cols, diagonal, anti_diagonal
            if row == n:
                res.append(generate_board(cur, n))
                return
            for col in range(n):
                if col in cols or (row + col) in anti_diagonal or (row - col) in diagonal:
                    continue
                cols.add(col)
                anti_diagonal.add(row + col)
                diagonal.add(row - col)
                cur.append(col)
                dfs(res, row + 1, n, cur)
                cur.pop()
                cols.remove(col)
                anti_diagonal.remove(row + col)
                diagonal.remove(row - col)
        res = []
        dfs(res, 0, n, [])
        return res
| [
"[email protected]"
] | |
a726fded486bd2431700f554247ec143c4470120 | 947fa6a4a6155ffce0038b11f4d743603418ad68 | /.c9/metadata/environment/fb_post_learning/fb_post_v2/tests/storages/test_is_reply.py | 6f54509d512ad6b525aa649971f404875a96935a | [] | no_license | bharathi151/bharathi_diyyala | bd75e10639d7d22b332d5ce677e7799402dc4984 | 99f8657d010c790a0e4e4c9d6b57f81814784eb0 | refs/heads/master | 2022-11-21T12:43:48.401239 | 2020-07-23T09:05:52 | 2020-07-23T09:05:52 | 281,903,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,064 | py | {"changed":true,"filter":false,"title":"test_is_reply.py","tooltip":"/fb_post_learning/fb_post_v2/tests/storages/test_is_reply.py","value":"import pytest\nfrom fb_post_v2.storages.comment_storage_implementation import StorageImplementation\n\n\n@pytest.mark.django_db\ndef test_is_reply_given_invalid_details_returns_false(\n    create_users,\n    create_post,\n    create_comments\n):\n    comment_id = 1\n    invalid = False\n    sql_storage = StorageImplementation()\n\n    result = sql_storage.is_reply(comment_id=comment_id)\n\n    assert result == invalid\n\n\n@pytest.mark.django_db\ndef test_is_reply_given_valid_details_returns_true(create_comments,\n                                                   create_users,\n                                                   create_replies,\n                                                   create_post):\n    comment_id = 3\n    valid = True\n    sql_storage = StorageImplementation()\n\n    result = sql_storage.is_reply(comment_id=comment_id)\n\n    assert result == valid"}
"]},{"start":{"row":21,"column":51},"end":{"row":21,"column":52},"action":"insert","lines":["c"]},{"start":{"row":21,"column":52},"end":{"row":21,"column":53},"action":"insert","lines":["r"]}],[{"start":{"row":21,"column":53},"end":{"row":21,"column":54},"action":"insert","lines":["e"],"id":88},{"start":{"row":21,"column":54},"end":{"row":21,"column":55},"action":"insert","lines":["a"]}],[{"start":{"row":21,"column":51},"end":{"row":21,"column":55},"action":"remove","lines":["crea"],"id":89},{"start":{"row":21,"column":51},"end":{"row":21,"column":66},"action":"insert","lines":["create_comments"]}],[{"start":{"row":21,"column":65},"end":{"row":21,"column":66},"action":"remove","lines":["s"],"id":90},{"start":{"row":21,"column":64},"end":{"row":21,"column":65},"action":"remove","lines":["t"]},{"start":{"row":21,"column":63},"end":{"row":21,"column":64},"action":"remove","lines":["n"]},{"start":{"row":21,"column":62},"end":{"row":21,"column":63},"action":"remove","lines":["e"]},{"start":{"row":21,"column":61},"end":{"row":21,"column":62},"action":"remove","lines":["m"]},{"start":{"row":21,"column":60},"end":{"row":21,"column":61},"action":"remove","lines":["m"]},{"start":{"row":21,"column":59},"end":{"row":21,"column":60},"action":"remove","lines":["o"]},{"start":{"row":21,"column":58},"end":{"row":21,"column":59},"action":"remove","lines":["c"]}],[{"start":{"row":21,"column":58},"end":{"row":21,"column":59},"action":"insert","lines":["u"],"id":91},{"start":{"row":21,"column":59},"end":{"row":21,"column":60},"action":"insert","lines":["s"]},{"start":{"row":21,"column":60},"end":{"row":21,"column":61},"action":"insert","lines":["e"]},{"start":{"row":21,"column":61},"end":{"row":21,"column":62},"action":"insert","lines":["r"]},{"start":{"row":21,"column":62},"end":{"row":21,"column":63},"action":"insert","lines":["s"]}],[{"start":{"row":21,"column":63},"end":{"row":21,"column":64},"action":"insert","lines":[","],"id":92}],[{"start":{"row":1,"column":28},"end":{"row":1,"column":29},"action":"remove","lines":["t"],"id":93},{"start":{"row":1,"column":27},"end":{"row":1,"column":28},"action":"remove","lines":["s"]},{"start":{"row":1,"column":26},"end":{"row":1,"column":27},"action":"remove","lines":["o"]},{"start":{"row":1,"column":25},"end":{"row":1,"column":26},"action":"remove","lines":["p"]}],[{"start":{"row":1,"column":25},"end":{"row":1,"column":26},"action":"insert","lines":["c"],"id":94},{"start":{"row":1,"column":26},"end":{"row":1,"column":27},"action":"insert","lines":["o"]},{"start":{"row":1,"column":27},"end":{"row":1,"column":28},"action":"insert","lines":["m"]},{"start":{"row":1,"column":28},"end":{"row":1,"column":29},"action":"insert","lines":["e"]}],[{"start":{"row":1,"column":28},"end":{"row":1,"column":29},"action":"remove","lines":["e"],"id":95}],[{"start":{"row":1,"column":28},"end":{"row":1,"column":29},"action":"insert","lines":["m"],"id":96},{"start":{"row":1,"column":29},"end":{"row":1,"column":30},"action":"insert","lines":["e"]},{"start":{"row":1,"column":30},"end":{"row":1,"column":31},"action":"insert","lines":["n"]},{"start":{"row":1,"column":31},"end":{"row":1,"column":32},"action":"insert","lines":["t"]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":17},"end":{"row":6,"column":17},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1590061738237} | [
"[email protected]"
] | |
87f86ee5f18ff897da50586e96692fc1a9d89d64 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03371/s901597766.py | 123ae25fb816ba52b35bc7528454231feb8593bf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | A, B, C, X, Y = map(int, input().split())
cntA = 0
cntB = 0
cntC = 0
value = 0
if 2*C<=A+B:
    # buying AB-pizzas (price C) in pairs is the cheaper way to cover both kinds,
    # so we buy (max(X, Y)-abs(X-Y))*2 = 2*min(X, Y) of them
Cmaisu = (max(X, Y)-abs(X-Y))*2
value += C * Cmaisu
if (2*C<=A and X>Y) or (2*C<=B and X<Y):
        # for the remaining abs(X-Y) pizzas too, buy AB pairs if 2C is cheaper
value += C * abs(X-Y) * 2
else:
if X>Y:
value += A*(X-Y)
else:
value += B*(Y-X)
else:
value += A*X+B*Y
print(value) | [
"[email protected]"
] | |
f6b658b1ddac70cd71d916a2ed089c862e530a4e | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/validators/scatterpolargl/marker/colorbar/tickformatstop/_templateitemname.py | 3aca0890a4cb67b0ff4f477575d146dfbe41dbf8 | [
"MIT"
] | permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 545 | py | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='templateitemname',
parent_name='scatterpolargl.marker.colorbar.tickformatstop',
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| [
"[email protected]"
] | |
bf825d15878d7b99d77904e32eb9daf305bfa790 | 4eaa1b9b08914e0a2cc9276363e489ccef19d3a2 | /ch3/guest_list.py | 3a09e97b39d325d4f92b191d3cabd2777d74e4f8 | [] | no_license | melihcanyardi/Python-Crash-Course-2e-Part-I | 69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3 | 0c9b250f512985c04b2c0397f3afaa8bf3a57f17 | refs/heads/main | 2023-03-12T21:43:14.012537 | 2021-03-03T19:23:41 | 2021-03-03T19:23:41 | 344,236,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | guest_list = ['Ali', 'Ayşe', 'Mehmet', 'Ahmet']
message = f"Hey {guest_list[0]}, would you like to join me for the dinner?"
print(message)
message = f"Hey {guest_list[1]}, would you like to join me for the dinner?"
print(message)
message = f"Hey {guest_list[2]}, would you like to join me for the dinner?"
print(message)
message = f"Hey {guest_list[3]}, would you like to join me for the dinner?"
print(message)
| [
"[email protected]"
] | |
4696d4803fcbd9b7f1fa002caeed6d15ed478d7e | 4d0f3e2d7455f80caea978e4e70621d50c6c7561 | /Threading/Lock.py | efce0601f22fa1de07581c3637eba0dc6384a431 | [] | no_license | mhdr/PythonSamples | 66940ee2353872d2947c459e3865be42140329c6 | 1a9dccc05962033ea02b081a39cd67c1e7b29d0c | refs/heads/master | 2020-04-14T01:10:13.033940 | 2016-05-28T15:33:52 | 2016-05-28T15:33:52 | 30,691,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | import threading
from threading import Lock
def print_multi():
    lock = Lock()
    with lock:  # context manager acquires the lock and guarantees its release
        print("Hello World")
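
# A minimal sketch (not in the original) of sharing one Lock across threads so
# the workers' prints cannot interleave; it uses the threading import above:
#
#     shared = Lock()
#     def worker(tag):
#         with shared:
#             print("Hello from", tag)
#     t1 = threading.Thread(target=worker, args=("t1",))
#     t2 = threading.Thread(target=worker, args=("t2",))
#     t1.start(); t2.start(); t1.join(); t2.join()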
print_multi() | [
"[email protected]"
] | |
4733bae1eb944dc330c20c4483dd7b1171de45b2 | 99833651e4a6a0bc1221d577d9fc43b8568abedd | /nltk_contrib/hadoop/tf_idf/tf_map.py | 25f49052f7b5472b8a00478c177aac8e3dd514cd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | nltk/nltk_contrib | 689e2683aa01b120c7473b9a4fc50bc49f014390 | 95d1806e2f4e89e960b76a685b1fba2eaa7d5142 | refs/heads/master | 2023-07-31T13:32:47.358897 | 2022-11-21T18:49:33 | 2022-11-21T18:49:33 | 2,530,774 | 145 | 127 | NOASSERTION | 2022-11-21T18:49:34 | 2011-10-07T05:59:13 | Python | UTF-8 | Python | false | false | 692 | py | from hadooplib.mapper import MapperBase
class TFMapper(MapperBase):
"""
get the filename (one filename per line),
open the file and count the term frequency.
"""
def map(self, key, value):
"""
output (word filename, 1) for every word in files
@param key: None
@param value: filename
"""
filename = value.strip()
if len(filename) == 0:
return
        with open(filename, 'r') as file:  # context manager closes the file handle
            for line in file:
                words = line.strip().split()
                for word in words:
                    self.outputcollector.collect(word + " " + filename, 1)
if __name__ == "__main__":
TFMapper().call_map()
| [
"[email protected]"
] | |
d68dd9aee38f272a57637402ae90918c73bc1986 | 641df38bb75077cd8da28b69e38b84af293b5db7 | /docassemble_base/setup.py | 73a6d2bf9b44a96183a19d6f23282978224b061d | [
"MIT"
] | permissive | bgordo3/docassemble | f19e01f2daf41eb05e2c19b5d4278bdc0d6d3ea5 | 3ce22e22e818598badc2242038f4e4abc4ee9fde | refs/heads/master | 2020-12-26T01:03:14.840009 | 2016-05-15T13:50:35 | 2016-05-15T13:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,809 | py | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
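    """Walk 'where' and return a {package: [data file paths]} mapping,
    skipping any names that match the exclude patterns."""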
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
setup(name='docassemble.base',
version='0.1',
description=('A python module for assembling documents from templates while automatically querying a user for necessary information.'),
author='Jonathan Pyle',
author_email='[email protected]',
license='MIT',
url='http://docassemble.org',
namespace_packages = ['docassemble'],
install_requires = ['docassemble', '3to2', 'babel', 'bcrypt', 'blinker', 'cffi', 'fdfgen', 'guess-language-spirit', 'html2text', 'httplib2', 'itsdangerous', 'jellyfish', 'jinja2', 'lxml', 'mako', 'markdown', 'markupsafe', 'mdx-smartypants', 'namedentities==1.5.2', 'passlib', 'pdfminer', 'pillow', 'pip', 'pycparser', 'pycrypto', 'geopy', 'pygments', 'pyjwt', 'pypdf', 'PyPDF2', 'pyrtf-ng', 'python-dateutil', 'pytz', 'pyyaml', 'qrcode', 'six', 'titlecase', 'us', 'wheel'],
packages=find_packages(),
zip_safe = False,
package_data=find_package_data(where='docassemble/base/', package='docassemble.base'),
)
| [
"[email protected]"
] | |
47821cfbba0dbe4c3efe3982af6bf0e12bc36614 | 8e7a2b9efbc0d25111f01f4cddb781961032685a | /python-1025/python/a_socket/3_ssh/cli.py | a8eb9d81fb94a0cd363d5f6691ffd91955caf960 | [] | no_license | Dituohgasirre/python | e044aa2e1fb2233b6ccd59701b834ab01e4e24c2 | 05f036d2723f75cd89e4412aaed7ee0ba5d3a502 | refs/heads/master | 2023-06-03T13:50:18.641433 | 2021-06-17T10:23:40 | 2021-06-17T10:23:40 | 366,942,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | #!/usr/bin/env python3
import socket
from pargs import parse
from net import Packet
if __name__ == "__main__":
def main():
args, opt = parse(['s|srv|1', 'p|port|1'])
srvIp = opt['srv'] if 'srv' in opt else "3.3.3.3"
port = int(opt['port']) if 'port' in opt else 9000
sd = socket.socket(type=socket.SOCK_DGRAM)
addr = (srvIp, port)
packet = Packet(sd)
while True:
cmd = input("<自己的网络SHELL>: ")
packet.send(cmd, addr, Packet.DATA)
if cmd == "exit":
break
out = ""
while True:
data, addr = packet.recv()
if data['type'] == Packet.QUIT:
break
out += data['data']
print(out)
sd.close()
main()
| [
"[email protected]"
] | |
51e0e2a1659f95d3b8414243848b7426d1ff7812 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startCirq58.py | a2b36467a64bf7e0413c3647fae2acfbe412bd39 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=3
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.X.on(input_qubit[1])) # number=6
c.append(cirq.Z.on(input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq58.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
aa886a213c5f135a412aba43490ad62764c40613 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2017_02_24_HCF_pearson/get_linkage_alt.py | 81760c80d5b2307eb97b7f5b1d063001511737ce | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,035 | py | import numpy as np
import functions as rr
import reg_functions as rf
from constants import const
from scipy.stats import pearsonr
import h5py
import time
from sklearn.preprocessing import PolynomialFeatures
def analysis(X, response_tot, groups, iscal):
RpredCV = rf.cv(X[iscal, :], response_tot[iscal], groups[iscal])
coef = rf.regression(X[iscal, :], response_tot[iscal])
Rpred = rf.prediction(X, coef)
return coef, RpredCV, Rpred
def pearson_eval(X, y):
Nfeat = X.shape[1]
pvec = np.zeros((Nfeat,))
# pvec[0] = 1 # for the constant term
for ii in xrange(Nfeat):
""" pearsonr returns tuples with the pearson correlation
and the P-value (chance of observing the data
if the null hypothesis is true). I'm going to throw away
the p-value"""
if np.all(X[:, ii] == 1):
pvec[ii] = 1
else:
pvec[ii] = pearsonr(X[:, ii], y)[0]
return pvec
def optimal_set(X, y, names):
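    """Greedy forward selection: rank features by |pearson r| against y, then
    admit them one at a time, skipping candidates that are too correlated with
    the features already chosen."""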
C = const()
Nfeat = X.shape[1]
"""obtain pearson correlation scores against the response variable"""
pvec = pearson_eval(X, y)
indxv_ = np.argsort(np.abs(pvec))[::-1]
pvec_f = np.zeros((C['fmax'],))
names_f = np.zeros((C['fmax'],), dtype="S20")
support = np.zeros((Nfeat,), dtype='bool')
indxv = np.zeros((C['fmax']), dtype='int32')
"""start by adding zero vector"""
pvec_f[0] = pvec[indxv_[0]]
names_f[0] = names[indxv_[0]]
indxv[0] = indxv_[0]
"""add vector with highest pearson correlation besides the zero vector"""
pvec_f[1] = pvec[indxv_[1]]
names_f[1] = str(names[indxv_[1]])
support[indxv_[1]] = True
indxv[1] = indxv_[1]
c = 1
for ii in xrange(2, C['fmax']):
pvecA = np.ones((ii,))
while True:
c += 1
pvecT = pearson_eval(X[:, support], X[:, indxv_[c]])
pvecA = np.abs(pvecT)
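            # NOTE: |pearson r| <= 1, so the 1.6 cutoff below never rejects a
            # candidate; the stricter 0.6 filter is left commented out above it.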
# if pvecA.max() < 0.6:
if pvecA.max() < 1.6:
break
# print str(c) + ', ' + str(np.argmax(pvecA)) + ', ' + str(pvecA.max())
pvec_f[ii] = pvec[indxv_[c]]
names_f[ii] = names[indxv_[c]]
support[indxv_[c]] = True
indxv[ii] = indxv_[c]
"""we add support for the vector of ones at the end to not screw up the
calculation"""
support[indxv_[0]] = True
return pvec_f, names_f, support, indxv
def preanalysis(loc_tot, cov_tot):
npc = loc_tot.shape[1]
ns = loc_tot.shape[0]
"""extract names from mean loc info"""
mean_only_names = []
for ii in xrange(npc):
mean_only_names += ['m%s' % str(ii+1)]
"""extract variance info from covariance matrix"""
var_only = np.zeros((ns, npc))
var_only_names = []
for ii in xrange(npc):
var_only[:, ii] = cov_tot[:, ii, ii]
var_only_names += ['c%s_%s' % (str(ii+1), str(ii+1))]
"""extract unique, off-diagonal co-variance info from
covariance matrix"""
nc = (npc**2-npc)/2
cov_only = np.zeros((ns, nc))
cov_only_names = []
c = 0
for ii in xrange(npc):
for jj in xrange(ii+1, npc):
cov_only[:, c] = cov_tot[:, ii, jj]
cov_only_names += ['c%s_%s' % (str(ii+1), str(jj+1))]
c += 1
return loc_tot, var_only, cov_only, \
mean_only_names, var_only_names, cov_only_names
def get_poly(X_pre, names_pre):
C = const()
"""get the polynomial features"""
poly = PolynomialFeatures(C['deg_max'])
poly.fit(X_pre)
X = poly.transform(X_pre)
"""get the names of the polynomial features"""
names = poly.get_feature_names(names_pre)
return X, names
def prepare(par):
np.random.seed(0)
C = const()
p = C['n_sc']
f_link = h5py.File("sample_L%s.hdf5" % C['H'], 'r')
"""gather the calibration data"""
n_tot = len(C['sid'])
ns_tot = n_tot*p
groups = np.zeros(ns_tot, dtype='int16')
response_tot = np.zeros(ns_tot, dtype='float64')
loc_tot = np.zeros((ns_tot, C['n_pc_max']), dtype='float64')
cov_tot = np.zeros((ns_tot, C['n_pc_max'], C['n_pc_max']), dtype='float64')
iscal = np.zeros((ns_tot,), dtype='bool')
c = 0
for ii in xrange(n_tot):
c_ = c + p
sid = C['sid'][ii]
"""flag elements of the calibration set"""
if sid in C['sid_cal']:
iscal[c:c_] = True
groups[c:c_] = 2*ii+np.round(np.random.random((p,)))
dset_name = "%s_%s" % (par, sid)
response_tot[c:c_] = f_link.get(dset_name)[...]
tmp = f_link.get('samp_%s' % sid)[:, :, :C['n_pc_max']]
loc_tot[c:c_, :] = np.mean(tmp, 1)
for jj in xrange(p):
cov_tot[c+jj, ...] = np.cov(tmp[jj, ...], rowvar=False)
c = c_
f_link.close()
return groups, response_tot, loc_tot, cov_tot, iscal
def linkage(par):
st = time.time()
C = const()
p = C['n_sc']
n_tot = len(C['sid'])
ns_tot = n_tot*p
"""create arrays required for linkage creation"""
precursors = prepare(par)
groups = precursors[0]
response_tot = precursors[1]
loc_tot = precursors[2]
cov_tot = precursors[3]
iscal = precursors[4]
f_reg = h5py.File("regression_results_L%s.hdf5" % C['H'], 'a')
f_reg.create_dataset('Rsim_%s' % par, data=response_tot)
f_reg.create_dataset('iscal_%s' % par, data=iscal)
coef_set = f_reg.create_dataset('coef_%s' % par,
(C['fmax'], C['fmax']),
dtype='float64')
Rpred_set = f_reg.create_dataset('Rpred_%s' % par,
(C['fmax'], ns_tot),
dtype='float64')
RpredCV_set = f_reg.create_dataset('RpredCV_%s' % par,
(C['fmax'],
p*len(C['sid_cal'])),
dtype='float64')
"""get the polynomial features"""
tmp = preanalysis(loc_tot, cov_tot)
var_only = tmp[1]
cov_only = tmp[2]
mean_only_names = tmp[3]
var_only_names = tmp[4]
cov_only_names = tmp[5]
# X_pre = np.concatenate((loc_tot, var_only, cov_only), axis=1)
# names_pre = mean_only_names + var_only_names + cov_only_names
# X_pre = np.concatenate((loc_tot, var_only), axis=1)
# names_pre = mean_only_names + var_only_names
X_pre = loc_tot
names_pre = mean_only_names
f_reg.create_dataset('featurenames_%s' % par, data=names_pre)
X, names = get_poly(X_pre, names_pre)
print "# deg1 features: " + str(len(names_pre))
print "# higher deg features: " + str(len(names))
"""perform the pearson correlation"""
pvec, names_f, support, indxv = optimal_set(X[iscal, :], response_tot[iscal], names)
f_reg.create_dataset('scores_%s' % par, data=pvec)
f_reg.create_dataset('indxsel_%s' % par, data=indxv)
"""select the most highly correlated features"""
Xp = X[:, indxv]
# import matplotlib.pyplot as plt
# plt.plot(np.arange(pvec.size), np.abs(pvec))
# plt.show()
msg = "\ntop 20 scoring features"
rr.WP(msg, C['wrt_file'])
for ii in xrange(20):
msg = "%s: %s" % (names_f[ii], pvec[ii])
rr.WP(msg, C['wrt_file'])
"""create and evaluate the final linkages"""
meanc = np.abs(response_tot[iscal]).mean()
for ii in xrange(C['fmax']):
coef, RpredCV, Rpred = analysis(Xp[:, :(ii+1)], response_tot,
groups, iscal)
coef_set[ii, :] = 0
coef_set[ii, :(ii+1)] = coef
RpredCV_set[ii, :] = RpredCV
Rpred_set[ii, :] = Rpred
err = np.mean(np.abs(RpredCV - response_tot[iscal]))/meanc
msg = "%s features: cv.mean(): %s" % (str(ii+1), str(err))
rr.WP(msg, C['wrt_file'])
f_reg.close()
timeE = np.round(time.time()-st, 1)
msg = "regressions and cross-validations completed: %s s" % timeE
rr.WP(msg, C['wrt_file'])
if __name__ == '__main__':
par = 'mu'
linkage(par)
| [
"[email protected]"
] | |
cfcf4c4c948d02a6254d41f9f56773077bb97583 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/201/47100/submittedfiles/testes.py | ffc9bae8827fb40a5106a1cf09f62f630d98ad23 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
r = float(input('Quanto ganha por hora:'))     # hourly wage
h = float(input('Horas trabalhadas no mês:'))  # hours worked in the month
t = r * h  # gross pay (fixed: was 't = r*t', which used t before assignment)
print('%.2f' % t)
inss = 0.08  # fixed: a stray ':' after the literal was a syntax error
i = inss * t
print('Desconto do INSS: %.2f' % i)
sind = 0.05  # fixed: stray ':' removed here as well
j = sind * t
print('Desconto do sindicato: %.2f' % j)
| [
"[email protected]"
] | |
c2db9fed8d3d7953430514b58aa47a57e52a59f2 | 6acea1c5206052393beb5cba132f40e55b637c11 | /doc_curation/scraping/misc_sites/iitk.py | 6df37d4e6779f4afcd0c5073d46022a249314662 | [
"MIT"
] | permissive | sanskrit-coders/doc_curation | a44afacf68d1711bcebd02c97b30a42b6d82bccc | db330393d3df052c008811f4b442421900e5fa84 | refs/heads/master | 2023-08-27T09:41:22.784001 | 2023-08-11T05:40:46 | 2023-08-11T05:40:46 | 157,027,340 | 8 | 4 | MIT | 2022-12-08T08:26:49 | 2018-11-10T22:32:26 | Python | UTF-8 | Python | false | false | 880 | py | import logging
import regex
from doc_curation.scraping.html_scraper import souper
from indic_transliteration import sanscript
def dump_item(item_url, outfile_path, title_maker):
logging.info(item_url)
def html_fixer(soup):
souper.tag_replacer(soup=soup, css_selector="table", tag_name="div")
souper.element_remover(soup=soup, css_selector="div.view-filters")
def md_fixer(md):
md = md.replace("।।", " ॥ ")
# md = md.replace(".", " - ")
md = md.replace(":", "ः")
md = md.replace("\n \n", "\n\n")
    md = regex.sub("\n{3,13}", "\n\n", md)  # fixed: the space inside {3, 13} made the braces match literally
md = sanscript.transliterate(md, sanscript.IAST, sanscript.DEVANAGARI)
return md
souper.dump_text_from_element(url=item_url, outfile_path=outfile_path, text_css_selector="div.content", title_maker=title_maker, title_prefix="", html_fixer=html_fixer, md_fixer=md_fixer, dry_run=False)
| [
"[email protected]"
] | |
ccaa8331ba45c7e092821f902d34302c3be64a4b | af4abf0a22db1cebae466c56b45da2f36f02f323 | /storage/team10/lib/Hash.py | c028a52b082c52b9030bc406caef42c3faf20994 | [
"MIT"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 12,784 | py | from Node import Node
from graphviz import Digraph
class TablaHash:
def __init__(self, size, name, nCols):
self.id = 0
self.Size = size-1
self.name = name
self.contadorNodo = 0
self.nCols = nCols
self.genericId = -1
self.pk = None
self.values = [None]*self.Size
self.inrehashing = False
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getSize(self):
return self.Size
def setSize(self, n):
self.Size = n
def getNodo(self):
return self.values
    # 'nodo' is expected to be a Node instance
def setNodo(self, nodo):
self.values = nodo
def alterAddPK(self, indices):
for i in indices:
try:
int(i)
except:
return 1
if i not in range(0, self.nCols):
return 5
if len(indices) <= self.nCols:
if not self.pk:
return self.recalculateKey(self.pk, indices)
# return 0
else:
# print("No se puede poner otra PK")
return 4
else:
return 5
def toASCII(self, cadena):
result = ""
aux = 0
comma = 0
for char in cadena:
if char != ",":
result += str(ord(char))
else:
comma += int(ord(char))
aux = int(result) + comma
result = str(aux)
return int(result)
def funcionHash(self, dato, flag = False):
if isinstance(dato, list):
lenDato = 0
res = ""
if flag:
for key in self.pk:
res += str(dato[key]) + ","
else:
for key in dato:
res += str(key) + ","
lenDato = self.toASCII(res)
            return (int(lenDato % self.Size), lenDato)  # changed here to return both (hash slot, raw key)
def insertIntoArray(self, dato, posicion_hash, key):
bandera = self.verificarDato(key, posicion_hash)
if self.values[posicion_hash] is not None:
if bandera:
nuevo_dato = self.values[posicion_hash]
nuevo_dato.insert(dato, key)
self.contadorNodo +=1
return 0
else:
return 4
else:
nuevo_dato = Node()
if self.pk:
nuevo_dato.pk = self.pk
else:
nuevo_dato.pk = self.genericId
nuevo_dato.isGeneric = True
nuevo_dato.insert(dato,key)
nuevo_dato.key = posicion_hash
self.values[posicion_hash] = nuevo_dato
self.contadorNodo +=1
return 0
def insert(self, dato):
if not self.inrehashing:
self.rehashing()
if isinstance(dato, list):
if len(dato) == self.nCols:
if self.pk:
                    # walk the previous nodes looking up their primary key
# for node in self.values:
# if node is not None and node.isGeneric:
# self.recalculateKey(node)
# node.isGeneric = False
posicion_hash = self.funcionHash(dato, True)
return self.insertIntoArray(dato, posicion_hash[0], posicion_hash[1]) #aqui manda las dos llaves
else:
posicion_hash = int(self.genericId % self.Size)
self.genericId += 1
return self.insertIntoArrayCSV(dato, posicion_hash, self.genericId)
else:
return 5
else:
return 1
def insertCSV(self, dato):
        if not self.inrehashing:  # fixed to match insert(); the old guard could never fire
self.rehashing()
if self.pk:
posicion_hash = self.funcionHash(dato, True)
return self.insertIntoArrayCSV(dato, posicion_hash[0], posicion_hash[1]) #aqui manda las dos llaves
else:
posicion_hash = int(self.genericId % self.Size)
self.genericId += 1
return self.insertIntoArrayCSV(dato, posicion_hash, self.genericId)
def insertIntoArrayCSV(self, dato, posicion_hash, key):
bandera = self.verificarDato(key, posicion_hash)
if self.values[posicion_hash] is not None:
if bandera:
nuevo_dato = self.values[posicion_hash]
nuevo_dato.insert(dato, key)
self.contadorNodo +=1
return 0
else:
return 4
else:
nuevo_dato = Node()
if self.pk:
nuevo_dato.pk = self.pk
else:
nuevo_dato.pk = self.genericId
nuevo_dato.isGeneric = True
nuevo_dato.insert(dato,key)
nuevo_dato.key = posicion_hash
self.values[posicion_hash] = nuevo_dato
self.contadorNodo +=1
return 0
def recalculateKey(self, newPk, indices):
listCol = []
data = []
ids = []
for node in self.values:
if node is not None:
for n in node.array:
d = n[1]
data.append(d)
key = ""
# ids = n[1][0]
# for i in n[1]:
for j in indices:
ids = n[1][j]
key += str(ids)
listCol.append(key)
if listCol.count(key) > 1:
return 1
else:
continue
# lista = self.values.copy()
self.values.clear()
self.values = [None]*self.Size
self.pk = indices
for d in data:
self.insert(d)
def truncate(self):
try:
self.values.clear()
return 0
except:
return 1
def editar(self, columna, modificacion, key):
posicion_hash = self.funcionHash(key)
nodo = self.values[posicion_hash[0]]
if nodo:
if columna not in self.pk:
respuesta = nodo.modificar(columna,modificacion,posicion_hash[1])
else:
return 4
if respuesta == 0:
return 0
elif respuesta == 4:
return 4
else:
return 1
else:
return 4
def ElementosEn_tbl(self):
auxiliar = 0
for nodo in self.values:
if nodo is not None:
auxiliar +=1
return auxiliar
def rehashing(self):
factorAgregado = int(self.Size * 0.75)
if self.contadorNodo >= factorAgregado:
            self.setSize(int(self.Size * 4))
            self.inrehashing = True
arrayAuxiliar = self.values[:]
self.values.clear()
self.values = [None]*self.Size
lista = [tupla for nodo in arrayAuxiliar if nodo is not None for tupla in nodo.array]
for j in lista:
self.insert(j[1])
arrayAuxiliar.clear()
self.inrehashing = False
def verificarDato(self, key, position):
aux_bol = False
if self.values[position] is not None:
if not self.values[position].buscarDato_binary(key):
aux_bol = True
return aux_bol
def eliminarDato(self, dato):
posicion_hash = self.funcionHash(dato)
nodo_hash = self.values[posicion_hash[0]]
if nodo_hash:
            resultado = nodo_hash.eliminar(posicion_hash[1])  # call once, not twice
            if resultado or resultado == 0:
                return 0
            else:
                return 1
else:
return 4
def printTbl(self):
if self.values:
for i in self.values:
if i and (len(i.array) > 0):
print(str(i.key) + " | " + str(i.array) + "\n")
else:
return "vacio"
def buscar(self, dato):
posicion_hash = self.funcionHash(dato)
nodo = self.values[posicion_hash[0]]
if nodo is not None:
return nodo.busquedaB(posicion_hash[1])
else:
return []
def printlistTbl(self):
listTbl=[]
if self.values:
for i in self.values:
if i :
new = str(i.key) + " | " + str(i.array).replace('[','')
new2 = new.replace(']','')
listTbl.append(new2)
else:
print("vacio")
return listTbl
    def imp1(self, columnNumber, lower, upper):  # collect values of a column that fall within [lower, upper]
listCol=[]
for nodo in self.values:
if nodo is not None:
#print(nodo.array)
if len(nodo.array)>1:
for subnodo in nodo.array:
val = nodo.imp_column(subnodo[1],columnNumber,lower,upper) ##
if val != None:
listCol.append(val)
else:
val = nodo.imp_column2(columnNumber,lower,upper) ##
if val != None:
listCol.append(val)
return listCol
    # adds the new column and assigns the given value
def alterAddColumn(self, dato):
if dato == []:
return 1
else:
self.nCols += 1
for i in self.values:
if i :
i.alterAddColumn(dato)
return 0
#19/12/2020
def getNumeroColumnas(self):
return self.nCols
def alterDropColumn(self, columnNumber):
if columnNumber in range(0, self.nCols):
if columnNumber <= self.nCols:
flag = False
if self.pk:
for key in self.pk:
if columnNumber == key:
return 4
pass
for i in self.values:
if i and len(i.array) > 1:
for j in i.array:
flag = True
j[1].pop(columnNumber)
pass
pass
if flag:
newKeys = []
if self.pk:
for key in self.pk:
if (key > columnNumber) and (key != 0):
key -= 1
newKeys.append(key)
self.nCols -= 1
self.pk = None
self.alterAddPK(newKeys)
return 0
else:
return 4
else:
return 4
else:
return 5
def alterDropPK(self):
if not self.pk:
return 4
else:
self.pk = None
for i in self.values:
if i:
i.isGeneric = True
return 0
#output_size = [ 4024,4024]
def genGraph(self, name):
f = Digraph("structs" , filename = name+".gv" , format = "svg",
node_attr={'shape' : 'record', } )
f.attr(rankdir='LR')
f.graph_attr['overlap']= 'false'
f.graph_attr['splines']= 'true'
hashTB = ''
contador = 0
for i in self.values:
if i:
hashTB += '<f' + str(contador) +'>' + str(i.key)+ '|'
contador +=1
hashTB = hashTB[0: len(hashTB)-1]
f.node('hash', hashTB,**{'height':str(50)})
datos = "{<n>"
for j in self.values:
count = 0
if j:
for i in j.array:
for k in i[1]:
datos += str(k) +"|"
datos+="<p>}"
with f.subgraph(name=str(j.key)+","+str(count) ) as a:
a.node("node" +str(j.key)+str(count),datos)
datos="{<n>"
count +=1
n = 0
for j in self.values:
m = 0
if j:
f.edges([("hash:f"+str(n), "node" +str(j.key)+str(0)+":n")])
for i in j.array:
if m+1 < len(j.array):
f.edges([("node" +str(j.key)+str(m)+":p", ("node"+str(j.key)+str(m+1)+":n" ))])
m+=1
n+=1
f.view()
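
# Example usage (hypothetical values, not in the original; assumes a 3-column table):
# tbl = TablaHash(11, "people", 3)
# tbl.alterAddPK([0])            # use column 0 as the primary key
# tbl.insert([1, "Ana", 20])
# tbl.insert([2, "Luis", 31])
# print(tbl.buscar([1]))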
| [
"[email protected]"
] | |
f72b255f1a70060f3fae7db94812b435d5bb8b2d | 818e5e78f84596a7c086b218fd4aa9e8ea912afe | /hackatons/materials/algo/source/T5_LinearStructure/P2_Queue/counter_game_deq.py | d78087eb4b0f2a733e40cb405b86b2885f5e47e4 | [] | no_license | davendiy/forpythonanywhere | 44fbc63651309598b58391667f0fead40e8fad91 | 1b9292ca33b06b17cd516e4e9913479edb6d35cd | refs/heads/master | 2020-08-10T04:24:02.665635 | 2019-10-25T07:05:46 | 2019-10-25T07:05:46 | 214,255,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | #Лічилка з використанням деку
from source.T5_LinearStructure.P2_Queue.counter_game import Player
from source.T5_LinearStructure.P2_Queue.Deque import Deque
def count_counter():
""" Функція розв'язує задачу "лічилка" """
d = Deque() # створити дек d
n = int(input('Кількість гравців: '))
m = int(input('Кількість слів: '))
for i in range(n):
        pl = Player(i+1)  # create a player numbered i+1
        d.append(pl)  # append the player to the back of the deque
print('\nПослідовність номерів, що вибувають')
while not d.empty():
        for i in range(m-1):  # move a player from the front to the back m-1 times
d.append(d.popleft())
        pl = d.popleft()  # take the m-th player from the front of the deque
        print(pl)  # and show their number
count_counter()
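
# Worked example (assuming Deque behaves like collections.deque):
# with n=5 players and m=3 words, the elimination order is 3, 1, 5, 2, 4.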
| [
"[email protected]"
] | |
8333137c128e54828c5eee264b4aee1b358fa310 | f0a1a85e8cae69144ce304d4c91b53b8f8cf5116 | /mysite/blog/models.py | 6bc8deb3e116bb18d028e1c31219e4e91c1c6bb9 | [
"MIT"
] | permissive | ohduran-attempts/by-example | 0a96b59cf41e3c955e8e744b0604c909168fd998 | a56385c169d426090970f3f481d15fec50a9c603 | refs/heads/master | 2020-04-22T16:41:53.512719 | 2019-02-15T07:32:51 | 2019-02-15T07:32:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | from common.models import TimeStamped
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils import timezone
from taggit.managers import TaggableManager
class PublishedManager(models.Manager):
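    """Model manager that narrows queries to posts with status='published'."""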
def get_queryset(self):
return super().get_queryset().filter(status='published')
class Post(TimeStamped, models.Model):
STATUS_CHOICES_TPL = (
('draft', 'Draft'),
('published', 'Published'),
)
objects = models.Manager()
published = PublishedManager()
tags = TaggableManager()
title = models.CharField(max_length=250)
slug = models.SlugField(max_length=250,
unique_for_date='publish')
author = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='blog_posts')
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
status = models.CharField(max_length=10,
choices=STATUS_CHOICES_TPL,
default='draft')
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse_lazy('blog:post_detail',
args=[self.publish.year,
self.publish.month,
self.publish.day,
self.slug])
class Comment(TimeStamped, models.Model):
post = models.ForeignKey(Post,
on_delete=models.CASCADE,
related_name='comments')
name = models.CharField(max_length=80)
email = models.EmailField()
body = models.TextField()
active = models.BooleanField(default=True)
class Meta:
ordering = ('created',)
def __str__(self):
return f"Comment by {self.name} on {self.post}"
| [
"[email protected]"
] | |
a25581851ebc08774e92788b3f4f132d9410d65a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py | 21f9a34973365b59715f06155431771e0ff8f61e | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,112 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_8.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 720, 1920],
out_channels=256,
num_outs=5))
| [
"[email protected]"
] | |
bffe3775877350a0d53f049549cc6499bd1d2cee | 36901e58fbdeabc7380ae2c0278010b2c51fe54d | /gatheros_subscription/urls/me.py | 4823370a6d4c79d1b4002d326f190346c0136ed1 | [] | no_license | hugoseabra/congressy | e7c43408cea86ce56e3138d8ee9231d838228959 | ac1e9b941f1fac8b7a13dee8a41982716095d3db | refs/heads/master | 2023-07-07T04:44:26.424590 | 2021-08-11T15:47:02 | 2021-08-11T15:47:02 | 395,027,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.conf.urls import include, url
from gatheros_subscription import views
urls = [
url(
r'^subscriptions/$',
views.MySubscriptionsListView.as_view(),
name='my-subscriptions'
),
]
urlpatterns_me = [url(r'^me/', include(urls))]
| [
"[email protected]"
] | |
070fc92166fd5c5e64836d1cf9676f441f1cdd5c | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_6404600001200128_1/Python/ihadanny/r1_p1.py | f67fc6f333dd5df96ae47855e77a0df26307669e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from sys import stdin
import re
import operator
import bisect
import sys
import random
cases = int(stdin.next().strip())
for case in range(1, cases+1):
N = int(stdin.next().strip())
M = map(int, stdin.next().split())
drops = [max(i-j,0) for i, j in zip(M[:-1], M[1:])]
max_eaten = [min(max(drops), x) for x in M[:-1]]
print 'Case #%d: %d %d' % (case, sum(drops), sum(max_eaten)) | [
"[email protected]"
] | |
81370fb27ca8ee771d8333b297381817241fd383 | 9193e2743434893c76e45b85a6a2ebcef71e8e2d | /ch03/ans27.py | 7e4795a48c12edf941443c284fa07ea89d030dc3 | [] | no_license | kyodocn/nlp100v2020 | d4f06a0eb089d7f056aa00817f79199fb4edfed2 | 99c66511352092a0f4c5028b1f440e09d6401331 | refs/heads/master | 2022-04-15T02:43:12.003780 | 2020-04-13T18:41:15 | 2020-04-13T18:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import re
import pandas as pd
df = pd.read_json('ch03/jawiki-country.json.gz', lines=True)
ukText = df.query('title=="イギリス"')['text'].values
ls, fg = [], False
template = '基礎情報'
p1 = re.compile('\{\{' + template)
p2 = re.compile('\}\}')
p3 = re.compile('\|')
p4 = re.compile('<ref(\s|>).+?(</ref>|$)')
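# p1/p2 delimit the 基礎情報 (basic info) template block, p3 marks its field
# lines, and p4 strips <ref> markup from the values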
for l in ukText[0].split('\n'):
if fg:
ml = [p2.match(l), p3.match(l)]
if ml[0]:
break
if ml[1]:
ls.append(p4.sub('', l.strip()))
if p1.match(l):
fg = True
p = re.compile('\|(.+?)\s=\s(.+)')
ans = {m.group(1): m.group(2) for m in [p.match(c) for c in ls]}
r = re.compile('\[\[(.+\||)(.+?)\]\]')
ans = {k: r.sub(r'\2', v) for k, v in ans.items()}
print(ans)
| [
"[email protected]"
] | |
a2111854ac54c26359b72bf65a3d4e34aa50b31e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /EYojuPCtvSzF2chkZ_1.py | d247c0967894694c7c4e84c2701804484f99a9dd | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | """
Create a function that returns the selected **filename** from a path. Include
the **extension** in your answer.
### Examples
get_filename("C:/Projects/pil_tests/ascii/edabit.txt") ➞ "edabit.txt"
get_filename("C:/Users/johnsmith/Music/Beethoven_5.mp3") ➞ "Beethoven_5.mp3"
get_filename("ffprobe.exe") ➞ "ffprobe.exe"
### Notes
* Tests will include both absolute and relative paths.
* For simplicity, all paths will include forward slashes.
"""
from pathlib import PurePath
def get_filename(path):
return PurePath(path).name
| [
"[email protected]"
] | |
fde97c8249d30b9f96310f9a0f91c45db0dcdc11 | 4fe971fdd0fb1d87b2bfaa5fe4b249b121501836 | /vignewton/managers/admin/images.py | a76a68be13c22e69ecf041c2f50c32321f7ec221 | [
"Unlicense"
] | permissive | umeboshi2/vignewton | 709c3395b74951385d1d3f9a932e4e6a6c1e0350 | bf55f90a25ae616e003ff0f71643dbe5084e924f | refs/heads/master | 2021-01-20T13:47:26.052679 | 2013-10-25T18:36:29 | 2013-10-25T18:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from cStringIO import StringIO
from datetime import datetime
import transaction
from PIL import Image
from vignewton.models.sitecontent import SiteImage
class ImageManager(object):
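    """Session-backed create/delete helper for SiteImage rows; builds a JPEG
    thumbnail for every stored image."""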
def __init__(self, session):
self.session = session
self.thumbnail_size = 128, 128
def images_query(self):
return self.session.query(SiteImage)
def make_thumbnail(self, content):
imgfile = StringIO(content)
img = Image.open(imgfile)
img.thumbnail(self.thumbnail_size, Image.ANTIALIAS)
outfile = StringIO()
img.save(outfile, 'JPEG')
outfile.seek(0)
thumbnail_content = outfile.read()
return thumbnail_content
def add_image(self, name, fileobj):
content = fileobj.read()
with transaction.manager:
image = SiteImage(name, content)
image.thumbnail = self.make_thumbnail(content)
self.session.add(image)
return self.session.merge(image)
def delete_image(self, id):
with transaction.manager:
image = self.session.query(SiteImage).get(id)
self.session.delete(image)
| [
"[email protected]"
] | |
9f77e916c511b53114f58ea7fa8a56b79e0034a7 | 7a8bb4c1de15f987e3231590eae74c051bf33726 | /SJVA_Scanner_KoreaTV_Download.py | 6a40cfa985904b82d46ef3644e0cc39210ea8b19 | [] | no_license | sunyruru/SJVA-Scanners | cbe6efa56be4c74a96059a91b32b60ff2ba4f3b6 | 5028c8c4aa58d4514f77ab46f3155f288c64b6f5 | refs/heads/master | 2020-04-21T13:40:04.306951 | 2019-01-28T08:21:35 | 2019-01-28T08:21:35 | 169,606,889 | 2 | 0 | null | 2019-02-07T16:53:39 | 2019-02-07T16:53:39 | null | UTF-8 | Python | false | false | 3,916 | py | # -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re, os, os.path
import Media, VideoFiles, Stack, Utils
import time, json, traceback, io
episode_regexps = [
    r'(?P<show>.*?)[\s\.]E?(?P<ep>\d{1,2})[\-\~]E?\d{1,2}', # so merged multi-episode files match too
r'(?P<show>.*?)[eE](?P<ep>[0-9]{1,4})'
]
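# e.g. 'Show.E01-E02.720p.mp4' matches the first pattern; 'Show.E03.mp4' the second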
date_regexps = [
    r'(?P<show>.*?)[^0-9a-zA-Z](?P<year>[0-9]{2})(?P<month>[0-9]{2})(?P<day>[0-9]{2})[^0-9a-zA-Z]', # 6-digit air date (YYMMDD)
]
try:
import logging
import logging.handlers
logger = logging.getLogger('sjva_scanner')
logger.setLevel(logging.ERROR)
formatter = logging.Formatter(u'[%(asctime)s|%(lineno)s]:%(message)s')
#file_max_bytes = 10 * 1024 * 1024
filename = os.path.join(os.path.dirname( os.path.abspath( __file__ ) ), '../../', 'Logs', 'sjva.scanner.korea.tv.download.log')
fileHandler = logging.FileHandler(filename, encoding='utf8')
#fileHandler = logging.handlers.RotatingFileHandler(filename=filename), maxBytes=file_max_bytes, backupCount=5, encoding='euc-kr')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
except:
pass
def Scan(path, files, mediaList, subdirs, language=None, root=None):
VideoFiles.Scan(path, files, mediaList, subdirs, root)
paths = Utils.SplitPath(path)
shouldStack = True
logger.debug('=====================================================')
logger.debug('- path:%s' % path)
logger.debug('- files count:%s' % len(files))
logger.debug('- subdir count:%s' % len(subdirs))
for _ in subdirs:
logger.debug(' * %s' % _)
if len(paths) != 0:
logger.debug('- paths[0] : %s' % paths[0])
logger.debug('- files count : %s', len(files))
for i in files:
tempDone = False
try:
file = os.path.basename(i)
logger.debug(' * FILE : %s' % file)
#for idx, rx in enumerate(episode_regexps):
for rx in episode_regexps:
match = re.search(rx, file, re.IGNORECASE)
if match:
show = match.group('show').replace('.', '') if match.groupdict().has_key('show') else ''
season = 1
episode = int(match.group('ep'))
name, year = VideoFiles.CleanName(show)
name = re.sub(r'((.*?기획)|(미니시리즈)|(.*?드라마)|(.*?특집))', '', name).strip()
logger.debug(' - MATCH show:[%s] name:[%s] episode:[%s] year:[%s]', show, name, episode, year)
if len(name) > 0:
tv_show = Media.Episode(name, season, episode, '', year)
tv_show.display_offset = 0
tv_show.parts.append(i)
mediaList.append(tv_show)
logger.debug(' - APPEND by episode: %s' % tv_show)
tempDone = True
break
if tempDone == False:
for rx in date_regexps:
match = re.search(rx, file)
if match:
year = int(match.group('year')) + 2000
month = int(match.group('month'))
day = int(match.group('day'))
show = match.group('show')
tv_show = Media.Episode(show, year, None, None, None)
tv_show.released_at = '%d-%02d-%02d' % (year, month, day)
tv_show.parts.append(i)
mediaList.append(tv_show)
logger.debug(' - APPEND by date: %s' % tv_show)
tempDone = True
break
if tempDone == False:
logger.error(' NOT APPEND!!')
except Exception, e:
logger.error(e)
if shouldStack:
Stack.Scan(path, files, mediaList, subdirs)
| [
"[email protected]"
] | |
9e0ed93c65839146d4639537314916ed89f2de42 | cdd5c3238ba9feba53f95a04c247a846b15ecd09 | /code/client/munkilib/updatecheck/unused_software.py | 6c770cb491ffacf602e09ea131244321d63ffc2c | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | munki/munki | 13d786513f8fd5dba6f533bfbea76d28c4836d8e | d3c9eb4ffccd280fe3e4bbce9544171cb6c2cc80 | refs/heads/main | 2023-08-27T23:19:04.095339 | 2023-08-01T23:44:10 | 2023-08-01T23:44:10 | 24,219,473 | 2,890 | 474 | NOASSERTION | 2023-08-22T15:15:44 | 2014-09-19T06:51:32 | Python | UTF-8 | Python | false | false | 5,577 | py | # encoding: utf-8
#
# Copyright 2017-2023 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
updatecheck.unused_software
Created by Greg Neagle on 2017-02-18.
Functions for removing unused optional install items
"""
from __future__ import absolute_import, print_function
# Apple frameworks via PyObjC
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from AppKit import NSWorkspace
# pylint: enable=E0611
# our libs
from .. import app_usage
from .. import display
def bundleid_is_running(app_bundleid):
'''Returns a boolean indicating if the application with the given
bundleid is currently running.'''
workspace = NSWorkspace.sharedWorkspace()
running_apps = workspace.runningApplications()
for app in running_apps:
if app.bundleIdentifier() == app_bundleid:
return True
return False
def bundleids_from_installs_list(pkginfo_pl):
'''Extracts a list of application bundle_ids from the installs list of a
pkginfo item'''
installs_list = pkginfo_pl.get('installs', [])
bundle_ids = [item.get('CFBundleIdentifier') for item in installs_list
if (item.get('CFBundleIdentifier') and
item.get('type') == 'application'
or (item.get('type') == 'bundle' and
item.get('path', '').endswith('.app')))]
return bundle_ids
def should_be_removed(item_pl):
"""Determines if an optional install item should be removed due to lack of
use.
Returns a boolean."""
name = item_pl['name']
removal_info = item_pl.get('unused_software_removal_info')
# do we have unused_software_removal_info?
if not removal_info:
return False
display.display_debug1(
'\tChecking to see if %s should be removed due to lack of use...', name)
try:
removal_days = int(removal_info.get('removal_days', 0))
if removal_days < 1:
raise ValueError
except ValueError:
display.display_warning('Invalid removal_days: %s for item %s'
% (removal_info.get('removal_days'), name))
return False
display.display_debug1(
'\t\tNumber of days until removal is %s', removal_days)
usage = app_usage.ApplicationUsageQuery()
usage_data_days = usage.days_of_data()
if usage_data_days is None or usage_data_days < removal_days:
# we don't have usage data old enough to judge
display.display_debug1(
'\t\tApplication usage data covers fewer than %s days.',
removal_days)
return False
# check to see if we have an install request within the removal_days
days_since_install_request = usage.days_since_last_install_event(
'install', name)
if (days_since_install_request is not None and
days_since_install_request != -1 and
days_since_install_request <= removal_days):
display.display_debug1('\t\t%s had an install request %s days ago.',
name, days_since_install_request)
return False
# get list of application bundle_ids to check
if 'bundle_ids' in removal_info:
bundle_ids = removal_info['bundle_ids']
else:
# get application bundle_ids from installs list
bundle_ids = bundleids_from_installs_list(item_pl)
if not bundle_ids:
display.display_debug1('\\tNo application bundle_ids to check.')
return False
# now check each bundleid to see if it's currently running or has been
# activated in the past removal_days days
display.display_debug1('\t\tChecking bundle_ids: %s', bundle_ids)
for bundle_id in bundle_ids:
if bundleid_is_running(bundle_id):
display.display_debug1(
'\t\tApplication %s is currently running.' % bundle_id)
return False
days_since_last_activation = usage.days_since_last_usage_event(
'activate', bundle_id)
if days_since_last_activation == -1:
display.display_debug1(
'\t\t%s has not been activated in more than %s days...',
bundle_id, usage.days_of_data())
elif days_since_last_activation <= removal_days:
display.display_debug1('\t\t%s was last activated %s days ago',
bundle_id, days_since_last_activation)
return False
else:
display.display_debug1('\t\t%s was last activated %s days ago',
bundle_id, days_since_last_activation)
# if we get this far we must not have found any apps used in the past
# removal_days days, so we should set up a removal
display.display_info('Will add %s to the removal list since it has been '
'unused for at least %s days...', name, removal_days)
return True
if __name__ == '__main__':
print('This is a library of support tools for the Munki Suite.')
| [
"[email protected]"
] | |
7942255ce3e00ae3769a7cdbbb8edc73fc986e87 | 6b1cac18b81a4704c310fb30a30e2906c6137511 | /onepanman_api/views/api/notice.py | 26a0a1f3a0327c3e7ae9f34146fc170cb14d8ea3 | [
"MIT"
] | permissive | Capstone-onepanman/api-server | 973c73a4472637e5863d65ae90ec53db83aeedf7 | 1a5174fbc441d2718f3963863590f634ba2014e1 | refs/heads/master | 2022-12-09T22:43:23.720837 | 2020-03-20T00:43:21 | 2020-03-20T00:43:21 | 234,227,137 | 0 | 0 | MIT | 2022-12-08T02:37:19 | 2020-01-16T03:29:36 | Python | UTF-8 | Python | false | false | 247 | py | from rest_framework import viewsets
from onepanman_api.models import Notice
from onepanman_api.serializers.notice import NoticeSerializer
class NoticeViewSet(viewsets.ModelViewSet):
    queryset = Notice.objects.all()  # DRF expects a queryset here, not the bare model class
serializer_class = NoticeSerializer
| [
"[email protected]"
] | |
87be5e5ab69a0137437df7340bd28ac0c71105e1 | 88994e2e840a70ec702cee09e1a13813aa6f800c | /cg/constants/backup.py | 16d3c41653d24f57dbede48641c08572ba634d6f | [] | no_license | Clinical-Genomics/cg | 1e9eb0852f742d555a48e8696914ebe177f7d436 | d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08 | refs/heads/master | 2023-09-01T02:04:04.229120 | 2023-08-31T13:50:31 | 2023-08-31T13:50:31 | 82,567,026 | 19 | 8 | null | 2023-09-14T15:24:13 | 2017-02-20T14:29:43 | Python | UTF-8 | Python | false | false | 35 | py | MAX_PROCESSING_FLOW_CELLS: int = 1
| [
"[email protected]"
] | |
dd17276b517f0934344b4de656f26eca45e56c03 | df9b342f71cee4306c52ee5e29d105f8712d7439 | /BOJ/하노이탑/다른사람.py | b447ae5dea94644e425cae796590c5652794ad21 | [] | no_license | qkreltms/problem-solvings | a3fbd93d5664830761c70ef6a476e94ada399af0 | cade3fc738c0b7b40ae4bf0385fdd552313ad5a1 | refs/heads/master | 2023-07-19T08:17:48.580833 | 2021-08-31T08:45:57 | 2021-08-31T08:45:57 | 136,621,853 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def f(n, a, b, c):
if(n == 1):
print(a, c, sep = " ")
else:
f(n-1, a, c, b)
f(1, a, b, c)
f(n-1, b, a, c)
n = int(input())
print(2**n-1)
if(n <= 20):
f(n, 1, 2, 3)
| [
"[email protected]"
] |