metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "10DarkShadow01/Emotional-Intelligence-alpha-",
"score": 2
} |
#### File: 10DarkShadow01/Emotional-Intelligence-alpha-/calculate_features.py
```python
import wave
import numpy as np
import math
import os
import pickle
import cf
def weighting(x):
# NOTE: weighting_type is expected to be set at module level before this function is called.
if weighting_type == 'cos':
return x * np.sin(np.pi * np.arange(len(x)) / (len(x) - 1))
elif weighting_type == 'hamming':
return x * (0.54 - 0.46 * np.cos(2. * np.pi * np.arange(len(x)) / (len(x) - 1)))
else:
return x
def calculate_features(frames, freq, options):
n = len(frames)
window_sec = 0.2
window_n = int(freq * window_sec)
use_derivatives = False
st_f = cf.stFeatureExtraction(frames, freq, window_n, window_n / 2)
if st_f.shape[1] > 2:
i0 = 1
i1 = st_f.shape[1] - 1
if i1 - i0 < 1:
i1 = i0 + 1
if use_derivatives:
deriv_st_f = np.zeros((st_f.shape[0]*3, i1 - i0), dtype=float)
else:
deriv_st_f = np.zeros((st_f.shape[0], i1 - i0), dtype=float)
for i in xrange(i0, i1):
i_left = i - 1
i_right = i + 1
deriv_st_f[:st_f.shape[0], i - i0] = st_f[:, i]
if use_derivatives:
if st_f.shape[1] >= 2:
deriv_st_f[st_f.shape[0]:st_f.shape[0]*2, i - i0] = (st_f[:, i_right] - st_f[:, i_left]) / 2.
deriv_st_f[st_f.shape[0]*2:st_f.shape[0]*3, i - i0] = st_f[:, i] - 0.5*(st_f[:, i_left] + st_f[:, i_right])
return deriv_st_f
elif st_f.shape[1] == 2:
deriv_st_f = np.zeros((st_f.shape[0], 1), dtype=float)
deriv_st_f[:st_f.shape[0], 0] = st_f[:, 0]
if use_derivatives:
deriv_st_f[st_f.shape[0]:st_f.shape[0]*2, 0] = st_f[:, 1] - st_f[:, 0]
deriv_st_f[st_f.shape[0]*2:st_f.shape[0]*3, 0] = np.zeros(st_f.shape[0])
return deriv_st_f
else:
deriv_st_f = np.zeros((st_f.shape[0], 1), dtype=float)
deriv_st_f[:st_f.shape[0], 0] = st_f[:, 0]
if use_derivatives:
deriv_st_f[st_f.shape[0]:st_f.shape[0]*2, 0] = np.zeros(st_f.shape[0])
deriv_st_f[st_f.shape[0]*2:st_f.shape[0]*3, 0] = np.zeros(st_f.shape[0])
return deriv_st_f
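# When use_derivatives is enabled, rows [0, d) of deriv_st_f hold the raw short-term
# features, rows [d, 2d) a central first difference, and rows [2d, 3d) a
# second-difference style term, where d = st_f.shape[0].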
# Vocal feature extraction ends at this point; updates for the memory stick and other emotional composite models will be calculated here.
```
#### File: 10DarkShadow01/Emotional-Intelligence-alpha-/code_yan_lstm.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import wave
import numpy as np
import math
import os
import pickle
import models
import csv
import calculate_features as cf
np.random.seed(200)
regime = 'aibo5'
def get_params(regime):
if regime == 'iemocap':
# the 'fru' (frustration) label is opted out; four emotion classes are used for testing
available_emotions = ['ang', 'exc', 'neu', 'sad']
path_to_samples = 'iem_samples/'
conf_matrix_prefix = 'iemocap'
framerate = 44100
return available_emotions, '', '', '', path_to_samples, conf_matrix_prefix, framerate, '', 0
elif regime == 'aibo4':
available_emotions = ['N', 'M', 'E', 'A']
path_to_wav = 'aibo_data/wav/'
path_to_transcription = 'aibo_data/transliteration/'
path_to_labels = 'aibo_data/labels/CEICES/'
path_to_samples = 'yan_samples_lstm4/'
conf_matrix_prefix = 'aibo4'
labels_file = 'word_labels_4cl_aibo_word_set.txt'
label_pos = 2
framerate = 16000
return available_emotions, path_to_wav, path_to_transcription, path_to_labels, path_to_samples, conf_matrix_prefix, framerate, labels_file, label_pos
else:
available_emotions = ['N', 'R', 'E', 'A', 'P']
path_to_wav = 'aibo_data/wav/'
path_to_transcription = 'aibo_data/transliteration/'
path_to_labels = 'aibo_data/labels/IS2009EmotionChallenge/'
path_to_samples = 'yan_samples_lstm5/'
conf_matrix_prefix = 'aibo5'
labels_file = 'chunk_labels_5cl_corpus.txt'
framerate = 16000
label_pos = 1
return available_emotions, path_to_wav, path_to_transcription, path_to_labels, path_to_samples, conf_matrix_prefix, framerate, labels_file, label_pos
available_emotions, path_to_wav, path_to_transcription, path_to_labels, path_to_samples, conf_matrix_prefix, framerate, labels_file, label_pos = get_params(regime)
segmentation = 'by_phrase'
types = {1: np.int8, 2: np.int16, 4: np.int32}
def open_wav(path_to_wav, filename):
wav = wave.open(path_to_wav + filename, mode="r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wav.getparams()
content = wav.readframes(nframes)
samples = np.fromstring(content, dtype=types[sampwidth])
return (nchannels, sampwidth, framerate, nframes, comptype, compname), samples
def read_lines(f):
lines = open(f, 'r').read()
return np.array(lines.split('\n'))
def get_needed_lines(lines, i, subline):
result = []
j = i
condition = True
while condition:
if lines[j][:len(subline)] == subline:
result.append(lines[j])
j += 1
else:
condition = False
return result, j
def get_label(expert_grades):
u = np.unique(expert_grades)
grade = u[0]
count = expert_grades[expert_grades == grade].shape[0]
for ui in u:
current_count = expert_grades[expert_grades == ui].shape[0]
if current_count > count:
grade = ui
count = current_count
return grade
def read_aibo_data():
data = []
files = np.sort([f[:-4] + '.wav' for f in os.listdir(path_to_wav)])
transliterations = read_lines(path_to_transcription + 'transliteration.txt')
labels = read_lines(path_to_labels + labels_file)
index_t = 0
index_l = 0
c = 0
for fi, f in enumerate(files):
d = {}
if fi % 1000 == 0:
print f, fi, ' out of ', len(files)
w = open_wav(path_to_wav, f)
signal = w[1]
length = w[0][3] / float(w[0][2])
t, index_t = get_needed_lines(transliterations, index_t, f[:-4])
l, index_l = get_needed_lines(labels, index_l, f[:-4])
if l != []:
grades = []
em = l[0].split(' ')[label_pos][0]
d['id'] = f[:-4]
d['emotion'] = em
d['signal'] = signal
d['transcription'] = t[0][14:-2]
d['length'] = length
if (d['emotion'] in available_emotions) and (d['length'] > 0.8):
data.append(d)
else:
c += 1
print 'missed: ', c
return data
sessions = ['Session1', 'Session2', 'Session3', 'Session4', 'Session5']
def get_transcriptions(path_to_transcriptions, filename):
f = open(path_to_transcriptions + filename, 'r').read()
f = np.array(f.split('\n'))
transcription = {}
for i in xrange(len(f) - 1):
g = f[i]
i1 = g.find(': ')
i0 = g.find(' [')
ind_id = g[:i0]
ind_ts = g[i1+2:]
transcription[ind_id] = ind_ts
return transcription
def split_wav(wav, emotions):
(nchannels, sampwidth, framerate, nframes, comptype, compname), samples = wav
duration = nframes / framerate
left = samples[0::nchannels]
right = samples[1::nchannels]
frames = []
for ie, e in enumerate(emotions):
start = e['start']
end = e['end']
e['right'] = right[int(start * framerate):int(end * framerate)]
e['left'] = left[int(start * framerate):int(end * framerate)]
frames.append({'left':e['left'], 'right': e['right']})
return frames
def get_emotions(path_to_emotions, filename):
f = open(path_to_emotions + filename, 'r').read()
f = np.array(f.split('\n'))
c = 0
idx = f == ''
idx_n = np.arange(len(f))[idx]
emotion = []
for i in xrange(len(idx_n) - 2):
g = f[idx_n[i]+1:idx_n[i+1]]
head = g[0]
i0 = head.find(' - ')
start_time = float(head[head.find('[') + 1:head.find(' - ')])
end_time = float(head[head.find(' - ') + 3:head.find(']')])
actor_id = head[head.find(filename[:-4]) + len(filename[:-4]) + 1:head.find(filename[:-4]) + len(filename[:-4]) + 5]
emo = head[head.find('\t[') - 3:head.find('\t[')]
vad = head[head.find('\t[') + 1:]
v = float(vad[1:7])
a = float(vad[9:15])
d = float(vad[17:23])
emotion.append({'start':start_time,
'end':end_time,
'id':filename[:-4] + '_' + actor_id,
'v':v,
'a':a,
'd':d,
'emotion':emo})
return emotion
def read_iemocap_data():
data = []
for session in sessions:
print session
path_to_wav = 'iemocap_data/' + session + '/dialog/wav/'
path_to_emotions = 'iemocap_data/' + session + '/dialog/EmoEvaluation/'
path_to_transcriptions = 'iemocap_data/' + session + '/dialog/transcriptions/'
files = os.listdir(path_to_wav)
files = [f[:-4] for f in files]
print len(files)
print files
for f in files:
emotions = get_emotions(path_to_emotions, f + '.txt')
wav = open_wav(path_to_wav, f + '.wav')
sample = split_wav(wav, emotions)
transcriptions = get_transcriptions(path_to_transcriptions, f + '.txt')
for ie, e in enumerate(emotions):
if e['emotion'] in available_emotions:
e['signal'] = sample[ie]['left']
e['transcription'] = transcriptions[e['id']]
data.append(e)
return data
def read_data():
if regime == 'aibo4' or regime == 'aibo5':
return read_aibo_data()
else:
return read_iemocap_data()
def check_all_finite(X):
X = np.asanyarray(X)
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
return True
else:
return False
def to_categorical(y):
y_cat = np.zeros((len(y), len(available_emotions)), dtype=int)
for i in xrange(len(y)):
y_cat[i, :] = np.array(np.array(available_emotions) == y[i], dtype=int)
return y_cat
def save_sample(x, y, name):
with open(name, 'w') as csvfile:
w = csv.writer(csvfile, delimiter=',')
for i in xrange(x.shape[0]):
row = x[i, :].tolist()
row.append(y[i])
w.writerow(row)
def load(name):
with open(name, 'r') as csvfile:
r = csv.reader(csvfile, delimiter=',')
x = []
y = []
for row in r:
x.append(row[:-1])
y.append(row[-1])
return np.array(x, dtype=float), np.array(y)
def get_features(data, save=True, path=path_to_samples):
failed_samples = []
for di, d in enumerate(data):
if di%1000 == 0:
print di, ' out of ', len(data)
st_features = cf.calculate_features(d['signal'], framerate, None).T
x = []
y = []
for f in st_features:
if f[1] > 1.e-4:
x.append(f)
y.append(d['emotion'])
x = np.array(x, dtype=float)
y = np.array(y)
if save:
save_sample(x, y, path + d['id'] + '.csv')
return x, y
def get_field(data, key):
return np.array([e[key] for e in data])
def balance_sample(x, y, size):
labels = np.unique(y)
xc = {}
yc = {}
for l in labels:
xc[l] = x[y == l]
yc[l] = y[y == l]
s = size / len(labels)
tx = np.zeros((s*len(labels), x.shape[1]), dtype=float)
ty = np.zeros(s*len(labels), dtype=str)
for i in xrange(len(labels)):
j = i*s
n = xc[labels[i]].shape[0]
idx = np.random.randint(low=0, high=n, size=s)
tx[j:j+s, :] = xc[labels[i]][idx, :]
ty[j:j+s] = yc[labels[i]][idx]
return tx, ty
def get_sample(idx, path):
tx = []
ty = []
for i in idx:
x, y = load(path + '/' + i + '.csv')
if len(x) < 40:
tx.append(np.array(x, dtype=float))
ty.append(y[0])
tx = np.array(tx)
ty = np.array(ty)
return tx, ty
def normalize(x):
gminx = np.zeros(x[0].shape[1]) + 1.e5
gmaxx = np.zeros(x[0].shape[1]) - 1.e5
for i in xrange(x.shape[0]):
q = x[i]
minx = np.min(q, axis=0)
maxx = np.max(q, axis=0)
for s in xrange(x[0].shape[1]):
if gminx[s] > minx[s]:
gminx[s] = minx[s]
if gmaxx[s] < maxx[s]:
gmaxx[s] = maxx[s]
for i in xrange(x.shape[0]):
for s in xrange(x[0].shape[1]):
x[i][:, s] = (x[i][:, s] - gminx[s]) / float(gmaxx[s] - gminx[s])
return x
def grow_sample(x, y, n=10000):
xg = []
yg = []
eps = 5.*1.e-2
for i in xrange(n):
j = np.random.randint(x.shape[0])
x0 = x[j]
x0 += eps * np.random.normal(0, 1, size=x0.shape)
y0 = y[j]
xg.append(x0)
yg.append(y0)
return np.array(xg), np.array(yg)
def reshape_for_dense(x, y):
j = 0
xr = []
yr = []
for i in xrange(x.shape[0]):
for k in xrange(x[i].shape[0]):
xr.append(x[i][k, :])
yr.append(y[i])
return np.array(xr), np.array(yr)
def pad_sequence(x, ts):
xp = []
for i in xrange(x.shape[0]):
x0 = np.zeros((ts, x[i].shape[1]), dtype=float)
if ts > x[i].shape[0]:
x0[ts - x[i].shape[0]:, :] = x[i]
else:
# keep the ts-frame window with the largest total energy (feature column 1);
# start from the first window so x0 is never left as all zeros
x0 = x[i][0:ts, :]
maxe = np.sum(x[i][0:ts, 1])
for j in xrange(x[i].shape[0] - ts):
if np.sum(x[i][j:j + ts, 1]) > maxe:
x0 = x[i][j:j + ts, :]
maxe = np.sum(x[i][j:j + ts, 1])
xp.append(x0)
return np.array(xp)
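# pad_sequence normalizes every variable-length feature sequence to exactly ts frames:
# shorter sequences are left-padded with zero rows, longer ones keep the ts-frame window
# whose energy column (index 1) sums highest. For example, with ts = 3 a (2, k) sequence
# becomes [zero-row, row0, row1], while a (10, k) sequence is cut to its highest-energy
# 3-frame window.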
data = np.array(read_data())
print data
print len(data)
ids = get_field(data, 'id')
emotions = get_field(data, 'emotion')
print np.unique(emotions)
for i in xrange(len(available_emotions)):
print available_emotions[i], emotions[emotions == available_emotions[i]].shape[0]
parts = 5
permutation = np.random.permutation(len(data))
permuted_ids = ids[permutation]
step = len(data) / parts
preds = []
trues = []
get_features(data)
for part in xrange(parts):
i0 = step * part
i1 = step * (part + 1)
train_idx = np.append(permuted_ids[:i0], permuted_ids[i1:])
test_idx = permuted_ids[i0:i1]
train_x, train_y = get_sample(train_idx, path_to_samples)
test_x, test_y = get_sample(test_idx, path_to_samples)
print train_x.shape
print train_y.shape
ts = 32
train_x = pad_sequence(train_x, ts)
test_x = pad_sequence(test_x, ts)
train_y_cat = to_categorical(train_y)
test_y_cat = to_categorical(test_y)
model = models.train_lstm(train_x, train_y_cat, test_x, test_y_cat)
scores = model.predict(test_x)
prediction = np.array([available_emotions[np.argmax(t)] for t in scores])
print prediction[prediction == test_y].shape[0] / float(prediction.shape[0])
for i in xrange(len(prediction)):
preds.append(prediction[i])
trues.append(test_y[i])
class_to_class_precs = np.zeros((len(available_emotions), len(available_emotions)), dtype=float)
preds = np.array(preds)
trues = np.array(trues)
print 'Total accuracy: ', preds[preds == trues].shape[0] / float(preds.shape[0])
for cpi, cp in enumerate(available_emotions):
for cti, ct in enumerate(available_emotions):
if trues[trues == ct].shape[0] > 0:
class_to_class_precs[cti, cpi] = preds[(preds == cp) * (trues == ct)].shape[0] / float(trues[trues == ct].shape[0])
else:
class_to_class_precs[cti, cpi] = 0.
fig, ax = plt.subplots()
heatmap = ax.pcolor(class_to_class_precs.T, cmap=plt.cm.Blues)
ax.set_xticklabels(available_emotions, minor=False)
ax.set_yticklabels(available_emotions, minor=False)
ax.set_xticks(np.arange(len(available_emotions)) + 0.5, minor=False)
ax.set_yticks(np.arange(len(available_emotions)) + 0.5, minor=False)
for i in xrange(len(available_emotions)):
for j in xrange(len(available_emotions)):
plt.text(i+0.2, j+0.4, str(class_to_class_precs[i, j])[:5])
plt.savefig(conf_matrix_prefix + 'lstm.png')
plt.close()
```
#### File: 10DarkShadow01/Emotional-Intelligence-alpha-/models.py
```python
import wave
import numpy as np
import math
import os
import pickle
from keras.preprocessing import sequence
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.ensemble import RandomForestClassifier as RFC
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Merge, TimeDistributedDense
from keras.layers import LSTM
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.regularizers import l2
from keras.optimizers import SGD
import h5py
def train_rfr(x, y, tx, ty):
rfr = RFR(n_estimators=50)
model = rfr.fit(x, y)
return model
def train(x, y, options):
classes = np.unique(y)
print classes
classifiers = []
for c in classes:
out = np.array([int(i) for i in y==c])
print c, len(out[out == 1]), len(out[out == 0]), len(out[out == 1]) + len(out[out == 0])
classifier = RFC(n_estimators=10)
classifier.fit(x, out)
classifiers.append(classifier)
return classifiers
def train_rfc(x, y, options):
classifier = RFC(n_estimators=100)
return classifier.fit(x, y)
def fork(model, n=2):
forks = []
for i in range(n):
f = Sequential()
f.add(model)
forks.append(f)
return forks
def make_sample_lstm(x, n, y=None, use_y=False):
if use_y:
xt = np.zeros((x.shape[0], n, x.shape[1] + 1), dtype=float)
t = np.zeros((x.shape[0], x.shape[1] + 1), dtype=float)
for i in xrange(x.shape[0]):
if i == 0:
t[i, :-1] = x[i, :]
t[i, -1] = 0
else:
t[i, :-1] = x[i, :]
t[i, -1] = y[i-1]
for i in xrange(x.shape[0]):
if i < n:
i0 = n - i
xt[i, :i0, :] = np.zeros((i0, x.shape[1] + 1), dtype=float)
if i > 0:
xt[i, i0:, :] = t[:i, :]
else:
xt[i, :, :] = t[i-n:i, :]
return xt
else:
xt = np.zeros((x.shape[0], n, x.shape[1]), dtype=float)
for i in xrange(x.shape[0]):
if i < n:
i0 = n - i
xt[i, :i0, :] = np.zeros((i0, x.shape[1]), dtype=float)
if i > 0:
xt[i, i0:, :] = x[:i, :]
else:
xt[i, :, :] = x[i-n:i, :]
return xt
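# For use_y=False, make_sample_lstm turns a (num_frames, num_features) matrix into a
# (num_frames, n, num_features) tensor: slice i holds the n rows preceding frame i
# (x[i-n:i]), zero-padded on the left while fewer than n previous rows exist; frame i
# itself is not part of its own window. With use_y=True the previous target is appended
# as an extra feature column.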
class modelLSTM:
def __init__(self, model, length, use_y):
self.model = model
self.n = length
self.use_y = use_y
def predict(self, x):
if self.use_y:
# NOTE: this branch allocates buffers but never fills them or returns,
# so only the use_y=False path below produces a prediction.
result = np.zeros((x.shape[0], 1), dtype=float)
for i in xrange(x.shape[0]):
t = np.zeros((self.n, x.shape[1] + 1), dtype=float)
else:
xt = make_sample_lstm(x, self.n)
return self.model.predict(xt)
def save(self, name_json, name_weights):
json_string = self.model.to_json()
open(name_json, 'w').write(json_string)
self.model.save_weights(name_weights)
def train_lstm_avec(x, y, xt, yt):
length = 25
use_y = False
x_series_train = make_sample_lstm(x, length, y, use_y)
print x_series_train.shape
x_series_test = make_sample_lstm(xt, length, yt, use_y)
print x_series_test.shape
print y[:100, 0]
model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(x_series_train.shape[1], x_series_train.shape[2])))
model.add(Dropout(0.2))
model.add(Activation('tanh'))
model.add(LSTM(128))
model.add(Dropout(0.2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.summary()
model.compile(loss='mean_absolute_error', optimizer='rmsprop')
model.fit(x_series_train, y, batch_size=512, nb_epoch=50,
verbose=2, validation_data=(x_series_test, yt))
return modelLSTM(model, length, use_y)
def train_lstm(x, y, xt, yt):
batch_size = 320
nb_classes = 10
nb_epoch = 25
ts = x[0].shape[0]
model = Sequential()
model.add(LSTM(512, return_sequences=True, input_shape=(ts, x[0].shape[1])))
model.add(Activation('tanh'))
model.add(LSTM(256, return_sequences=False))
model.add(Activation('tanh'))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dense(y.shape[1]))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=RMSprop())
model.fit(x, y, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2, validation_data=(xt, yt), show_accuracy=True)
return model
def train_mpc(x, y, tx, ty):
batch_size = 256
nb_classes = 10
nb_epoch = 20
model = Sequential()
model.add(Dense(512, input_shape=(x.shape[1],)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(y.shape[1]))
model.compile(loss='mean_absolute_error',
optimizer=RMSprop())
history = model.fit(x, y,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(tx, ty), show_accuracy=True)
return model
def validate1(classifier, test_x, test_y):
predictions = classifier.predict(test_x)
total_acc = predictions[predictions == test_y].shape[0] / float(predictions.shape[0])
print 'Total acc ', total_acc
classes = np.unique(test_y)
print test_y
print predictions
ans = []
for ci, c in enumerate(classes):
print ci
idx = np.array([ii for ii, i in enumerate(test_y==c)])
out = test_y[idx]
pred = predictions[idx]
tt = pred[(pred == c) * (out == c)].shape[0] / float(out[out == c].shape[0])
tf = pred[(pred == c) * (out != c)].shape[0] / float(out[out != c].shape[0])
ft = pred[(pred != c) * (out == c)].shape[0] / float(out[out == c].shape[0])
ff = pred[(pred != c) * (out != c)].shape[0] / float(out[out != c].shape[0])
print tt, tf, ft, ff, '\t', out[out == c].shape[0] / float(out.shape[0]), out[out != c].shape[0] / float(out.shape[0])
tt_tf_ft_ff = [tt, tf, ft, ff]
ans.append(tt_tf_ft_ff)
ans_matrix = np.zeros((len(classes), len(classes)), dtype=float)
for ci, c in enumerate(classes):
for ci2, c2 in enumerate(classes):
ans_matrix[ci2, ci] = pred[(pred == c) * (test_y == c2)].shape[0] / float(test_y[test_y == c2].shape[0])
return np.array(ans, dtype=float), ans_matrix
class Random:
def __init__(self):
pass
def fit(self, x, y):
pass
def predict(self, x):
return np.random.normal(0, 0.1, size=x.shape[0])
def train_rnd(x, y, tx, ty):
return Random()
def validate(classifiers, test_x, test_y):
print len(classifiers)
classes = np.unique(test_y)
ans = []
for ci, c in enumerate(classes):
print ci
out = np.array([int(i) for i in test_y==c])
predictions = classifiers[ci].predict(test_x)
tt = predictions[(predictions == 1) * (out == 1)].shape[0] / float(out[out == 1].shape[0])
tf = predictions[(predictions == 1) * (out == 0)].shape[0] / float(out[out == 0].shape[0])
ft = predictions[(predictions == 0) * (out == 1)].shape[0] / float(out[out == 1].shape[0])
ff = predictions[(predictions == 0) * (out == 0)].shape[0] / float(out[out == 0].shape[0])
print tt, tf, ft, ff, '\t', out[out == 1].shape[0] / float(out.shape[0]), out[out == 0].shape[0] / float(out.shape[0])
tt_tf_ft_ff = [tt, tf, ft, ff]
ans.append(tt_tf_ft_ff)
return np.array(ans, dtype=float)
``` |
{
"source": "10gen/mlocust-app",
"score": 2
} |
#### File: docker-image/locust-tasks/locustfile-faker-custom.py
```python
import gevent
_ = gevent.monkey.patch_all()
########################################################################
# Add any additional imports here.
# But make sure to include in requirements.txt
########################################################################
import pymongo
from bson import json_util
from bson.json_util import loads
from bson import ObjectId
from locust import User, events, task, constant, tag, between
import time
from pickle import TRUE
from datetime import datetime, timedelta
import random
from faker import Faker
import pprint
from bson.codec_options import TypeRegistry, CodecOptions, TypeCodec
from bson.decimal128 import Decimal128
from decimal import Decimal
import string
########################################################################
# Global Static Variables that can be accessed without referencing self
# Change the connection string to point to the correct db
# and double check the readpreference etc.
########################################################################
client = None
coll = None
# Log all application exceptions (and audits) to the same cluster
audit = None
# docs to insert per batch insert
batch_size = None
fake = Faker()
# Custom Class
class DecimalCodec(TypeCodec):
python_type = Decimal # the Python type acted upon by this type codec
bson_type = Decimal128 # the BSON type acted upon by this type codec
def transform_python(self, value):
"""
Function that transforms a custom type value into a type
that BSON can encode.
"""
return Decimal128(value)
def transform_bson(self, value):
"""
Function that transforms a vanilla BSON type value into our
custom type.
"""
return value.to_decimal()
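# DecimalCodec is registered further down in this file via TypeRegistry([DecimalCodec()])
# and CodecOptions(type_registry=...), so Python Decimal values are written to MongoDB as
# BSON Decimal128 and read back as Decimal.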
class SampleProduct:
def reset(self):
self.order_item_quantity = random.randint(1, 4)
self.order_amount = 0
def generate(self):
self.reset()
return self.create_product()
def create_product(self):
return {
"testName": fake.name(),
"orderNumber": fake.bothify(text = "AD-#########"),
# "orderStatus": random.choice(["open", "shipped", "declined", "unpaid", "returned"]),
"orderID": fake.uuid4(),
"shard": random.randint(1,25000),
"orderStatus": "open",
"channel": self.get_channel(),
"orderReference": fake.bothify(text = "????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'),
"brand": self.get_brand(),
"orderType": self.get_order_type(),
"orderDate": fake.date_time_between(start_date=datetime(2017, 12, 31)),
"currency": self.get_currency(),
"locale": "en_US",
"integrator": "storefront",
"invoiceNumber": fake.bothify(text = "########"),
"orderTaxes": self.get_order_taxes(),
"orderLines": self.get_order_lines(),
"payments": self.get_payments(),
"promotions": self.get_promotions(),
"orderCharges": self.get_order_charges(),
"shipments": self.get_shipments(),
"customer": self.get_customer(),
"references": self.get_references(),
"notes": self.get_notes()
}
def get_channel(self):
return random.choice(["Web", "App", "Brick", "Marketplace"])
def get_brand(self):
return random.choice(["adadas", "nikey", "reebock", "old balance", "asick"])
def get_order_type(self):
return random.choice(["SALES", "SALES", "SALES", "PROMO", "COUPON"])
def get_currency(self):
return random.choice(["USD", "EUR", "CNY", "USD", "EUR"])
def get_order_lines(self):
order_lines = []
for i in range(1, self.order_item_quantity):
order_lines.append({
"lineNumber": 1,
"SKU": fake.bothify(text = "??????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'),
"productLiteralSize": round(random.uniform(2,13), 1),
"productName": fake.color_name() + ' Shirt',
"productImage": "https://edge.disstg.commercecloud.salesforce.com/dw/image/v2/aaqx_stg/on/demandware.static/Sites-adidas-US-Site/Sites-adidas-products/en_US/v1643728798340/zoom/034563_01_standard.jpg",
"UPC": random.randint(100000000000,900000000000),
"divisionCode": random.randrange(1,9),
"orderedQuantity": self.order_item_quantity,
"unitGrossPrice": round(random.uniform(1,220), 2),
"unitNetPrice": round(random.uniform(1,250), 2),
"orderLineCharges": [
{
"chargeName": random.choice(["OUTLET_MARKDOWN-ALL", "COUPON", "NONE"]),
"chargeAmount": round(random.uniform(1,25), 2)
}
],
"orderLineTaxes": [
{
"taxName": "ShippingTax",
"taxAmount": round(random.uniform(1,5), 1),
"taxClass": fake.bothify(text = "????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'),
"taxRate": round(random.uniform(1,7), 2),
}
],
"vasCode": "string",
"giftArticle": False,
"shippingGroup": "00149542",
"lineType": "inline",
"references": self.get_references()
})
return order_lines
def get_references(self):
# NOTE: a second get_references() defined later in this class overrides this one.
a = []
for i in range(random.randint(15, 25)):
a.append({
random.choice(["EAN","UPC","aromaStatus","articleNumber","campaignID","color","division","flashProduct","gender","image","literalSize","literalSizeID","modelNumber","promotionId","promotionName","proratedGrossBaseDiscount","proratedGrossDiscount","proratedNetBaseDiscount","proratedNetDiscount","proratedTaxDiscount","returnType","size","sourceInfo","taxClassID","type"]): fake.bothify(text = "??????", letters = 'abcdefghijklmnopqrstuvwxyz1234567890')
})
return a
def get_payments(self):
return [
{
"requestedAmount": round(random.uniform(1,160), 2),
"paymentGateway": random.choice(["ACI", "SIX", "ONE"]),
"paymentMethod": random.choice(["CREDIT_CARD", "CREDIT_CARD", "CREDIT_CARD", "CASH", "COUPON"]),
"collectedFromConsumer": random.randint(1,9),
"creditCardNo": random.randint(1000000000000000,9000000000000000),
"creditCardExpDate": random.randint(100000,900000),
"pspReference": random.choice(string.ascii_letters),
"reference": [
{
"name": "patchInfo",
"value": fake.bothify(text = "????????????????????????????????????????????????????????????", letters = 'abcdefghijklmnopqrstuvwxyz1234567890'),
},
{
"name": "authCode",
"value": random.randint(100000000000,900000000000)
},
{
"name": "authResult",
"value": "AUTHORISED"
},
{
"name": "pspReference",
"value": fake.bothify(text = "????????????????", letters = 'abcdefghijklmnopqrstuvwxyz1234567890'),
}
]
}
]
def get_promotions(self):
return [
{
"promotionType": "voucher",
"promotionId": random.randint(100000000000,900000000000),
"promotionName": "sale",
"campaignId": random.randint(100000000000,900000000000)
}
]
def get_shipments(self):
return [
{
"shipmentId": random.randint(10000000,90000000),
"carrierName": random.choice(["Standard Delivery", "Express Delivery", "One-day Delivery"]),
"carrierCode": random.choice(["GRND", "AIR"]),
"shipmentType": "inline",
"shippingMethod": "Standard-FED-Unregistered",
"shippingAddress": {
"firstName": fake.first_name(),
"lastName": fake.last_name(),
"addressLine1": fake.street_name(),
"addressLine2": "Unit A",
"city": fake.city(),
"state": "Taxas",
"postalCode": fake.postcode(),
"country": fake.country()
},
"pickupPointId": random.randint(10000000,99999999),
"references": [
{
"name": "carrierCode",
"value": random.choice(["GRND", "AIR"])
},
{
"name": "carrierServiceCode",
"value": random.randint(100000000000,999999999999)
},
{
"name": "collectionPeriod",
"value": fake.date_time_between(start_date=datetime(2020, 12, 31)),
},
{
"name": "deliveryPeriod",
"value": fake.date_time_between(start_date=datetime(2020, 12, 31)),
},
{
"name": "detailedShipmentStatusExpected",
"value": True
},
{
"name": "taxClassID",
"value": "FullTax"
},
{
"name": "type",
"value": "inline"
},
{
"name": "normalizedPhone",
"value": {
"phone": 1
}
},
{
"name": "SettlementComplete",
"value": False
},
{
"name": "aromaStatus",
"value": "CREATED"
},
{
"name": "carrierName",
"value": random.choice(["Standard Delivery", "Express Delivery", "One-day Delivery"])
},
{
"name": "deliveryMessage",
"value": "inline"
},
{
"name": "displayDeliveryInformation",
"value": {
"shippingMethodName": "Standard Delivery"
}
},
{
"name": "isSMSenabled",
"value": random.randint(1,9)
},
{
"name": "releaseTime",
"value": fake.date_time_between(start_date=datetime(2020, 12, 31)),
},
{
"name": "shippingNode",
"value": "adidasUS_DC1"
},
{
"name": "subOrderNo",
"value": "AD199715227a"
},
{
"name": "type",
"value": "inline"
}
]
}
]
def get_customer(self):
return {
"customerContactID": random.randrange(1,8),
"customerId": fake.bothify(text = "????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'),
"customerEmail": fake.ascii_email(),
"customerPhone": fake.phone_number(),
"customerBillingAddress": {
"addressLine1": fake.street_address(),
"city": fake.city(),
"state": fake.current_country(),
"postalCode": fake.postcode(),
"country": fake.country()
}
}
def get_references(self):
return [
{
"name": "BICExportStatus",
"value": random.randrange(1,5)
},
{
"name": "CartId",
"value": fake.bothify(text = "????????????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "ChannelNo",
"value": "web"
},
{
"name": "ExactTargetExportStatus",
"value": random.randrange(1,5)
},
{
"name": "ExportedToTibco",
"value": random.randrange(1,5)
},
{
"name": "OMS",
"value": random.randrange(1,5)
},
{
"name": "agreeForSubscription",
"value": True
},
{
"name": "aromaStatus",
"value": "CREATED"
},
{
"name": "billingAddressSanity",
"value": True
},
{
"name": "brand",
"value": "adidas"
},
{
"name": "carrierStatus",
"value": "READY_FOR_EPORT"
},
{
"name": "checkoutId",
"value": fake.bothify(text = "????????????????????????????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "city",
"value": fake.city()
},
{
"name": "consentVersion",
"value": fake.bothify(text = "????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "customerEUCI",
"value": fake.bothify(text = "????????????", letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "customerEncryptedEmail",
"value": fake.bothify(text = "????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "customerIP",
"value": fake.ipv4()
},
{
"name": "customerOrderHistoryStatus",
"value": random.randrange(1,5)
},
{
"name": "dataMartExportStatus",
"value": random.randrange(1,5)
},
{
"name": "emailConfirmationSent",
"value": False
},
{
"name": "isCCOrder",
"value": False
},
{
"name": "ivsReservationIDs",
"value": "[]"
},
{
"name": "orderSource",
"value": "glass"
},
{
"name": "orderTrackData",
"value": fake.bothify(text = "????????????????????????????????????????????????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "paymentMethodName",
"value": fake.credit_card_provider()
},
{
"name": "shippingAddressSanity",
"value": True
},
{
"name": "shippingAddressValidated",
"value": True
},
{
"name": "shippingAddressValidation",
"value": fake.bothify(text = "????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "shippingMethod",
"value": "Standard-FED-Unregistered"
},
{
"name": "siteId",
"value": "adidas_US"
},
{
"name": "siteId",
"value": "adidas_US"
},
{
"name": "taxCacheKey",
"value": fake.bothify(text = "????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
},
{
"name": "taxCalculationMissing",
"value": False
},
{
"name": "userAgent",
"value": fake.user_agent()
},
{
"name": "NPSSurveyAttributes",
"value": "&g1=M&c1=Shoes&t1=Socce"
},
{
"name": "environment",
"value": "development-test-adidasgroup-eu.demandware.net"
},
{
"name": "paymentMethod",
"value": "CREDIT_CARD"
}
]
def get_notes(self):
return [
{
"created-by": "storefront",
"creation-date": fake.date_time_between(start_date=datetime(2020, 1, 1)),
"subject": "AvalaraDocCode",
"text": fake.bothify(text = "????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
},
{
"created-by": "storefront",
"creation-date": fake.date_time_between(start_date=datetime(2020, 12, 31)),
"subject": "AvalaraDocCode",
"text": fake.bothify(text = "????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
},
{
"created-by": "storefront",
"creation-date": fake.date_time_between(start_date=datetime(2020, 12, 31)),
"subject": "CartId",
"text": fake.bothify(text = "????????????????????", letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
},
{
"created-by": "storefront",
"creation-date": fake.date_time_between(start_date=datetime(2020, 12, 31)),
"subject": "AciPaymentHelper-validateHostedPaymentAuthorizationResult",
"text": "AciPaymentLib:validateHostedPaymentAuthorizationResult() - AuthCode = 000.100.112, AuthResult = AUTHORISED, Description = Request successfully processed in 'Merchant in Connector Test Mode', id = <KEY>, ndc = 552BEDE2B4FDB62F6C6ABAC3BBD3C618.uat01-vm-tx03. Order was placed, order status was updated from Created to New."
},
{
"created-by": "storefront",
"creation-date": fake.date_time_between(start_date=datetime(2020, 12, 31)),
"subject": "AciPaymentHelper-validateHostedPaymentAuthorizationResult",
"text": "AciPaymentLib:validateHostedPaymentAuthorizationResult() - AuthCode = 000.100.112, AuthResult = AUTHORISED, Description = Request successfully processed in 'Merchant in Connector Test Mode', id = 8ac7a4a07eb7f657017eb8f6cd772001, ndc = 552BEDE2B4FDB62F6C6ABAC3BBD3C618.uat01-vm-tx03. Order total is captured"
}
]
def get_order_taxes(self):
return [
{
"taxName": "ShippingTax",
"taxAmount": round(random.uniform(1,5), 2),
"taxClass": "FullTax",
"taxRate": round(random.uniform(1,8), 2)
}
]
def get_order_charges(self):
return [
{
"chargeName": "OrderLineTax",
"chargeAmount": round(random.uniform(1,7), 2)
}
]
# Init this after class declaration above
generator = SampleProduct()
########################################################################
# Even though locust is designed for concurrency of simulated users,
# given how resource intensive fakers/bulk inserts are,
# you should only run 1 simulated user / worker else you'll kill the
# CPU of the workers.
########################################################################
class MetricsLocust(User):
####################################################################
# Unlike a standard locust file where we throttle requests on a per
# second basis, since we are trying to load data asap, there will
# be no throttling
####################################################################
def __init__(self, parent):
super().__init__(parent)
global client, coll, audit, batch_size
# Parse out env variables from the host
vars = self.host.split("|")
srv = vars[0]
print("SRV:",srv)
client = pymongo.MongoClient(srv)
db = client[vars[1]]
coll = db[vars[2]]
# docs to insert per batch insert
batch_size = int(vars[3])
print("Batch size from Host:",batch_size)
# Singleton: only set up the audit collection once
if audit is None:
# Log all application exceptions (and audits) to the same cluster
audit = client.mlocust.audit
# Extra work setting up the db collection and indexes
"""
Define the collection and its indexes
"""
# prepare a codec for decimal values
decimal_codec = DecimalCodec()
type_registry = TypeRegistry([decimal_codec])
codec_options = CodecOptions(type_registry=type_registry)
# create the collection if not exists
if vars[2] not in db.list_collection_names():
coll = db.create_collection(
vars[2], codec_options=codec_options)
# create the required indexes
indexes = [
pymongo.IndexModel([('orderNumber', pymongo.ASCENDING)], name="oNumber"),
pymongo.IndexModel([('customer.customerEmail', pymongo.ASCENDING)], name="cEmail"),
pymongo.IndexModel([('customer.customerPhone', pymongo.ASCENDING)], name="cPhone"),
pymongo.IndexModel([('orderStatus', pymongo.ASCENDING)], name="oStatus")]
coll.create_indexes(indexes)
else:
coll = db.get_collection(
vars[2], codec_options=codec_options)
################################################################
# Example helper function that is not a Locust task.
# All Locust tasks require the @task annotation
# You have to pass the self reference for all helper functions
################################################################
def get_time(self):
return time.time()
################################################################
# Audit should only be intended for logging errors
# Otherwise, it impacts the load on your cluster since it's
# extra work that needs to be performed on your cluster
################################################################
def audit(self, type, msg):
print("Audit: ", msg)
audit.insert_one({"type":type, "ts":self.get_time(), "msg":str(msg)})
# TODO Set this to 0 so bulkinsert goes as fast as possible. Make sure to simulate 1 user per worker only.
wait_time = between(0, 0)
################################################################
# Since the loader is designed to be single threaded with 1 user
# There's no need to set a weight to the task.
# Do not create additional tasks in conjunction with the loader
# If you are testing running queries while the loader is running
# deploy 2 clusters in mLocust with one running faker and the
# other running query tasks
# The reason why we don't want to do both loads and queries is
# because of the simultaneous users and wait time between
# requests. The bulk inserts can take longer than 1s possibly
# which will cause the workers to fall behind.
################################################################
# TODO 0 this out if doing normal load
@task(1)
def _bulkinsert(self):
# Note that you don't pass in self despite the signature above
tic = self.get_time();
name = "bulkinsert";
global coll, audit
try:
coll.insert_many([generator.generate() for _ in range(batch_size)], ordered=False)
events.request_success.fire(request_type="pymongo", name=name, response_time=(time.time()-tic)*1000, response_length=0)
except Exception as e:
events.request_failure.fire(request_type="pymongo", name=name, response_time=(time.time()-tic)*1000, response_length=0, exception=e)
self.audit("exception", e)
# Add a sleep for just faker gen so we don't hammer the system with file not found ex
time.sleep(5)
``` |
{
"source": "1-0/gl_qa_auto_pro_camp_final",
"score": 2
} |
#### File: 1-0/gl_qa_auto_pro_camp_final/solution.py
```python
import sys
import os
from os.path import join, dirname
import subprocess
import paramiko
from tools.params import load_params
def connect_a(hostname, port, username, password, command='ls -l'):
with paramiko.SSHClient() as client:
client.load_system_host_keys()
client.connect(
hostname=hostname,
port=port,
username=username,
password=password
)
stdin, stdout, stderr = client.exec_command(command)
# read the output once; a second read() on the channel would return an empty string
output = stdout.read().decode()
print(output)
return output
def trace_b_log(p):
return connect_a(
hostname=p["linux1"],
port=p["port1"],
username=p["user1"],
password=p["password1"],
command='/usr/bin/tracepath ' + p["linux2"]
)
def main():
p = load_params()
# print(p)
trace_b_log(p)
trace_b_log(p)
if __name__ == '__main__':
main()
```
#### File: gl_qa_auto_pro_camp_final/tools/params.py
```python
import argparse
import os
from dotenv import load_dotenv
def load_param(args, param_name, param_type, default=None):
"""load_param - load and return param from args or .env or default"""
try: # for python38 and python39
if getattr(args, param_name.lower()) is not None:
return getattr(args, param_name.lower())
except AttributeError:
pass
if os.getenv(param_name.upper()):
return param_type(os.getenv(param_name.upper()))
return default
def load_params():
"""load_params - load and return params dictionary"""
res = {}
load_dotenv()
parser = argparse.ArgumentParser(
description='Process for testing 2 ways of connecting Linux machines'
)
parser.add_argument(
'-f', '--test_file', required=False,
metavar='test_file', type=str,
help='file name for testing connection speed'
)
parser.add_argument(
'-s', '--test_file_size', required=False,
metavar='test_file_size', type=float,
help=('file size in Mb, generated ' +
'(if no test_file) for testing connection speed')
)
parser.add_argument(
'-l1', '--linux1', required=False,
metavar='linux1', type=str,
help='linux A machine address'
)
parser.add_argument(
'-p1', '--port1', required=False,
metavar='port1', type=int,
help='linux A machine ssh connection port'
)
parser.add_argument(
'-l2', '--linux2', required=False,
metavar='linux2', type=str,
help='linux B machine address'
)
parser.add_argument(
'-p2', '--port2', required=False,
metavar='port2', type=int,
help='linux B machine ssh connection port'
)
args = parser.parse_args()
res["test_file"] = load_param(args, "test_file", str)
res["test_file_size"] = load_param(args, "test_file_size", float, 100.)
res["linux1"] = load_param(args, "linux1", str, "Linux-A")
res["port1"] = load_param(args, "port1", int, 22)
res["user1"] = load_param(args, "user1", str, "ubuntu")
res["password1"] = load_param(args, "password1", str)
res["linux2"] = load_param(args, "linux2", str, "Linux-B")
res["port2"] = load_param(args, "port2", int, 22)
res["user2"] = load_param(args, "user2", str, "ubuntu")
res["password2"] = load_param(args, "password2", str)
return res
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
``` |
{
"source": "10jmellott/ProjectEuler",
"score": 4
} |
#### File: ProjectEuler/problems/p002.py
```python
from utils.fibonacci import _fib
def main():
"""Main method to run and set the parameters for this problem
Returns:
Integer: Solution to this problem
"""
N = 4000000
i = 0
v = 0
memo = {}
ret = 0
while v < N:
v = _fib(i, memo)
if v < N and v % 2 == 0:
ret = ret + v
i = i + 1
return ret
```
#### File: ProjectEuler/problems/p010.py
```python
from utils.eratosthenes import sieve
def main():
"""Solves this problem
Utilizes the Sieve of Eratosthenes and the python sum function.
This was a simple one...
Returns:
Integer: Solution to this problem
"""
N = 2000000
primes = sieve(N)
return sum(primes)
```
#### File: ProjectEuler/problems/p012.py
```python
from utils.oeis import triangular_numbers
from utils.fibonacci import trial_division
from utils.fibonacci import factors_to_dictionary
def main():
"""Solves this problem
Utilizes [A000005](http://oeis.org/A000005) which is solved via a
lemma to Euler's Totient Function
Returns:
Integer: Solution to this problem
"""
i = 1
divisors = 0
while divisors <= 500:
triangle = triangular_numbers(i)
prime_factors = trial_division(triangle)
prime_factors = factors_to_dictionary(prime_factors)
divisors = 1
for k, v in prime_factors.items():
divisors = divisors * (v + 1)
i = i + 1
return triangular_numbers(i - 1)
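# Worked example of the divisor-count formula used above: 28 = 2^2 * 7, so the number of
# divisors is (2 + 1) * (1 + 1) = 6, matching {1, 2, 4, 7, 14, 28}.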
```
#### File: ProjectEuler/problems/p016.py
```python
def main():
"""Solves this problem
Python makes this a joke...
At least the code is pretty...
Returns:
Integer: Solution to this problem
"""
# 2 ^ 1000 gets calculated: 2 ** 1000
# Turned into a string: str(~)
# Turned into a list of chars: list(~)
# Each char is transformed into an int: int(s) for s in ~
# Sum operation is performed on the list of ints: sum(~)
return sum(int(s) for s in list(str(2 ** 1000)))
```
#### File: ProjectEuler/problems/p021.py
```python
from utils import factor
def main():
"""Solves this problem
Calculates the sum of the factors of all numbers under 10000.
Then checks each result for amicable numbers.
Nothing very special in terms of calculations.
Returns:
Integer: Solution to this problem
"""
dn = {}
for i in range(1, 10000):
dn[i] = sum(factor(i))
amicable = set()
for k, v in dn.items():
if k != v and v in dn and dn[v] == k:
amicable.add(k)
return sum(amicable)
```
#### File: ProjectEuler/problems/p022.py
```python
def score(name):
v = 0
init_value = ord('A') - 1
chars = list(name)
for c in chars:
v += ord(c) - init_value
return v
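# Worked example: score("COLIN") == 3 + 15 + 12 + 9 + 14 == 53, the value quoted in the
# Project Euler problem 22 statement.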
def main():
"""Solves this problem
Returns:
Integer: Solution to this problem
"""
with open('input/p022.txt') as f:
text = f.read(-1)
names = text.split(',')
for i in range(len(names)):
names[i] = names[i].replace('"', '')
names.sort()
N = len(names)
value = 0
for i in range(N):
value += score(names[i]) * (i + 1)
return value
``` |
{
"source": "10JoeCook/MondrianProject",
"score": 3
} |
#### File: MondrianProject/Utils/Readers.py
```python
import scipy.io
from Structures.Formal import FormalMondrian
class MatReader:
def __init__(self, filename: str = "../Paper1Data/MondriansAndTransatlantics.mat"):
self.filename = filename
self.file_contents = None
self.keys = []
self.header = ""
self.version = 0
self.labels = []
self.names = []
self.reps = []
self.images = []
self.attr_lists = {}
def read(self):
self.file_contents = scipy.io.loadmat(self.filename)
self.keys = self.file_contents.keys()
self.header = self.file_contents["__header__"]
self.version = self.file_contents["__version__"]
self.labels = self.file_contents["labels"]
self.labels = self.labels[0]
self.names = self.file_contents["names"]
self.names = self.names[0]
for i in range(len(self.names)):
self.names[i] = self.names[i][0]
self.reps = self.file_contents["reps"]
self.reps = self.reps[0]
self.generate_image_objects()
self.generate_attr_lists()
def generate_image_objects(self):
for i in range(len(self.names)):
name = self.names[i]
rep = self.reps[i]
ymax = int(rep[0][0][0])
xmax = int(rep[1][0][0])
vpts = list(rep[2][0])
vext = list(rep[3])
for i in range(len(vext)):
vext[i] = list(vext[i])
vthick = list(rep[4])
for i in range(len(vthick)):
vthick[i] = list(vthick[i])
hpts = list(rep[5][0])
hext = list(rep[6])
for i in range(len(hext)):
hext[i] = list(hext[i])
hthick = list(rep[7])
for i in range(len(hthick)):
hthick[i] = list(hthick[i])
rects = list(rep[8])
for i in range(len(rects)):
rects[i] = list(rects[i])
rect_colors = list(rep[9][0])
self.images.append(FormalMondrian(name,
ymax,
xmax,
vpts,
hpts,
rect_colors,
vext,
vthick,
hext,
hthick,
rects))
def generate_attr_lists(self):
self.attr_lists["ymax"] = []
self.attr_lists["xmax"] = []
self.attr_lists["vpts"] = []
self.attr_lists["vext"] = []
self.attr_lists["vthick"] = []
self.attr_lists["htps"] = []
self.attr_lists["hext"] = []
self.attr_lists["hthick"] = []
self.attr_lists["rects"] = []
self.attr_lists["rect_colors"] = []
for i in range(len(self.names)):
rep = self.reps[i]
self.attr_lists["ymax"].append(int(rep[0][0][0]))
self.attr_lists["xmax"].append(int(rep[1][0][0]))
self.attr_lists["vpts"].append(list(rep[2][0]))
vext = list(rep[3])
for i in range(len(vext)):
vext[i] = list(vext[i])
self.attr_lists["vext"].append(vext)
vthick = list(rep[4])
for i in range(len(vthick)):
vthick[i] = list(vthick[i])
self.attr_lists["vthick"].append(vthick)
self.attr_lists["hpts"].appemd(list(rep[5][0]))
hext = list(rep[6])
for i in range(len(hext)):
hext[i] = list(hext[i])
self.attr_lists["hext"].append(hext)
hthick = list(rep[7])
for i in range(len(hthick)):
hthick[i] = list(hthick[i])
self.attr_lists["hthick"].append(hthick)
rects = list(rep[8])
for i in range(len(rects)):
rects[i] = list(rects[i])
self.attr_lists["rects"].append(rects)
self.attr_lists["rect_colors"].append(list(rep[9][0]))
def get_images(self):
return self.images
``` |
{
"source": "10jqka-aicubes/span_abstract",
"score": 2
} |
#### File: metrics/util/bm25.py
```python
import glob
import json
import time
import dill
import jieba
import re
import heapq
import os
from gensim.summarization import bm25
class BM25Retrieval(object):
def __init__(
self, corpus_file_pattern=None, stop_words_file="../stop_words/stop_words.txt", MAX_LEN=300, path="./"
):
"""
BM25 retrieval module; it mainly wraps the preprocessing steps on top of the gensim BM25 library.
:param corpus_file_pattern: glob pattern of the retrieval corpus text files (str)
:param stop_words_file: stop-word list file (str)
:param path: directory where the trained model is saved (str)
"""
os.makedirs(path, exist_ok=True)
self.model = os.path.join(path, "bm25.m")
self.sen = os.path.join(path, "sen.pkl")
self.stop = os.path.join(path, "stop.pkl")
self.MAX_LEN = MAX_LEN
if os.path.isfile(self.model) and os.path.isfile(self.sen) and os.path.isfile(self.stop):
print("bm25 model found, loading...")
self.load()
else:
print("training bm25 model ...")
assert corpus_file_pattern is not None, "Can not find model or corpus file."
if os.path.isfile(stop_words_file):
self.stop_words = self.load_stop_words(stop_words_file)
self.sentences, corpus = self.get_corpus(corpus_file_pattern)
self.bm25 = bm25.BM25(corpus)
self.dump()
@staticmethod
def load_stop_words(f="stop_words.txt"):
words = set()
with open(f, "r", encoding="utf-8") as fp:
for line in fp:
words.add(line.strip())
return words
def cut_and_stop(self, s):
ws = jieba.cut(s) # tokenize
ws = [x for x in ws if x not in self.stop_words] # remove stop words
return ws
@staticmethod
def strQ2B(ustring):
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
# full-width character range
if inside_code >= 0xFF01 and inside_code <= 0xFF5E:
inside_code -= 0xFEE0
rstring += chr(inside_code)
# special handling for the full-width space
elif inside_code == 0x3000 or inside_code == 0x00A0:
inside_code = 0x0020
rstring += chr(inside_code)
else:
rstring += uchar
return rstring
def get_corpus(self, input_file_pattern):
"""
A period followed by a newline marks a paragraph boundary; consecutive paragraphs are merged while they stay under the maximum length, and over-long paragraphs are split on periods.
:param input_file_pattern:
:return:
"""
sentences = []
corpus = []
sen_tmp = ""
for f in glob.iglob(str(input_file_pattern)):
print(f)
with open(f, "r", encoding="utf-8") as fp:
lines = []
for line in fp:
line = self.strQ2B(re.sub(r"\s+", "", line)) # convert full-width characters to half-width
if len(line) < 2:
continue
lines.append(line)
lines_str = "\n".join(lines)
paragraphs = lines_str.split("。\n")
for para in paragraphs:
if len(sen_tmp) + len(para) <= self.MAX_LEN:
sen_tmp += para + "。\n"
else:
words = self.cut_and_stop(sen_tmp)
corpus.append(words)
sentences.append(sen_tmp)
if len(para) <= self.MAX_LEN:
sen_tmp = para + "。\n"
else:
sen_tmp = ""
para_sep = para.split("。")
for p in para_sep:
if len(sen_tmp) + len(p) <= self.MAX_LEN:
sen_tmp += p + "。"
else:
words = self.cut_and_stop(sen_tmp)
corpus.append(words)
sentences.append(sen_tmp)
sen_tmp = p + "。"
sen_tmp += "\n"
if sen_tmp:
words = self.cut_and_stop(sen_tmp)
corpus.append(words)
sentences.append(sen_tmp)
sen_tmp = ""
assert len(sentences) == len(corpus)
print("Total paragraphs: ", len(sentences))
return sentences, corpus
def get_scores(self, document):
"""
Given a sentence, return similarity scores against every candidate paragraph in the corpus.
:param document: str
:return: List[float]
"""
line = self.strQ2B(re.sub(r"\s+", "", document)) # convert full-width characters to half-width
tokens = self.cut_and_stop(line)
return self.bm25.get_scores(tokens)
def top_k(self, document, k=1):
"""
Given a document, return the k most similar paragraphs.
:param document: str
:param k:
:return: List[str]
"""
scores = self.get_scores(document)
indexes = heapq.nlargest(k, range(len(scores)), scores.__getitem__)
return [self.sentences[i] for i in indexes]
def dump(self):
with open(self.model, "wb") as fpm, open(self.sen, "wb") as fpse, open(self.stop, "wb") as fpst:
dill.dump(self.bm25, fpm)
dill.dump(self.sentences, fpse)
dill.dump(self.stop_words, fpst)
def load(self):
with open(self.model, "rb") as fpm, open(self.sen, "rb") as fpse, open(self.stop, "rb") as fpst:
self.bm25 = dill.load(fpm)
self.sentences = dill.load(fpse)
self.stop_words = dill.load(fpst)
TAGS = "ABCDⅠⅡⅢⅣⅤⅥⅦⅧⅨ①②③④⑤⑥⑦⑧.、0123456789 "
# Save the BM25 model and the retrieval results
def search(
input_path, output_path, document_files, save_model_dir, mode="1", top_k=1
): # mode=0: query only; mode=1: query+option
datasets = input_path
bm25_model_dir = save_model_dir + "/bm25_models/"
bm25_retrieval = BM25Retrieval(document_files, path=bm25_model_dir)
time_start = time.time()
num = 0
for f in glob.glob(str(datasets)):
dataset_new = []
path, filename = os.path.split(f)
with open(f, "r", encoding="utf-8") as fp, open(output_path, "w", encoding="utf-8") as fp1:
for line in fp:
data = json.loads(line)
query = data["question"]
options = data["options"]
evidences = {}
if mode == "0":
evidence = bm25_retrieval.top_k(query.strip(TAGS), top_k)
for k in options.keys():
evidences[k] = evidence
elif mode == "1":
for k, v in options.items():
evidence = bm25_retrieval.top_k(query.strip(TAGS) + "\n" + v.strip(TAGS), top_k)
evidences[k] = evidence
data["evidences"] = evidences
dataset_new.append(data)
for d in dataset_new:
fp1.write(json.dumps(d, ensure_ascii=False) + "\n")
num += 1
time_end = time.time()
print("Total examples: ", num)
print("Sec/example: ", (time_end - time_start) / num)
if __name__ == "__main__":
pass
# search(mode="1", top_k=1)
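# Hypothetical invocation sketch (paths are placeholders, not taken from the repo):
# search(
#     input_path="data/dev.jsonl",
#     output_path="data/dev_with_evidence.jsonl",
#     document_files="corpus/*.txt",
#     save_model_dir="output",
#     mode="1",
#     top_k=1,
# )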
```
#### File: span_abstract/train/model.py
```python
import json
import sys
import time
import shutil
import os
import helper
import tokenization
from bert import modeling
from bert.optimization import create_optimizer
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
import logging as logger
import data_processor
import argparse
from tensorflow.python.platform import gfile
class Model:
def __init__(self, config):
self.config = config
self.task_name = config["task_name"]
self.lstm_dim = config["lstm_dim"]
self.embedding_size = config["embedding_size"]
self.max_epoch = config["max_epoch"] # originally 10 epochs
self.learning_rate = config["learning_rate"]
self.checkpoint_dir = config["checkpoint_dir"]
self.checkpoint_path = config["checkpoint_path"]
self.initializer = initializers.xavier_initializer()
self.is_training = True
self.bert_config = config["bert_config"]
self.init_checkpoint = config["init_checkpoint"]
self.vocab_dir = config["vocab_dir"]
self.tf_serving_save_dir = config["tf_serving_save_dir"]
self.predict_file = config["predict_file"]
self.predict_result = config["predict_result"]
self.require_improvement = config["require_improvement"]
self.global_steps = tf.Variable(0, trainable=False)
self.best_dev_f1 = tf.Variable(0.0, trainable=False)
self.best_f1 = 0.0
self.best_match_num = 0
self.steps = 0 # number of training iterations
self.last_improved = 0 # step at which the last improvement was recorded
self.tokenizer = tokenization.FullTokenizer(
vocab_file=self.vocab_dir,
)
def creat_model(self):
self._init_placeholder()
self.add_bert_layer()
self.add_biLSTM_layer()
self.add_logits_layer()
self.add_loss_layer()
self.add_optimizer_layer()
def _init_placeholder(self):
self.input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name="input_ids")
self.input_mask = tf.placeholder(dtype=tf.int32, shape=[None, None], name="input_mask")
self.segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name="segment_ids")
self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name="targets")
self.dropout = tf.placeholder(dtype=tf.float32, shape=None, name="dropout")
used = tf.sign(tf.abs(self.input_ids))
length = tf.reduce_sum(used, reduction_indices=1)
self.length = tf.cast(length, tf.int32)
self.nums_steps = tf.shape(self.input_ids)[-1]
def add_bert_layer(self):
bert_config = modeling.BertConfig.from_json_file(self.bert_config)
model = modeling.BertModel(
config=bert_config,
is_training=self.is_training,
input_ids=self.input_ids,
input_mask=self.input_mask,
token_type_ids=self.segment_ids,
use_one_hot_embeddings=False,
)
self.embedded = model.get_sequence_output()
self.model_inputs = tf.nn.dropout(self.embedded, self.dropout)
def add_biLSTM_layer(self):
with tf.variable_scope("bi-LSTM") as scope:
lstm_cell = {}
for direction in ["forward", "backward"]:
with tf.variable_scope(direction):
lstm_cell[direction] = tf.contrib.rnn.LSTMCell(
num_units=self.lstm_dim,
# use_peepholes=True,
# initializer=self.initializer,
state_is_tuple=True,
)
outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
cell_fw=lstm_cell["forward"],
cell_bw=lstm_cell["backward"],
inputs=self.model_inputs,
sequence_length=self.length,
dtype=tf.float32,
)
self.lstm_outputs = tf.concat(outputs, axis=2)
def add_logits_layer(self):
with tf.variable_scope("hidden"):
w = tf.get_variable(
"W", shape=[self.lstm_dim * 2, self.lstm_dim], dtype=tf.float32, initializer=self.initializer
)
b = tf.get_variable("b", shape=[self.lstm_dim], dtype=tf.float32, initializer=self.initializer)
output = tf.reshape(self.lstm_outputs, shape=[-1, self.lstm_dim * 2])
self.hidden = tf.tanh(tf.matmul(output, w) + b)
with tf.variable_scope("logits"):
w = tf.get_variable(
"W", shape=[self.lstm_dim, self.nums_tags], initializer=self.initializer, dtype=tf.float32
)
b = tf.get_variable("b", shape=[self.nums_tags], dtype=tf.float32)
pred = tf.matmul(self.hidden, w) + b
self.logits = tf.reshape(pred, shape=[-1, self.nums_steps, self.nums_tags])
def add_loss_layer(self):
with tf.variable_scope("loss_layer"):
self.trans = tf.get_variable(
"transitions", shape=[self.nums_tags, self.nums_tags], initializer=self.initializer
)
log_likelihood, self.trans = tf.contrib.crf.crf_log_likelihood(
inputs=self.logits, tag_indices=self.targets, transition_params=self.trans, sequence_lengths=self.length
)
self.paths, _ = tf.contrib.crf.crf_decode(
potentials=self.logits, transition_params=self.trans, sequence_length=self.length
)
self.outputs = tf.identity(self.paths, name="outputs")
self.loss = tf.reduce_mean(-log_likelihood)
def add_optimizer_layer(self):
correct_prediction = tf.equal(self.paths, tf.cast(self.targets, tf.int32))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
num_train_steps = int(self.train_length / self.batch_size * self.max_epoch)
num_warmup_steps = int(num_train_steps * 0.1)
self.train_op = create_optimizer(self.loss, self.learning_rate, num_train_steps, num_warmup_steps, False)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
def train_step(self, sess, batch):
tokens, tag_ids, inputs_ids, segment_ids, input_mask = zip(*batch)
feed = {
self.input_ids: inputs_ids,
self.targets: tag_ids,
self.segment_ids: segment_ids,
self.input_mask: input_mask,
self.dropout: 0.5,
}
embedding, global_steps, loss, _, logits, acc, length = sess.run(
[self.embedded, self.global_steps, self.loss, self.train_op, self.logits, self.accuracy, self.length],
feed_dict=feed,
)
return global_steps, loss, logits, acc, length
def train(self):
processors = {
"alias": data_processor.NerProcessor,
"ner": data_processor.NerProcessor,
"wencai_span": data_processor.NerProcessor,
}
self.train_data = processors[self.task_name](self.tokenizer, "train", self.config)
self.dev_data = processors[self.task_name](self.tokenizer, "dev", self.config)
logger.info("---" * 20)
self.batch_size = self.train_data.batch_size
self.nums_tags = len(self.train_data.get_tags())
self.tag_to_id = self.train_data.tag_to_id
self.id_to_tag = self.train_data.id_to_tag
self.train_length = len(self.train_data.data)
self.dev_batch = self.dev_data.iteration()
self.dev_length = len(self.dev_data.data)
logger.info("-" * 50)
logger.info("train data:\t %s", self.train_length)
logger.info("dev data:\t %s", self.dev_length)
logger.info("nums of tags:\t %s", self.nums_tags)
logger.info("tag_to_id: {}".format(self.tag_to_id))
logger.info("id_to_tag: {}".format(self.id_to_tag))
self.creat_model()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
with tf.device("/gpu:0"): # /cpu:0
# whether to load an existing trained model
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
sess.run(tf.global_variables_initializer())
t_vars = tf.trainable_variables()
# fix part of the parameters (initialize them from the BERT checkpoint)
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
t_vars, self.init_checkpoint
)
tf.train.init_from_checkpoint(self.init_checkpoint, assignment_map)
self.model_input = {
"input_ids": self.input_ids,
"segment_ids": self.segment_ids,
"input_mask": self.input_mask,
"dropout": self.dropout,
}
self.model_output = {"logits": self.logits, "length": self.length, "pre_paths": self.paths}
for i in range(self.max_epoch):
logger.info("-" * 50)
logger.info("epoch {}".format(i))
self.steps = 0
for batch in self.train_data.get_batch():
# print (len(batch))
self.steps += 1
global_steps, loss, logits, acc, length = self.train_step(sess, batch)
if self.steps % 10 == 0:
logger.info(
"[->] epoch {}: step {}/{}\tloss {:.4f}\tacc {:.5f}".format(
i, self.steps, len(self.train_data.batch_data), loss, acc
)
)
if self.steps % 100 == 0:
self.evaluate(sess)
logger.info("training finished!!!")
def evaluate(self, sess):
batch = self.dev_batch.__next__()
tokens, tag_ids, inputs_ids, segment_ids, input_mask = zip(*batch)
feed = {
self.input_ids: inputs_ids,
self.segment_ids: segment_ids,
self.targets: tag_ids,
self.input_mask: input_mask,
self.dropout: 1.0, # keep_prob: disable dropout during evaluation
}
scores, acc, lengths, pre_paths = sess.run(
[self.logits, self.accuracy, self.length, self.paths], feed_dict=feed
)
logger.info(tokens[0])
logger.info(inputs_ids[0])
logger.info(tag_ids[0])
logger.info(pre_paths[0])
match_num = sum(
[
1 if helper.get_tags(tar, self.id_to_tag) == helper.get_tags(pre, self.id_to_tag) else 0
for tar, pre in zip(tag_ids, pre_paths)
if any([elem for elem in tar if elem not in (0, 1, 2, 3)])
]
)
logger.info("\tmatch {:.4f}\t best_match_num {:.4f}".format(match_num, self.best_match_num))
logger.info("\tacc {:.4f}\t best {:.4f}\t".format(acc, self.best_f1))
if acc >= self.best_f1 or match_num >= self.best_match_num:
logger.info("acc {:.4f}, match_num {:.4f} saved model!!".format(acc, match_num))
self.saver.save(sess, self.checkpoint_path)
if acc > self.best_f1:
self.best_f1 = acc
if match_num > self.best_match_num:
self.best_match_num = match_num
self.last_improved = self.steps
helper.model_save(
session=sess,
save_path=self.tf_serving_save_dir,
model_input=self.model_input,
model_output=self.model_output,
)
def prepare_pred_data(self, text):
max_length = len(text) + 2
tokens = list(text)
tokens = ["[CLS]"] + tokens + ["[SEP]"]
logger.info(tokens)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
input_ids = input_ids + (max_length - len(input_ids)) * [0]
segment_ids = segment_ids + (max_length - len(segment_ids)) * [0]
input_mask = input_mask + (max_length - len(input_mask)) * [0]
feed = {
self.input_ids: [input_ids],
self.segment_ids: [segment_ids],
self.input_mask: [input_mask],
self.dropout: 1.0, # keep_prob: disable dropout at inference time
}
return feed
def predict(self):
self.tag_to_id = helper.obj_load(self.config["tag_to_id"])
self.id_to_tag = {int(id): tag for id, tag in helper.obj_load(self.config["id_to_tag"]).items()}
self.batch_size = 1
self.train_length = 10
self.nums_tags = len(self.tag_to_id.keys())
self.creat_model()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
logger.info("[->] restore model")
self.saver.restore(sess, ckpt.model_checkpoint_path)
else:
logger.info("[->] no model, error,please check cpk dir: {}".format(self.checkpoint_dir))
return
# sess.run(tf.global_variables_initializer())
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ["loss_layer/outputs", "logits/Reshape", "Sum/reduction_indices"]
)
# TODO: model prediction
with gfile.FastGFile("wencai_span.pb", "wb") as f:
f.write(constant_graph.SerializeToString())
while True:
text = input("input text : ")
logger.info("text: %s", text)
feed = self.prepare_pred_data(text)
logits, length, paths = sess.run([self.logits, self.length, self.paths], feed_dict=feed)
logger.info(["paths: ", paths])
logger.info(["tag_map: ", self.id_to_tag])
logger.info(["tag", [self.id_to_tag[elem] for elem in paths[0]]])
entities_result = helper.format_result(
["[CLS]"] + list(text) + ["[SEP]"], [self.id_to_tag[elem] for elem in paths[0]]
)
print(json.dumps(entities_result, indent=4, ensure_ascii=False))
if __name__ == "__main__":
if len(sys.argv) < 2:
print("arg error!!")
exit()
parser = argparse.ArgumentParser(usage="usage: python3 model.py -t ner -e train/predict ")
parser.add_argument("-t", "--task", type=str, default="ner")
parser.add_argument("-e", "--entry", type=str, default="train")
ARGS = parser.parse_args()
processors = {
"alias": data_processor.NerProcessor,
"ner": data_processor.NerProcessor,
"wencai_span": data_processor.NerProcessor,
}
task_name = ARGS.task
if task_name not in processors:
print("Task not found: %s" % (task_name))
exit()
if ARGS.entry == "train":
para = {"lstm_dim": 128, "max_epoch": 50, "train_batch": 128, "dev_batch": 128, "require_improvement": 1000}
logger.warn("--------" * 10)
logger.warn("\npara : \n {para}".format(para=json.dumps(para, indent=4, ensure_ascii=False)))
base_config = {
"task_name": task_name,
"mode": "bert",
"lstm_dim": 128,
"embedding_size": 50,
"max_epoch": 10,
"train_batch": 128,
"dev_batch": 128,
"learning_rate": 5e-5,
"require_improvement": 1000,
"bert_config": "bert_model/bert_config_rbt3.json",
"init_checkpoint": "bert_model/bert_model.ckpt",
"vocab_dir": "bert_model/vocab.txt",
"checkpoint_dir": "./result/{task_name}/ckpt_model/{model_version}".format(
task_name=task_name, model_version=time.strftime("%Y%m%d")
), # %Y%m%d%H%M%S
"checkpoint_path": "./result/{task_name}/ckpt_model/{model_version}/{task_name}.ckpt".format(
task_name=task_name, model_version=time.strftime("%Y%m%d")
),
"train_file": "data/{task_name}/train".format(task_name=task_name),
"dev_file": "data/{task_name}/dev".format(task_name=task_name),
"predict_file": "data/{task_name}/predict".format(task_name=task_name),
"predict_result": "data/{task_name}/predict_result".format(task_name=task_name),
"tf_serving_save_dir": "result/{task_name}/saved_model/{model_version}".format(
task_name=task_name, model_version=time.strftime("%Y%m%d")
),
"parameter_information": "result/{task_name}/saved_model/parameter_information.json".format(
task_name=task_name
),
"save_dir": "result/{task_name}/saved_model/".format(task_name=task_name),
"tag_to_id": "result/{task_name}/saved_model/tag_to_id.json".format(task_name=task_name),
"id_to_tag": "result/{task_name}/saved_model/id_to_tag.json".format(task_name=task_name),
}
bert_config = helper.obj_load(base_config["bert_config"])
base_config = helper.merge_two_dicts(base_config, para)
config = {"base_config": base_config, "bert_config": bert_config}
helper.obj_save(config, base_config["parameter_information"])
if os.path.exists(os.path.join(base_config["save_dir"], "vocab.txt")):
logger.debug(["model_result vocab_file existed!!"])
else:
shutil.copy(base_config["vocab_dir"], base_config["save_dir"])
logger.info(base_config)
else:
base_config = helper.obj_load(
"result/{task_name}/saved_model/parameter_information.json".format(task_name=task_name)
)["base_config"]
model = Model(base_config)
if ARGS.entry == "train":
model.train()
elif ARGS.entry == "predict":
model.predict()
```
#### File: span_abstract/util/interface.py
```python
from abc import ABC, abstractmethod
from pathlib import Path
class TrainInterface(ABC):
def __init__(self, input_file_dir: Path, save_model_dir: Path, *args, **kargs):
self.initialize(input_file_dir, save_model_dir, *args, **kargs)
self.input_file_dir = input_file_dir
self.save_model_dir = save_model_dir
@abstractmethod
def initialize(self, input_file_dir: Path, save_model_dir: Path, *args, **kargs):
"""训练初始化函数
Args:
input_file_dir (Path): 训练文件的目录
save_model_dir (Path): 保存模型的目录
"""
raise NotImplementedError()
@abstractmethod
def do_train(self):
raise NotImplementedError()
class PredictInterface(ABC):
def __init__(self, input_file_dir: Path, load_model_dir: Path, predict_file_dir: Path, *args, **kargs):
self.initialize(input_file_dir, load_model_dir, predict_file_dir, *args, **kargs)
self.input_file_dir = input_file_dir
self.load_model_dir = load_model_dir
self.predict_file_dir = predict_file_dir
@abstractmethod
def initialize(self, input_file_dir: Path, load_model_dir: Path, predict_file_dir: Path, *args, **kargs):
"""预测初始化函数
Args:
input_file_dir (Path): 预测的文件目录
load_model_dir (Path): 加载模型的目录
predict_file_dir (Path): 预测结果的文件目录
"""
raise NotImplementedError()
@abstractmethod
def do_predict(self):
raise NotImplementedError()
class MetricsInterface(ABC):
@abstractmethod
def do_eval(
self,
predict_file_dir: Path,
groudtruth_file_dir: Path,
result_json_file: Path,
result_detail_file: Path,
*args,
**kargs
):
"""评测主函数
Args:
predict_file_dir (Path): 模型预测结果的文件目录
groudtruth_file_dir (Path): 真实结果的文件目录
result_json_file (Path): 评测结果,json格式,{"f1": 0.99}
result_detail_file (Path): 预测明细,可选
"""
raise NotImplementedError()
``` |
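These ABCs only pin down the train/predict/evaluate contract; a concrete class has to supply `initialize` and the corresponding `do_*` method. A minimal sketch of such an implementation is shown below — the `DummyTrainer` name and its line-counting "training" are purely illustrative, not part of the repository.
```python
from pathlib import Path

class DummyTrainer(TrainInterface):
    def initialize(self, input_file_dir: Path, save_model_dir: Path, *args, **kargs):
        # toy setup: remember an optional epoch count passed as a keyword argument
        self.epochs = kargs.get("epochs", 1)

    def do_train(self):
        # "train" by counting the lines of every .txt file under input_file_dir,
        # then persist the count under save_model_dir
        n_lines = sum(
            len(p.read_text(encoding="utf-8").splitlines())
            for p in self.input_file_dir.glob("*.txt")
        )
        self.save_model_dir.mkdir(parents=True, exist_ok=True)
        (self.save_model_dir / "model.txt").write_text(str(n_lines), encoding="utf-8")

trainer = DummyTrainer(Path("data/train"), Path("result/model"))
trainer.do_train()
```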
{
"source": "10K-Linesofcode/Glowing-Tribble",
"score": 2
} |
#### File: GT_cbv/cbv_app/views.py
```python
from django.shortcuts import render
from django.views.generic import View, TemplateView, ListView, DetailView
from . import models
#from django.http import HttpResponse
# Create your views here.
#def index(request):
# return render(request,'index.html')
#class CBView(View):
# def get(self, request):
# return HttpResponse("Class based View")
class IndexView(TemplateView):
template_name = "index.html"
def get_context_data(self,**kwargs):
context=super().get_context_data(**kwargs)
context['injectme']="Basic Injection"
return context
class SchoolListView(ListView):
#context_object_name='school_list'
model= models.School
class SchoolDetailView(DetailView):
context_object_name = 'school_details'
model = models.School
template_name = 'cbv_app/school_detail.html'
```
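These class-based views still need URL patterns to be reachable; the project's `urls.py` is not included in this dump, so the wiring below is only a plausible sketch (route names and the `<int:pk>` converter are assumptions).
```python
# cbv_app/urls.py -- hypothetical wiring for the views above
from django.urls import path
from . import views

app_name = 'cbv_app'

urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('schools/', views.SchoolListView.as_view(), name='list'),
    path('schools/<int:pk>/', views.SchoolDetailView.as_view(), name='detail'),
]
```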
#### File: GT_forms/form_app/views.py
```python
from django.shortcuts import render
# Create your views here.
from . import forms
def index(request):
return render(request, 'form_app/index.html')
def form_name_view(request):
form=forms.FormName()
if request.method =='POST':
form= forms.FormName(request.POST)
if form.is_valid():
# Do Something Code
print(" Validation sucecss")
print("NAME: "+ form.cleaned_data['name'])
print("Email: "+ form.cleaned_data['email'])
print("Text: "+ form.cleaned_data['text'])
return render(request, 'form_app/form_app.html', {'form': form})
```
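`form_name_view` relies on a `FormName` class with `name`, `email` and `text` fields; that `forms.py` is not part of this excerpt, so the definition below is only inferred from the `cleaned_data` keys used in the view.
```python
# form_app/forms.py -- assumed definition, inferred from the view above
from django import forms

class FormName(forms.Form):
    name = forms.CharField()
    email = forms.EmailField()
    text = forms.CharField(widget=forms.Textarea)
```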
#### File: GT/GT_app/models.py
```python
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Topic(models.Model):
top_name= models.CharField(max_length=264,unique=True)
def __str__(self):
return self.top_name
class Webpage(models.Model):
topic=models.ForeignKey(Topic,on_delete=models.DO_NOTHING)
name=models.CharField(max_length=264,unique=True)
url=models.URLField(unique=True)
def __str__(self):
return self.name
class AccessRecord(models.Model):
name=models.ForeignKey(Webpage,on_delete=models.DO_NOTHING)
date = models.DateField()
def __str__(self):
return str(self.date)
```
#### File: GT/GT_app/views.py
```python
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from GT_app.models import Topic,Webpage,AccessRecord
# Create your views here.
#def index(request):
# return HttpResponse(' Hello World')
# pass
def index(request):
webpages_list = AccessRecord.objects.order_by('date')
date_dict = {'access_records':webpages_list}
return render(request,'index.html',context=date_dict)
#def index_withTemplate(request):
# insert_dict= {'insert_me':'Hello im comming from views.py'}
# return render(request,'index.html',insert_dict)
```
#### File: myproject/myapp/models.py
```python
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfileInfo(models.Model):
user= models.OneToOneField(User,on_delete=models.DO_NOTHING)
#additional
portfolio_site= models.URLField(blank=True)
profile_pic=models.ImageField(upload_to='profile_pics',blank=True)
def __str__(self):
return self.user.username
``` |
{
"source": "10ks/py_utils",
"score": 3
} |
#### File: experiments/async_tests/async_3.py
```python
import asyncio
async def wait_sec(l):
print("Before wait")
await asyncio.sleep(1)
print("After wait")
l[0] = False
async def main():
# await asyncio.gather(wait_sec([True]), wait_sec([True]), wait_sec([True]))
run = [True]
asyncio.create_task(wait_sec(run))
await asyncio.sleep(0)
print("continuing main")
while run[0]:
print(".")
await asyncio.sleep(0.1)
# for i in range(10):
# print(i)
# # time.sleep(0.2)
# # await asyncio.sleep(0)
# await asyncio.sleep(0.2)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
# Completing unfinished tasks (throws a warning)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# pending = asyncio.Task.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
```
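The commented-out block above is working around the warning asyncio prints when the loop closes while the spawned task is still pending. A tidier variant, sketched below, keeps the handle returned by `create_task` and awaits it before `main()` returns, which also makes the mutable-list flag unnecessary.
```python
# sketch: same experiment without the pending-task warning
import asyncio

async def wait_sec():
    print("Before wait")
    await asyncio.sleep(1)
    print("After wait")

async def main():
    task = asyncio.create_task(wait_sec())
    print("continuing main")
    while not task.done():
        print(".")
        await asyncio.sleep(0.1)
    await task  # re-raises any exception raised inside wait_sec()

if __name__ == "__main__":
    asyncio.run(main())
```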
#### File: experiments/sql/sqlit_qry2.py
```python
import sqlite3
PATH_DB = "C:/DEV/PillData/Sqlite_DB/pills_5_filtered.db"
SIZE_TOLERANCE = 1 # margin of error of pill size (in milimeters)
def query_pills(pill_properties):
"""
Queries pill information from DB
Query is constructed dynamically, input values are checked
to avoid SQL injection
Input format:
{"imprints": [], "colors": [], "shape": [], "size": "x"}
"""
imprint_part_list = pill_properties.get("imprints")
if imprint_part_list is None:
print("Imprints not provided")
return None
query = "SELECT * FROM pills WHERE 1=1"
for imprint in imprint_part_list:
if not imprint.isalnum():
print("Non alphanumeric value in imprint list")
return None
query += f" AND imprint LIKE '%{imprint}%'"
color_list = pill_properties.get("colors")
if color_list is not None:
for color in color_list:
if not color.isalnum():
print("Non alphanumeric value in color list")
return None
color_string = ', '.join("'" + item + "'" for item in color_list)
query += f" AND color IN ({color_string})"
shape_list = pill_properties.get("shape")
if shape_list is not None:
for shape in shape_list:
if not shape.isalnum():
print("Non alphanumeric value in shape list")
return None
shape_string = ', '.join("'" + item + "'" for item in shape_list)
query += f" AND shape IN ({shape_string})"
size = pill_properties.get("size")
if size is not None:
if not size.isnumeric():
print("Non numeric value in size value")
return None
query += f" AND size >= {int(size) - SIZE_TOLERANCE}"
query += f" AND size <= {int(size) + SIZE_TOLERANCE}"
print(query)
conn = sqlite3.connect(PATH_DB)
c = conn.cursor()
c.execute(query)
result = c.fetchall()
c.close()
return(result)
#
pill_1 = {
"imprints": ["B", "IN"]
, "colors": ["YELLOW", "ORANGE"]
, "shape": ["OVAL", "ROUND"]
, "size": "12"
}
for record in query_pills(pill_1):
print("----")
print(record)
# print(f"name={record[1]}; imprint={record[2]}; code={record[0]}")
# Possible shapes:
# BULLET
# CAPSULE
# CLOVER
# DIAMOND
# DOUBLE CIRCLE
# FREEFORM
# HEXAGON (6 SIDED)
# OCTAGON (8 SIDED)
# OVAL
# PENTAGON (5 SIDED)
# RECTANGLE
# ROUND
# SEMI-CIRCLE
# SQUARE
# TEAR
# TRAPEZOID
# TRIANGLE
# Possible colors:
# BLACK
# BLUE
# BROWN
# GRAY
# GREEN
# ORANGE
# PINK
# PURPLE
# RED
# TURQUOISE
# WHITE
# YELLOW
```
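`query_pills` defends against SQL injection by rejecting non-alphanumeric input and then interpolating the values into the statement. An equivalent and generally safer pattern is to let sqlite3 bind the values; the sketch below rebuilds the same filters with `?` placeholders (it assumes the same `pills` table and column names as above).
```python
# sketch: the same filters expressed with bound parameters
import sqlite3

def query_pills_bound(pill_properties, db_path=PATH_DB, tolerance=SIZE_TOLERANCE):
    query = "SELECT * FROM pills WHERE 1=1"
    args = []
    for imprint in pill_properties.get("imprints", []):
        query += " AND imprint LIKE ?"
        args.append("%" + imprint + "%")
    colors = pill_properties.get("colors")
    if colors:
        query += " AND color IN ({})".format(", ".join("?" * len(colors)))
        args.extend(colors)
    shapes = pill_properties.get("shape")
    if shapes:
        query += " AND shape IN ({})".format(", ".join("?" * len(shapes)))
        args.extend(shapes)
    size = pill_properties.get("size")
    if size is not None:
        query += " AND size BETWEEN ? AND ?"
        args.extend([int(size) - tolerance, int(size) + tolerance])
    with sqlite3.connect(db_path) as conn:
        return conn.execute(query, args).fetchall()
```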
#### File: experiments/xml_processing/x1_test.py
```python
import xml.etree.ElementTree as ET
# Only Capsules, Pills, Tablets (from https://www.fda.gov/industry/structured-product-labeling-resources/dosage-forms)
FORMS_TO_PROCESS = {"C25158", "C42895", "C42896", "C42917", "C42902",
"C42904", "C42916", "C42928", "C42936", "C42954", "C25394", "C42998",
"C42893", "C124794", "C42897", "C60997", "C42905", "C42997", "C42910",
"C42927", "C42931", "C42930", "C61004", "C61005", "C42964", "C42963",
"C42999", "C61006", "C42985", "C42992", "C147579"}
def get_imprint(el_tree):
elem = el_tree.find(".//*[@code='SPLIMPRINT']..{*}value")
return(elem.text)
def get_shape(el_tree):
elem = el_tree.find(".//*[@code='SPLSHAPE']..{*}value")
return(elem.attrib["displayName"])
def get_color(el_tree):
try:
elem = el_tree.find(".//*[@code='SPLCOLOR']..{*}value")
return(elem.attrib["displayName"])
except:
return("")
def get_size(el_tree):
try:
elem = el_tree.find(".//*[@code='SPLSIZE']..{*}value")
return(elem.attrib["value"])
except:
return("")
def get_unit(el_tree):
try:
elem = el_tree.find(".//*[@code='SPLSIZE']..{*}value")
return(elem.attrib["unit"])
except:
return("")
def get_name(el_tree):
try:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedProduct/{*}name")
return(elem.text)
except:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedMedicine/{*}name")
return(elem.text)
def get_code(el_tree):
try:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedProduct/{*}code")
return(elem.attrib["code"])
except:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedMedicine/{*}code")
return(elem.attrib["code"])
def get_form_code(el_tree):
try:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedProduct/{*}formCode")
return(elem.attrib["code"])
except:
elem = el_tree.find("./{*}manufacturedProduct/{*}manufacturedMedicine/{*}formCode")
return(elem.attrib["code"])
tree = ET.parse("pill_info8.xml")
root = tree.getroot()
# result = root.findall(".*[@name='Singapore']/year")
# result = root.findall(".//neighbor[2]")
# result = root.findall(".//*[@code='SPLIMPRINT']/../{*}value")
# result = root.findall(".//*[@code='SPLIMPRINT']..{*}value")
# result = root.find(".//*[@code='SPLIMPRINT']..{*}value")
# result = root.findall(".//*[@code='SPLCOLOR']..{*}value")
subjects = root.findall(".//{*}subject")
# print(f"text={result.text}")
print(subjects)
for elem in subjects:
print("-----")
# print(elem)
# print(f"tag={elem.tag}")
# print(f"text={elem.text}")
# print(f"attribs={elem.attrib}")
# print(f"shape={elem.attrib['displayName']}")
# TODO check if form code is in tablets list
# https://www.fda.gov/industry/structured-product-labeling-resources/dosage-forms
try:
imprint = get_imprint(elem)
except:
# if there's no imprint - don't process this subject
continue
print(f"imprint={imprint}")
form_code = get_form_code(elem)
print(f"form_code={form_code}")
if form_code not in FORMS_TO_PROCESS:
# print("not our form - skipping")
continue
print(f"shape={get_shape(elem)}")
print(f"color={get_color(elem)}")
print(f"size={get_size(elem)}")
print(f"unit={get_unit(elem)}")
print(f"name={get_name(elem)}")
print(f"code={get_code(elem)}")
``` |
{
"source": "10MINT/proethos2",
"score": 4
} |
#### File: proethos2/sandbox/parse_xml_ictrp.py
```python
import os
import sys
from xmltodict import parse
def print_keys(data, prefix=None):
for key, value in data.items():
output_string = key
if prefix:
output_string = "{0}/{1}".format(prefix, key)
if type(value) == type(data):
print_keys(value, prefix=output_string)
else:
print(output_string)
xml_filename = sys.argv[1]
with open(xml_filename) as handle:
data = handle.read()
data = parse(data)
print_keys(data)
``` |
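`print_keys` walks the nested dictionary produced by `xmltodict` and prints a slash-separated path for every leaf value. On a toy document it behaves roughly as follows (expected output shown as comments):
```python
from xmltodict import parse

doc = parse("<trial><main><id>NCT001</id><title>t</title></main></trial>")
print_keys(doc)
# trial/main/id
# trial/main/title
```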
{
"source": "10mohi6/bitbank-api-python-client",
"score": 3
} |
#### File: bitbank-api-python-client/bitbank_client/sync.py
```python
import hashlib
import requests
import time
import hmac
import json
import urllib
class Client(object):
def __init__(self, **kwargs):
self.public_origin = kwargs.get('public_origin', 'https://public.bitbank.cc')
self.private_origin = kwargs.get('private_origin', 'https://api.bitbank.cc')
self.public_key = kwargs.get('public_key', None)
if self.public_key is None:
raise Exception('api key is absent.')
self.private_key = kwargs.get('private_key', None)
if self.private_key is None:
raise Exception('secret key is absent.')
self.timeout = kwargs.get('timeout', None)
def _requests_public(self, path):
uri = self.public_origin + path
res = requests.get(uri, timeout=self.timeout)
return res
def _requests_private(self, path, method='GET', params=None):
nonce = str(time.time())
if method == 'GET':
text = nonce + path + urllib.parse.urlencode(params)
uri = '{0}{1}{2}'.format(self.private_origin, path, urllib.parse.urlencode(params))
else: #method == 'POST'
text = nonce + json.dumps(params)
uri = '{0}{1}'.format(self.private_origin, path)
headers = {
'ACCESS-KEY': self.public_key,
'ACCESS-NONCE': nonce,
'ACCESS-SIGNATURE': self._signature(text),
'Content-Type': 'application/json'
}
if method == 'GET':
res = requests.get(uri, headers=headers, timeout=self.timeout, params=params)
else: #method == 'POST'
res = requests.post(uri, headers=headers, timeout=self.timeout, data=params)
return res
def _signature(self, params):
sign = hmac.new(self.private_key.encode('utf-8'), params.encode('utf-8'), hashlib.sha256).hexdigest()
return sign
def get_ticker(self, **kwargs):
params = kwargs
path = '/{0}/ticker'.format(params['pair'])
data = self._requests_public(path)
return data
def get_depth(self, **kwargs):
params = kwargs
path = '/{0}/depth'.format(params['pair'])
data = self._requests_public(path)
return data
def get_transactions(self, **kwargs):
params = kwargs
path = '/{0}/transactions'.format(params['pair'])
yyyymmdd = params.get('yyyymmdd', None)
if yyyymmdd is not None:
path += '/{0}'.format(params['yyyymmdd'])
data = self._requests_public(path)
return data
def get_candlestick(self, **kwargs):
params = kwargs
path = '/{0}/candlestick/{1}/{2}'.format(params['pair'], params['candle_type'], params['yyyymmdd'])
data = self._requests_public(path)
return data
def get_assets(self, **kwargs):
params = kwargs
path = '/v1/user/assets'
data = self._requests_private(path, params=params)
return data
def get_order(self, **kwargs):
params = kwargs
path = '/v1/user/spot/order'
data = self._requests_private(path, params=params)
return data
def order(self, **kwargs):
params = kwargs
path = '/v1/user/spot/order'
data = self._requests_private(path, method='POST', params=params)
return data
def cancel_order(self, **kwargs):
params = kwargs
path = '/v1/user/spot/cancel_order'
data = self._requests_private(path, method='POST', params=params)
return data
def cancel_orders(self, **kwargs):
params = kwargs
path = '/v1/user/spot/cancel_orders'
data = self._requests_private(path, method='POST', params=params)
return data
def orders_info(self, **kwargs):
params = kwargs
path = '/v1/user/spot/orders_info'
data = self._requests_private(path, method='POST', params=params)
return data
def get_active_orders(self, **kwargs):
params = kwargs
path = '/v1/user/spot/active_orders'
data = self._requests_private(path, params=params)
return data
def get_trade_history(self, **kwargs):
params = kwargs
path = '/v1/user/spot/trade_history'
data = self._requests_private(path, params=params)
return data
def get_withdrawal_account(self, **kwargs):
params = kwargs
path = '/v1/user/withdrawal_account'
data = self._requests_private(path, params=params)
return data
def request_withdrawal(self, **kwargs):
params = kwargs
path = '/v1/user/request_withdrawal'
data = self._requests_private(path, method='POST', params=params)
return data
``` |
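A minimal usage sketch for the client above, assuming the package layout shown in the file path; the key values are placeholders, and every method returns a raw `requests.Response`.
```python
# usage sketch (placeholder keys)
from bitbank_client.sync import Client

client = Client(public_key='YOUR_API_KEY', private_key='YOUR_API_SECRET')

ticker = client.get_ticker(pair='btc_jpy')   # public endpoint
print(ticker.json())

assets = client.get_assets()                 # private endpoint, needs valid keys
print(assets.json())
```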
{
"source": "10mohi6/bitflyer-api-python-client",
"score": 3
} |
#### File: bitflyer-api-python-client/bitflyer_client/sync.py
```python
import hashlib
import requests
import time
import hmac
import json
class Client(object):
def __init__(self, **kwargs):
self.origin = kwargs.get('origin', 'https://api.bitflyer.jp')
public_key = kwargs.get('public_key', None)
if public_key is None:
raise Exception('public key is absent.')
self.public_key = public_key
private_key = kwargs.get('private_key', None)
if private_key is None:
raise Exception('private key is absent.')
self.private_key = private_key
self.timeout = kwargs.get('timeout', None)
def _request(self, path, method='GET', params=None):
uri = '{0}{1}'.format(self.origin, path)
timestamp = str(time.time())
text = timestamp + method + path
if len(params) > 0:
text += json.dumps(params)
headers = {
'ACCESS-KEY': self.public_key,
'ACCESS-TIMESTAMP': timestamp,
'ACCESS-SIGN': self._signature(text),
'Content-Type': 'application/json'
}
if method == 'GET':
res = requests.get(uri, headers=headers, timeout=self.timeout, params=params)
else: #method == 'POST'
res = requests.post(uri, headers=headers, timeout=self.timeout, data=params)
return res
def _signature(self, params):
sign = hmac.new(self.private_key.encode('utf-8'), params.encode('utf-8'), hashlib.sha256).hexdigest()
return sign
def markets(self, **kwargs):
path = '/v1/markets'
params = kwargs
data = self._request(path, params=params)
return data
def board(self, **kwargs):
path = '/v1/board'
params = kwargs
data = self._request(path, params=params)
return data
def ticker(self, **kwargs):
path = '/v1/ticker'
params = kwargs
data = self._request(path, params=params)
return data
def executions(self, **kwargs):
path = '/v1/executions'
params = kwargs
data = self._request(path, params=params)
return data
def getboardstate(self, **kwargs):
path = '/v1/getboardstate'
params = kwargs
data = self._request(path, params=params)
return data
def gethealth(self, **kwargs):
path = '/v1/gethealth'
params = kwargs
data = self._request(path, params=params)
return data
def getchats(self, **kwargs):
path = '/v1/getchats'
params = kwargs
data = self._request(path, params=params)
return data
def getpermissions(self, **kwargs):
path = '/v1/me/getpermissions'
params = kwargs
data = self._request(path, params=params)
return data
def getbalance(self, **kwargs):
path = '/v1/me/getbalance'
params = kwargs
data = self._request(path, params=params)
return data
def getcollateral(self, **kwargs):
path = '/v1/me/getcollateral'
params = kwargs
data = self._request(path, params=params)
return data
def getcollateralaccounts(self, **kwargs):
path = '/v1/me/getcollateralaccounts'
params = kwargs
data = self._request(path, params=params)
return data
def getaddresses(self, **kwargs):
path = '/v1/me/getaddresses'
params = kwargs
data = self._request(path, params=params)
return data
def getcoinins(self, **kwargs):
path = '/v1/me/getcoinins'
params = kwargs
data = self._request(path, params=params)
return data
def getcoinouts(self, **kwargs):
path = '/v1/me/getcoinouts'
params = kwargs
data = self._request(path, params=params)
return data
def getbankaccounts(self, **kwargs):
path = '/v1/me/getbankaccounts'
params = kwargs
data = self._request(path, params=params)
return data
def getdeposits(self, **kwargs):
path = '/v1/me/getdeposits'
params = kwargs
data = self._request(path, params=params)
return data
def withdraw(self, **kwargs):
path = '/v1/me/withdraw'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def getwithdrawals(self, **kwargs):
path = '/v1/me/getwithdrawals'
params = kwargs
data = self._request(path, params=params)
return data
def sendchildorder(self, **kwargs):
path = '/v1/me/sendchildorder'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def cancelchildorder(self, **kwargs):
path = '/v1/me/cancelchildorder'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def sendparentorder(self, **kwargs):
path = '/v1/me/sendparentorder'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def cancelparentorder(self, **kwargs):
path = '/v1/me/cancelparentorder'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def cancelallchildorders(self, **kwargs):
path = '/v1/me/cancelallchildorders'
params = kwargs
data = self._request(path, method='POST', params=params)
return data
def getchildorders(self, **kwargs):
path = '/v1/me/getchildorders'
params = kwargs
data = self._request(path, params=params)
return data
def getparentorders(self, **kwargs):
path = '/v1/me/getparentorders'
params = kwargs
data = self._request(path, params=params)
return data
def getparentorder(self, **kwargs):
path = '/v1/me/getparentorder'
params = kwargs
data = self._request(path, params=params)
return data
def getexecutions(self, **kwargs):
path = '/v1/me/getexecutions'
params = kwargs
data = self._request(path, params=params)
return data
def getpositions(self, **kwargs):
path = '/v1/me/getpositions'
params = kwargs
data = self._request(path, params=params)
return data
def getcollateralhistory(self, **kwargs):
path = '/v1/me/getcollateralhistory'
params = kwargs
data = self._request(path, params=params)
return data
def gettradingcommission(self, **kwargs):
path = '/v1/me/gettradingcommission'
params = kwargs
data = self._request(path, params=params)
return data
``` |
{
"source": "10mohi6/bybit-backtest-python",
"score": 3
} |
#### File: bybit-backtest-python/bybit_backtest/bybit_backtest.py
```python
from typing import Tuple
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import requests
from bs4 import BeautifulSoup
from sqlalchemy import create_engine
import urllib.request
import gzip
import datetime
class Backtest(object):
def __init__(
self,
*,
symbol: str = "BTCUSD",
sqlite_file_name: str = "backtest.sqlite3",
from_date: str = "",
to_date: str = "",
qty: float = 0.001,
interval: str = "1T", # 5-60S(second), 1-60T(minute), 1-24H(hour)
download_data_dir: str = "data",
) -> None:
self.interval = interval
self.download_data_dir = download_data_dir
self.qty = qty
self.take_profit = 0
self.stop_loss = 0
self.buy_entry = (
self.buy_exit
) = self.sell_entry = self.sell_exit = pd.DataFrame()
self.symbol = symbol
self.engine = create_engine(f"sqlite:///{sqlite_file_name}")
self.from_date = from_date
self.to_date = to_date
if not os.path.exists(self.download_data_dir):
os.mkdir(self.download_data_dir)
def strategy(self) -> None:
pass
def _download_data_to_db(self) -> None:
data_url = f"https://public.bybit.com/trading/{self.symbol}/"
res = requests.get(data_url)
if res.status_code != 200:
raise Exception(f"requests error {res.status_code} {res.json()}")
interval = "5S"
soup = BeautifulSoup(res.text, "lxml")
for link in soup.find_all("a"):
name = link.get("href")
if self.from_date != "" and f"{self.symbol}{self.from_date}.csv.gz" > name:
continue
if self.to_date != "" and f"{self.symbol}{self.to_date}.csv.gz" < name:
continue
fname = f"{self.download_data_dir}/{name}"
if os.path.exists(fname):
continue
data = urllib.request.urlopen(f"{data_url}{name}").read()
with open(fname, mode="wb") as f:
f.write(data)
print(f"Downloaded {name}")
json = []
with gzip.open(fname, mode="rt") as fp:
for index, line in enumerate(fp.read().split("\n")):
ary = line.split(",")
if index == 0:
continue
if len(line) == 0:
continue
json.append(
{
"time": datetime.datetime.fromtimestamp(
int(float(ary[0])), datetime.timezone.utc
),
"size": float(ary[3]),
"price": float(ary[4]),
}
)
df = pd.DataFrame(json)
df.index = pd.DatetimeIndex(df["time"])
df.index.names = ["T"]
df = (
df.resample(interval).agg(
{
"price": "ohlc",
"size": "sum",
}
)
).fillna(method="bfill")
df.columns = ["O", "H", "L", "C", "V"]
df.index.astype(str)
df.to_sql(self.symbol, con=self.engine, if_exists="append", index_label="T")
def _create_candles(self) -> None:
sql = ""
where = []
if self.from_date != "":
where.append(f" T >= '{self.from_date} 00:00:00' ")
if self.to_date != "":
where.append(f" T <= '{self.to_date} 00:00:00' ")
if len(where) > 0:
sql = f"WHERE{'and'.join(where)}"
self.df = pd.read_sql_query(
f"SELECT * FROM {self.symbol} {sql} ORDER BY T",
self.engine,
index_col="T",
)
self.df.index = pd.to_datetime(self.df.index)
if self.interval != "5S":
self.df = (
self.df.resample(self.interval).agg(
{
"O": "first",
"H": "max",
"L": "min",
"C": "last",
"V": "sum",
}
)
).fillna(method="bfill")
def run(self, filename: str = "") -> None:
self._download_data_to_db()
self._create_candles()
self.strategy()
o = self.df.O.values
L = self.df.L.values
h = self.df.H.values
N = len(self.df)
long_trade = np.zeros(N)
short_trade = np.zeros(N)
# buy entry
buy_entry_s = np.hstack((False, self.buy_entry[:-1])) # shift
long_trade[buy_entry_s] = o[buy_entry_s]
# buy exit
buy_exit_s = np.hstack((False, self.buy_exit[:-2], True)) # shift
long_trade[buy_exit_s] = -o[buy_exit_s]
# sell entry
sell_entry_s = np.hstack((False, self.sell_entry[:-1])) # shift
short_trade[sell_entry_s] = o[sell_entry_s]
# sell exit
sell_exit_s = np.hstack((False, self.sell_exit[:-2], True)) # shift
short_trade[sell_exit_s] = -o[sell_exit_s]
long_pl = pd.Series(np.zeros(N)) # profit/loss of buy position
short_pl = pd.Series(np.zeros(N)) # profit/loss of sell position
buy_price = sell_price = 0
long_rr = [] # long return rate
short_rr = [] # short return rate
stop_loss = take_profit = 0
for i in range(1, N):
# buy entry
if long_trade[i] > 0:
if buy_price == 0:
buy_price = long_trade[i]
short_trade[i] = -buy_price # sell exit
else:
long_trade[i] = 0
# sell entry
if short_trade[i] > 0:
if sell_price == 0:
sell_price = short_trade[i]
long_trade[i] = -sell_price # buy exit
else:
short_trade[i] = 0
# buy exit
if long_trade[i] < 0:
if buy_price != 0:
long_pl[i] = (
-(buy_price + long_trade[i]) * self.qty
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
else:
long_trade[i] = 0
# sell exit
if short_trade[i] < 0:
if sell_price != 0:
short_pl[i] = (
sell_price + short_trade[i]
) * self.qty # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
else:
short_trade[i] = 0
# close buy position with stop loss
if buy_price != 0 and self.stop_loss > 0:
stop_price = buy_price - self.stop_loss
if L[i] <= stop_price:
long_trade[i] = -stop_price
long_pl[i] = (
-(buy_price + long_trade[i]) * self.qty
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
stop_loss += 1
# close buy position with take profit
if buy_price != 0 and self.take_profit > 0:
limit_price = buy_price + self.take_profit
if h[i] >= limit_price:
long_trade[i] = -limit_price
long_pl[i] = (
-(buy_price + long_trade[i]) * self.qty
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
take_profit += 1
# close sell position with stop loss
if sell_price != 0 and self.stop_loss > 0:
stop_price = sell_price + self.stop_loss
if h[i] >= stop_price:
short_trade[i] = -stop_price
short_pl[i] = (
sell_price + short_trade[i]
) * self.qty # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
stop_loss += 1
# close sell position with take profit
if sell_price != 0 and self.take_profit > 0:
limit_price = sell_price - self.take_profit
if L[i] <= limit_price:
short_trade[i] = -limit_price
short_pl[i] = (
sell_price + short_trade[i]
) * self.qty # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
take_profit += 1
win_trades = np.count_nonzero(long_pl.clip(lower=0)) + np.count_nonzero(
short_pl.clip(lower=0)
)
lose_trades = np.count_nonzero(long_pl.clip(upper=0)) + np.count_nonzero(
short_pl.clip(upper=0)
)
trades = (np.count_nonzero(long_trade) // 2) + (
np.count_nonzero(short_trade) // 2
)
gross_profit = long_pl.clip(lower=0).sum() + short_pl.clip(lower=0).sum()
gross_loss = long_pl.clip(upper=0).sum() + short_pl.clip(upper=0).sum()
profit_pl = gross_profit + gross_loss
self.equity = (long_pl + short_pl).cumsum()
mdd = (self.equity.cummax() - self.equity).max()
self.return_rate = pd.Series(short_rr + long_rr)
s = pd.Series(dtype="object")
s.loc["total profit"] = round(profit_pl, 3)
s.loc["total trades"] = trades
s.loc["win rate"] = round(win_trades / trades * 100, 3)
s.loc["profit factor"] = round(-gross_profit / gross_loss, 3)
s.loc["maximum drawdown"] = round(mdd, 3)
s.loc["recovery factor"] = round(profit_pl / mdd, 3)
s.loc["riskreward ratio"] = round(
-(gross_profit / win_trades) / (gross_loss / lose_trades), 3
)
s.loc["sharpe ratio"] = round(
self.return_rate.mean() / self.return_rate.std(), 3
)
s.loc["average return"] = round(self.return_rate.mean(), 3)
s.loc["stop loss"] = stop_loss
s.loc["take profit"] = take_profit
print(s)
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(
wspace=0.2, hspace=0.5, left=0.095, right=0.95, bottom=0.095, top=0.95
)
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(self.df.C, label="close")
ax1.legend()
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(self.equity, label="equity")
ax2.legend()
ax3 = fig.add_subplot(3, 1, 3)
ax3.hist(self.return_rate, 50, rwidth=0.9)
ax3.axvline(
sum(self.return_rate) / len(self.return_rate),
color="orange",
label="average return",
)
ax3.legend()
if filename == "":
plt.show()
else:
plt.savefig(filename)
def sma(self, *, period: int, price: str = "C") -> pd.DataFrame:
return self.df[price].rolling(period).mean()
def ema(self, *, period: int, price: str = "C") -> pd.DataFrame:
return self.df[price].ewm(span=period).mean()
def bbands(
self, *, period: int = 20, band: int = 2, price: str = "C"
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
std = self.df[price].rolling(period).std()
mean = self.df[price].rolling(period).mean()
return mean + (std * band), mean, mean - (std * band)
def macd(
self,
*,
fast_period: int = 12,
slow_period: int = 26,
signal_period: int = 9,
price: str = "C",
) -> Tuple[pd.DataFrame, pd.DataFrame]:
macd = (
self.df[price].ewm(span=fast_period).mean()
- self.df[price].ewm(span=slow_period).mean()
)
signal = macd.ewm(span=signal_period).mean()
return macd, signal
def stoch(
self, *, k_period: int = 5, d_period: int = 3
) -> Tuple[pd.DataFrame, pd.DataFrame]:
k = (
(self.df.C - self.df.L.rolling(k_period).min())
/ (self.df.H.rolling(k_period).max() - self.df.L.rolling(k_period).min())
* 100
)
d = k.rolling(d_period).mean()
return k, d
def rsi(self, *, period: int = 14, price: str = "C") -> pd.DataFrame:
return 100 - 100 / (
1
- self.df[price].diff().clip(lower=0).rolling(period).mean()
/ self.df[price].diff().clip(upper=0).rolling(period).mean()
)
```
#### File: bybit-backtest-python/tests/test_bybit_backtest.py
```python
import pytest
from bybit_backtest import Backtest
@pytest.fixture(scope="module", autouse=True)
def scope_module():
class MyBacktest(Backtest):
def strategy(self):
fast_ma = self.ema(period=3)
slow_ma = self.ema(period=5)
self.sell_exit = self.buy_entry = (fast_ma > slow_ma) & (
fast_ma.shift() <= slow_ma.shift()
)
self.buy_exit = self.sell_entry = (fast_ma < slow_ma) & (
fast_ma.shift() >= slow_ma.shift()
)
self.qty = 0.1
self.stop_loss = 5
self.take_profit = 10
yield MyBacktest(
symbol="BTCUSD",
sqlite_file_name="backtest.sqlite3",
from_date="2020-10-01",
to_date="2020-10-10",
interval="1T",
download_data_dir="data",
)
@pytest.fixture(scope="function", autouse=True)
def backtest(scope_module):
yield scope_module
# @pytest.mark.skip
def test_backtest(backtest):
backtest.run("backtest.png")
``` |
{
"source": "10mohi6/discord-webhook-python",
"score": 2
} |
#### File: discord-webhook-python/tests/test_discordwebhook.py
```python
import os
import pytest
import time
import requests
from discordwebhook import Discord
@pytest.fixture(scope="module", autouse=True)
def scope_module():
url = os.environ["DISCORD_WEBHOOK_URL"]
yield Discord(url=url)
@pytest.fixture(scope="function", autouse=True)
def discord(scope_module):
time.sleep(1)
yield scope_module
# @pytest.mark.skip
def test_discord_post_basic(discord):
expected = requests.codes.no_content
actual = discord.post(content="Hello, world.").status_code
assert expected == actual
# @pytest.mark.skip
def test_discord_post_basic_username(discord):
expected = requests.codes.no_content
actual = discord.post(
content="Hello, world.",
username="10mohi6",
avatar_url="https://avatars2.githubusercontent.com/u/38859131?s=460&v=4",
).status_code
assert expected == actual
# @pytest.mark.skip
def test_discord_post_basic_embed(discord):
expected = requests.codes.no_content
actual = discord.post(
embeds=[{"title": "Embed Title", "description": "Embed description"}],
).status_code
assert expected == actual
# @pytest.mark.skip
def test_discord_post_advanced_embed(discord):
expected = requests.codes.no_content
actual = discord.post(
embeds=[
{
"author": {
"name": "Embed Name",
"url": "https://github.com/10mohi6/discord-webhook-python",
"icon_url": "https://picsum.photos/24/24",
},
"title": "Embed Title",
"description": "Embed description",
"fields": [
{"name": "Field Name 1", "value": "Value 1", "inline": True},
{"name": "Field Name 2", "value": "Value 2", "inline": True},
{"name": "Field Name 3", "value": "Field Value 3"},
],
"thumbnail": {"url": "https://picsum.photos/80/60"},
"image": {"url": "https://picsum.photos/400/300"},
"footer": {
"text": "Embed Footer",
"icon_url": "https://picsum.photos/20/20",
},
}
],
).status_code
assert expected == actual
# @pytest.mark.skip
def test_discord_post_send_file(discord):
expected = requests.codes.ok
actual = discord.post(
file={
"file1": open("tests/file1.jpg", "rb"),
"file2": open("tests/file2.jpg", "rb"),
},
).status_code
assert expected == actual
``` |
{
"source": "10mohi6/line-notify-python",
"score": 3
} |
#### File: line-notify-python/linenotipy/linenotipy.py
```python
import requests
class Line:
def __init__(self, *, token):
self.url = "https://notify-api.line.me/api/notify"
self.headers = {"Authorization": "Bearer " + token}
def post(self, **kwargs):
files = kwargs.get("imageFile", None)
if files is not None:
files = {"imageFile": open(kwargs["imageFile"], "rb")}
return requests.post(
self.url, headers=self.headers, params=kwargs, files=files
).json()
``` |
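A short usage sketch for the wrapper above, assuming the package exposes `Line` at the top level; the token is a placeholder and the image path is illustrative.
```python
# usage sketch (placeholder token)
from linenotipy import Line

line = Line(token='YOUR_LINE_NOTIFY_TOKEN')
print(line.post(message='hello, world.'))
print(line.post(message='with image', imageFile='tests/test.png'))
```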
{
"source": "10mohi6/stock-backtest-python",
"score": 3
} |
#### File: stock-backtest-python/stock_backtest/stock_backtest.py
```python
from typing import Tuple
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import yfinance as yf
class Backtest(object):
def __init__(
self,
ticker: str,
*,
shares: float = 1,
interval: str = "1d", # 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
start: str = "",
end: str = "",
data_dir: str = ".",
) -> None:
self.start = start
self.end = end
self.ticker = ticker
self.interval = interval
self.data_dir = data_dir
self.take_profit = 0
self.stop_loss = 0
self.buy_entry = (
self.buy_exit
) = self.sell_entry = self.sell_exit = pd.DataFrame()
self.shares = shares
if not os.path.exists(data_dir):
os.mkdir(data_dir)
f = "{}/{}-{}-{}.csv".format(data_dir, ticker, start, end)
overtime = True
if os.path.exists(f):
overtime = datetime.now() > (
datetime.fromtimestamp(os.path.getmtime(f)) + timedelta(days=1)
)
if overtime:
if start != "" or end != "":
if start == "":
start = "1985-01-01"
if end == "":
end = datetime.now().strftime("%Y-%m-%d")
self.df = yf.download(
ticker,
start=start,
end=end,
interval=interval,
).set_axis(["O", "H", "L", "C", "AC", "V"], axis=1)
self.df.index.names = ["T"]
else:
self.df = yf.download(ticker, period="max", interval=interval).set_axis(
["O", "H", "L", "C", "AC", "V"], axis=1
)
self.df.index.names = ["T"]
self.df.to_csv(f)
else:
self.df = pd.read_csv(f, index_col="T", parse_dates=True)
def strategy(self) -> None:
pass
def run(self) -> dict:
self.strategy()
o = self.df.O.values
L = self.df.L.values
h = self.df.H.values
N = len(self.df)
long_trade = np.zeros(N)
short_trade = np.zeros(N)
# buy entry
buy_entry_s = np.hstack((False, self.buy_entry[:-1])) # shift
long_trade[buy_entry_s] = o[buy_entry_s]
# buy exit
buy_exit_s = np.hstack((False, self.buy_exit[:-2], True)) # shift
long_trade[buy_exit_s] = -o[buy_exit_s]
# sell entry
sell_entry_s = np.hstack((False, self.sell_entry[:-1])) # shift
short_trade[sell_entry_s] = o[sell_entry_s]
# sell exit
sell_exit_s = np.hstack((False, self.sell_exit[:-2], True)) # shift
short_trade[sell_exit_s] = -o[sell_exit_s]
long_pl = pd.Series(np.zeros(N)) # profit/loss of buy position
short_pl = pd.Series(np.zeros(N)) # profit/loss of sell position
buy_price = sell_price = 0
long_rr = [] # long return rate
short_rr = [] # short return rate
stop_loss = take_profit = 0
for i in range(1, N):
# buy entry
if long_trade[i] > 0:
if buy_price == 0:
buy_price = long_trade[i]
short_trade[i] = -buy_price # sell exit
else:
long_trade[i] = 0
# sell entry
if short_trade[i] > 0:
if sell_price == 0:
sell_price = short_trade[i]
long_trade[i] = -sell_price # buy exit
else:
short_trade[i] = 0
# buy exit
if long_trade[i] < 0:
if buy_price != 0:
long_pl[i] = (
-(buy_price + long_trade[i]) * self.shares
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
else:
long_trade[i] = 0
# sell exit
if short_trade[i] < 0:
if sell_price != 0:
short_pl[i] = (
sell_price + short_trade[i]
) * self.shares # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
else:
short_trade[i] = 0
# close buy position with stop loss
if buy_price != 0 and self.stop_loss > 0:
stop_price = buy_price - self.stop_loss
if L[i] <= stop_price:
long_trade[i] = -stop_price
long_pl[i] = (
-(buy_price + long_trade[i]) * self.shares
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
stop_loss += 1
# close buy position with take profit
if buy_price != 0 and self.take_profit > 0:
limit_price = buy_price + self.take_profit
if h[i] >= limit_price:
long_trade[i] = -limit_price
long_pl[i] = (
-(buy_price + long_trade[i]) * self.shares
) # profit/loss fixed
long_rr.append(
round(long_pl[i] / buy_price * 100, 2)
) # long return rate
buy_price = 0
take_profit += 1
# close sell position with stop loss
if sell_price != 0 and self.stop_loss > 0:
stop_price = sell_price + self.stop_loss
if h[i] >= stop_price:
short_trade[i] = -stop_price
short_pl[i] = (
sell_price + short_trade[i]
) * self.shares # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
stop_loss += 1
# close sell position with take profit
if sell_price != 0 and self.take_profit > 0:
limit_price = sell_price - self.take_profit
if L[i] <= limit_price:
short_trade[i] = -limit_price
short_pl[i] = (
sell_price + short_trade[i]
) * self.shares # profit/loss fixed
short_rr.append(
round(short_pl[i] / sell_price * 100, 2)
) # short return rate
sell_price = 0
take_profit += 1
win_trades = np.count_nonzero(long_pl.clip(lower=0)) + np.count_nonzero(
short_pl.clip(lower=0)
)
lose_trades = np.count_nonzero(long_pl.clip(upper=0)) + np.count_nonzero(
short_pl.clip(upper=0)
)
trades = (np.count_nonzero(long_trade) // 2) + (
np.count_nonzero(short_trade) // 2
)
gross_profit = long_pl.clip(lower=0).sum() + short_pl.clip(lower=0).sum()
gross_loss = long_pl.clip(upper=0).sum() + short_pl.clip(upper=0).sum()
profit_pl = gross_profit + gross_loss
self.equity = (long_pl + short_pl).cumsum()
mdd = (self.equity.cummax() - self.equity).max()
self.return_rate = pd.Series(short_rr + long_rr)
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(
wspace=0.2, hspace=0.5, left=0.095, right=0.95, bottom=0.095, top=0.95
)
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(self.df.C, label="close")
ax1.legend()
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(self.equity, label="equity")
ax2.legend()
ax3 = fig.add_subplot(3, 1, 3)
ax3.hist(self.return_rate, 50, rwidth=0.9)
ax3.axvline(
sum(self.return_rate) / len(self.return_rate),
color="orange",
label="average return",
)
ax3.legend()
plt.savefig(
"{}/{}-{}-{}.png".format(self.data_dir, self.ticker, self.start, self.end)
)
r = {
"total profit": round(profit_pl, 3),
"total trades": trades,
"win rate": round(win_trades / trades * 100, 3),
"profit factor": round(-gross_profit / gross_loss, 3),
"maximum drawdown": round(mdd, 3),
"recovery factor": round(profit_pl / mdd, 3),
"riskreward ratio": round(
-(gross_profit / win_trades) / (gross_loss / lose_trades), 3
),
"sharpe ratio": round(self.return_rate.mean() / self.return_rate.std(), 3),
"average return": round(self.return_rate.mean(), 3),
"stop loss": stop_loss,
"take profit": take_profit,
}
with open(
"{}/{}-{}-{}.json".format(self.data_dir, self.ticker, self.start, self.end),
"w",
) as f:
f.write(str(r))
return r
def sma(self, *, period: int, price: str = "C") -> pd.DataFrame:
return self.df[price].rolling(period).mean()
def ema(self, *, period: int, price: str = "C") -> pd.DataFrame:
return self.df[price].ewm(span=period).mean()
def bbands(
self, *, period: int = 20, band: int = 2, price: str = "C"
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
std = self.df[price].rolling(period).std()
mean = self.df[price].rolling(period).mean()
return mean + (std * band), mean, mean - (std * band)
def macd(
self,
*,
fast_period: int = 12,
slow_period: int = 26,
signal_period: int = 9,
price: str = "C",
) -> Tuple[pd.DataFrame, pd.DataFrame]:
macd = (
self.df[price].ewm(span=fast_period).mean()
- self.df[price].ewm(span=slow_period).mean()
)
signal = macd.ewm(span=signal_period).mean()
return macd, signal
def stoch(
self, *, k_period: int = 5, d_period: int = 3
) -> Tuple[pd.DataFrame, pd.DataFrame]:
k = (
(self.df.C - self.df.L.rolling(k_period).min())
/ (self.df.H.rolling(k_period).max() - self.df.L.rolling(k_period).min())
* 100
)
d = k.rolling(d_period).mean()
return k, d
def rsi(self, *, period: int = 14, price: str = "C") -> pd.DataFrame:
return 100 - 100 / (
1
- self.df[price].diff().clip(lower=0).rolling(period).mean()
/ self.df[price].diff().clip(upper=0).rolling(period).mean()
)
def atr(self, *, period: int = 14, price: str = "C") -> pd.DataFrame:
a = (self.df.H - self.df.L).abs()
b = (self.df.H - self.df[price].shift()).abs()
c = (self.df.L - self.df[price].shift()).abs()
df = pd.concat([a, b, c], axis=1).max(axis=1)
return df.ewm(span=period).mean()
``` |
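`Backtest.strategy` is intentionally empty, so the class is meant to be subclassed with the entry/exit series filled in, just like the bybit test earlier in this dump. A minimal moving-average-cross sketch for this stock variant could look like the following (the ticker, periods and import path are arbitrary; import from `stock_backtest.stock_backtest` instead if the package does not re-export `Backtest`).
```python
# usage sketch: simple SMA-cross strategy on top of the Backtest class above
from stock_backtest import Backtest

class SmaCrossBacktest(Backtest):
    def strategy(self):
        fast = self.sma(period=5)
        slow = self.sma(period=25)
        self.sell_exit = self.buy_entry = (fast > slow) & (fast.shift() <= slow.shift())
        self.buy_exit = self.sell_entry = (fast < slow) & (fast.shift() >= slow.shift())

bt = SmaCrossBacktest("AAPL", start="2015-01-01", end="2020-12-31")
print(bt.run())
```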
{
"source": "10mohi6/zaif-api-python-client",
"score": 2
} |
#### File: zaif-api-python-client/zaif_client/public.py
```python
from sync import Client
class Public(Client):
def currencies(self, **kwargs):
params = kwargs
path = '/api/1/currencies/{0}'.format(params['currency'])
data = self._request(path, params=params)
return data
def currency_pairs(self, **kwargs):
params = kwargs
path = '/api/1/currency_pairs/{0}'.format(params['currency_pair'])
data = self._request(path, params=params)
return data
def last_price(self, **kwargs):
params = kwargs
path = '/api/1/last_price/{0}'.format(params['currency_pair'])
data = self._request(path, params=params)
return data
def ticker(self, **kwargs):
params = kwargs
path = '/api/1/ticker/{0}'.format(params['currency_pair'])
data = self._request(path, params=params)
return data
def trades(self, **kwargs):
params = kwargs
path = '/api/1/trades/{0}'.format(params['currency_pair'])
data = self._request(path, params=params)
return data
def depth(self, **kwargs):
params = kwargs
path = '/api/1/depth/{0}'.format(params['currency_pair'])
data = self._request(path, params=params)
return data
```
#### File: zaif-api-python-client/zaif_client/sync.py
```python
import hashlib
import requests
import time
import urllib
import hmac
class Client(object):
def __init__(self, **kwargs):
self.origin = kwargs.get('origin', 'https://api.zaif.jp')
self.public_key = kwargs.get('public_key', None)
if self.public_key is None:
raise Exception('public key is absent.')
self.private_key = kwargs.get('private_key', None)
if self.private_key is None:
raise Exception('private key is absent.')
self.timeout = kwargs.get('timeout', None)
def _request(self, path, method='GET', params=None):
uri = '{0}{1}'.format(self.origin, path)
params['nonce'] = time.time()
headers = {
'key': self.public_key,
'sign': self._signature(params)
}
if method == 'GET':
res = requests.get(uri, headers=headers, timeout=self.timeout, params=params)
else: # method == 'POST'
res = requests.post(uri, headers=headers, timeout=self.timeout, data=params)
return res
def _signature(self, params):
sign = hmac.new(bytearray(self.private_key.encode('utf-8')), digestmod=hashlib.sha512)
sign.update(urllib.parse.urlencode(params).encode('utf-8'))
return sign.hexdigest()
``` |
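A usage sketch for the public endpoints; `Public` inherits the key checks from `Client`, so even public calls need (dummy) key strings here, and `currency_pair` must be passed as a keyword because each method reads it from `kwargs`.
```python
# usage sketch (Public as defined above; dummy keys satisfy Client.__init__)
public = Public(public_key='dummy', private_key='dummy')

res = public.ticker(currency_pair='btc_jpy')
print(res.json())

res = public.last_price(currency_pair='btc_jpy')
print(res.json())
```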
{
"source": "10motico/dogo-disocred-bot",
"score": 3
} |
#### File: 10motico/dogo-disocred-bot/botV02.py
```python
import discord
#import discord.voice_client
import time
import random
from discord.ext import commands
client = discord.Client()
TOKEN = "" # token goes here
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
if (message.content.startswith('dogo') or message.content.startswith('woof') or message.content.startswith("oof")):
print(str(message.content))
print(str(message.author))
print("_________")
f = open(str(time.gmtime())+".txt", "w+")
f.write(str(message.content)+"\n"+str(message.author)+"\n ___________")
f.close()
if message.content.startswith('dogo hello'):
msg = 'hello @'+str(message.author)
channel = message.channel
await channel.send(msg)
if message.content.startswith('dogo spam'):
channel = message.channel
#msg=message.content
# i=1
await channel.send("illegal command please fuck off")
#while (i<10):
# await channel.send(message.content[9:len(msg)])
# i=i+1
# time.sleep(0.5)
if message.content.startswith("dogo help") or message.content.startswith("dogo commands"):
channel = message.channel
await channel.send('prefix: dogo \n commends: \n dogo i have food- posts a gif related to food \n dogo are you really a dog- answers with yes\n dogo who is a good dogo- answers with "me!"\n "dogo lick" or "dogo no lick"- answers woth lick\n dogo echo - echos messages content\n "dogo pet" or "dogo i pet you"- gif of a peted dog\n "woof" or "dogo woof" - answers with a woof gif \n cute dogo- answers with a cute dogo gif \n dogo fight - a fight betwen dogo and the author \n oof - respones with oof and gmc time (without dogo prefix!) \n dogo time - responses with gmc time \n bot is still in dev and runs on some randome hardwere please be nice to dogo \n ho btw the messages are being loged so no braking dogo!')
if message.content.startswith('dogo echo'):
channel =message.channel
msg=message.content
#await commands.bot(";").delete_message(message)
lengh=len(msg)
await channel.send(msg[10:lengh])
if message.content.startswith("dogo i have food"):
channel = message.channel
await channel.send("https://tenor.com/R6ms.gif")
if message.content.startswith("dogo are you really a dog"):
channel = message.channel
await channel.send("yes, i am a real dog")
if message.content.startswith("who is a good dogo"):
channel = message.channel
await channel.send("me!")
if (message.content.startswith("dogo lick") or message.content.startswith("dogo no lick")):
channel = message.channel
await channel.send("lick ")
if (message.content.startswith("dogo pet") or message.content.startswith("dogo i pet you")):
channel = message.channel
await channel.send("thanks for the pet \n https://tenor.com/GtJ6.gif")
if (message.content.startswith("dogo woof") or message.content.startswith("woof")):
channel = message.channel
await channel.send("https://tenor.com/RGLA.gif")
if (message.content.startswith("cute dogo")):
channel = message.channel
await channel.send("https://tenor.com/vklV.gif")
if (message.content.startswith("dogo fight")):
ans=random.randint(1,3)
channel = message.channel
if (ans==1):
await channel.send("dogo wins")
if (ans==2):
await channel.send(str(message.author)+" wins")
if ans==3:
await channel.send("its a tie")
if (message.content.startswith('dogo time')):
channel=message.channel
await channel.send(str(time.asctime(time.gmtime())))
if message.content.startswith("oof"):
channel=message.channel
await channel.send("oof happened at: GTC- "+str(time.asctime(time.gmtime())))
if message.content.startswith("WTF"):
channel=message.channel
await channel.send("https://tenor.com/view/ew-disgust-gif-3671470")
if message.content.startswith("kill") or message.content.startswith("dogo kill"):
channel=message.channel
await channel.send("https://media.giphy.com/media/26vUN6gcoBuBbO6ic/giphy.gif")
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
# await client.change_presence(name="with its booms day")
client.run(TOKEN)
``` |
{
"source": "10mubeen/PathMe-Viewer",
"score": 2
} |
#### File: src/pathme_viewer/graph_utils.py
```python
from operator import methodcaller
from flask import abort, Response, jsonify, send_file
from flask import current_app
from pybel_tools.mutation.metadata import serialize_authors
from pybel_tools.summary import relation_set_has_contradictions
from six import BytesIO, StringIO
from pathme_viewer.constants import PATHWAYS_ARGUMENT, RESOURCES_ARGUMENT
from pybel import to_bel_lines, to_graphml, to_bytes, to_csv
from pybel import union
from pybel.constants import *
from pybel.io import from_bytes
from pybel.struct import add_annotation_value
from pybel.struct.summary import get_annotation_values_by_annotation
import logging
log = logging.getLogger(__name__)  # used by merge_pathways below
def throw_parameter_error(parameter):
"""Return 500 error.
:param str parameter:
:return: HTTP error
"""
abort(500, '"{}" argument is missing in the request'.format(parameter))
def add_annotation_key(graph):
"""Add annotation key in data (in place operation).
:param pybel.BELGraph graph: BEL Graph
"""
for u, v, k in graph.edges(keys=True):
if ANNOTATIONS not in graph[u][v][k]:
graph[u][v][k][ANNOTATIONS] = {}
def process_request(request):
"""Process request and return it as a dict[pathway ids,resources].
:param flask.request request: http request
:rtype: dict
"""
pathways_list = request.args.getlist(PATHWAYS_ARGUMENT)
resources_list = request.args.getlist(RESOURCES_ARGUMENT)
if not resources_list:
throw_parameter_error(RESOURCES_ARGUMENT)
if not pathways_list:
throw_parameter_error(PATHWAYS_ARGUMENT)
return {
pathway_id: resource
for pathway_id, resource in zip(pathways_list, resources_list)
}
def merge_pathways(pathways):
"""Return merged graphs from pathways in the request.
:param dict pathways: pathways to be merged
:rtype: Optional[pybel.BELGraph]
"""
networks = []
for name, resource in pathways.items():
pathway = current_app.pathme_manager.get_pathway_by_id(name, resource)
if not pathway:
abort(
500,
'Pathway "{}" in resource "{}" was not found in the database. '
'Please check that you have used correctly the autocompletion form.'.format(
name, resource)
)
# Loads the BELGraph and adds annotations to track provenance later
graph = from_bytes(pathway.blob)
graph.annotation_list['Database'] = {'kegg', 'reactome', 'wikipathways'}
graph.annotation_pattern['PathwayID'] = '.*'
graph.annotation_pattern['Pathway name'] = '.*'
graph.annotation_list['Interesting edge'] = {'Contradicts', 'May contradict'}
add_annotation_key(graph)
add_annotation_value(graph, 'Pathway name', pathway.name)
add_annotation_value(graph, 'Database', pathway.resource_name)
add_annotation_value(graph, 'PathwayID', pathway.pathway_id)
log.debug('Adding graph {} {}:with {} nodes and {} edges'.format(
name, resource, graph.number_of_nodes(), graph.number_of_edges())
)
networks.append(graph)
if not networks:
abort(
500,
            'No pathways were requested. Please select at least one pathway.'
)
graph = union(networks)
contradicting_edges = get_contradiction_summary(graph)
for u, v, _ in contradicting_edges:
label_graph_edges(graph, u, v, 'Interesting edge', 'Contradicts')
return graph
def to_json_custom(graph, _id='id', source='source', target='target'):
"""Prepares JSON for the biological network explorer
:type graph: pybel.BELGraph
:param str _id: The key to use for the identifier of a node, which is calculated with an enumeration
:param str source: The key to use for the source node
:param str target: The key to use for the target node
:rtype: dict
"""
result = {}
mapping = {}
result['nodes'] = []
for i, node in enumerate(sorted(graph, key=methodcaller('as_bel'))):
nd = node.copy()
nd[_id] = node.sha512
nd['bel'] = node.as_bel()
if VARIANTS in nd or FUSION in nd or MEMBERS in nd:
nd['cname'] = nd['bel']
result['nodes'].append(nd)
mapping[node] = i
edge_set = set()
rr = {}
for u, v, data in graph.edges(data=True):
if data[RELATION] in TWO_WAY_RELATIONS and (u, v) != tuple(sorted((u, v), key=methodcaller('as_bel'))):
continue # don't keep two way edges twice
entry_code = u, v
if entry_code not in edge_set: # Avoids duplicate sending multiple edges between nodes with same relation
rr[entry_code] = {
source: mapping[u],
target: mapping[v],
'contexts': []
}
edge_set.add(entry_code)
payload = {
'bel': graph.edge_to_bel(u, v, data=data)
}
payload.update(data)
if data[RELATION] in CAUSAL_INCREASE_RELATIONS:
rr[entry_code][RELATION] = INCREASES
elif data[RELATION] in CAUSAL_DECREASE_RELATIONS:
rr[entry_code][RELATION] = DECREASES
rr[entry_code]['contexts'].append(payload)
result['links'] = list(rr.values())
return result
def export_graph(graph, format=None):
"""Convert PyBEL graph to a different format.
    :param pybel.BELGraph graph: graph to export
    :param format: desired format
:return: graph representation in different format
"""
if format is None or format == 'json':
data = to_json_custom(graph)
return jsonify(data)
elif format == 'bytes':
data = BytesIO(to_bytes(graph))
return send_file(
data,
mimetype='application/octet-stream',
as_attachment=True,
attachment_filename='graph.gpickle'
)
elif format == 'bel':
serialize_authors(graph)
data = '\n'.join(to_bel_lines(graph))
return Response(data, mimetype='text/plain')
elif format == 'graphml':
bio = BytesIO()
to_graphml(graph, bio)
bio.seek(0)
return send_file(
bio,
mimetype='text/xml',
attachment_filename='graph.graphml',
as_attachment=True
)
elif format == 'csv':
bio = StringIO()
to_csv(graph, bio)
bio.seek(0)
data = BytesIO(bio.read().encode('utf-8'))
return send_file(
data,
mimetype="text/tab-separated-values",
attachment_filename="graph.tsv",
as_attachment=True
)
abort(500, '{} is not a valid format'.format(format))
def get_tree_annotations(graph):
"""Build tree structure with annotation for a given graph.
:param pybel.BELGraph graph: A BEL Graph
:return: The JSON structure necessary for building the tree box
:rtype: list[dict]
"""
annotations = get_annotation_values_by_annotation(graph)
return [
{
'text': annotation,
'children': [{'text': value} for value in sorted(values)]
}
for annotation, values in sorted(annotations.items())
]
def label_graph_edges(graph, u, v, annotation, value):
"""Label edges between two nodes with an annotation and a value.
:param pybel.BELGraph graph:
:param u: subject node
:param v: object node
:param str annotation: annotation to be labelled
:param str value: value to be labelled
"""
if annotation not in graph.defined_annotation_keywords:
raise ValueError('annotation not defined: {}'.format(annotation))
if not graph.has_edge(u, v):
        raise ValueError('edge does not exist')
# Iterate over all edges between u and v
for k, data in graph[u][v].items():
# Add annotation key in data if not exists
if ANNOTATIONS not in data:
graph[u][v][k][ANNOTATIONS] = {}
if annotation not in data[ANNOTATIONS]:
            graph[u][v][k][ANNOTATIONS][annotation] = {}
graph[u][v][k][ANNOTATIONS][annotation][value] = True
def relation_set_has_differences(relations):
"""Return if the set of relations contains differences.
:param set[str] relations: A set of relations
:rtype: bool
"""
has_causal = any(relation in CAUSAL_RELATIONS for relation in relations)
has_unspecific_event = any(relation in {ASSOCIATION} for relation in relations)
return 1 < sum([has_causal, has_unspecific_event])
def get_contradiction_summary(graph):
"""Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs
that have multiple, contradictory relations.
:param pybel.BELGraph graph: A BEL graph
:rtype: iter[tuple]
"""
for u, v in set(graph.edges()):
relations = {data[RELATION] for data in graph[u][v].values()}
if relation_set_has_contradictions(relations):
yield u, v, relations
```
#### File: src/pathme_viewer/manager.py
```python
import logging
from bio2bel.utils import get_connection
from sqlalchemy import create_engine, func, and_
from sqlalchemy.orm import scoped_session, sessionmaker
from .constants import MODULE_NAME
from .models import Base, Pathway
__all__ = [
'Manager'
]
log = logging.getLogger(__name__)
class Manager(object):
"""Database manager."""
def __init__(self, engine, session):
"""Init PathMe manager."""
self.engine = engine
self.session = session
self.create_all()
@staticmethod
def from_connection(connection=None):
connection = get_connection(MODULE_NAME, connection)
engine = create_engine(connection)
session_maker = sessionmaker(bind=engine, autoflush=False, expire_on_commit=False)
session = scoped_session(session_maker)
return Manager(engine, session)
def create_all(self, check_first=True):
"""Create tables for PathMe."""
Base.metadata.create_all(self.engine, checkfirst=check_first)
def drop_all(self, check_first=True):
"""Drop all tables for PathMe."""
Base.metadata.drop_all(self.engine, checkfirst=check_first)
"""Query methods"""
def count_pathways(self):
"""Count the pathways in the database.
:rtype: int
"""
return self.session.query(Pathway).count()
def count_pathways_by_resource(self):
"""Count the pathways in the database grouping by resource.
        :rtype: list[tuple[str, int]]
"""
return self.session.query(
Pathway.resource_name, func.count(Pathway.resource_name)
).group_by(Pathway.resource_name).all()
def get_all_pathways(self):
"""Get all pathways in the database.
:rtype: list[Pathway]
"""
return self.session.query(Pathway).all()
def get_pathway_by_id(self, pathway_id, resource_name):
"""Get pathway by canonical identifier.
:param str pathway_id: pathway identifier
:param str resource_name: name of the database
:rtype: Optional[Pathway]
"""
condition = and_(Pathway.pathway_id == pathway_id, Pathway.resource_name == resource_name)
return self.session.query(Pathway).filter(condition).one_or_none()
def get_pathway_by_name(self, pathway_name, resource_name):
"""Get pathway by name.
:param str pathway_name: pathway identifier
:param str resource_name: name of the database
:rtype: Optional[Pathway]
"""
condition = and_(Pathway.name == pathway_name, Pathway.resource_name == resource_name)
return self.session.query(Pathway).filter(condition).one_or_none()
def get_pathways_from_resource(self, resource_name):
"""Get pathways from a given database.
:param str resource_name: name of the database
:rtype: Optional[list[Pathway]]
"""
return self.session.query(Pathway).filter(Pathway.resource_name == resource_name).all()
def create_pathway(self, pathway_dict):
"""Create pathway.
:param dict pathway_dict: pathway identifier
:rtype: Pathway
"""
pathway = Pathway(**pathway_dict)
self.session.add(pathway)
self.session.commit()
return pathway
def delete_pathway(self, pathway_id, resource_name):
"""Delete a pathway.
:param str pathway_id: pathway identifier
:param str resource_name: name of the database
:rtype: bool
"""
pathway = self.get_pathway_by_id(pathway_id, resource_name)
if pathway:
self.session.delete(pathway)
self.session.commit()
return True
return False
def delete_all_pathways(self):
"""Delete all the pathways."""
self.session.query(Pathway).delete()
self.session.commit()
def delete_pathways_from_resource(self, resource_name):
"""Delete pathways from a given database.
:param str resource_name: name of the database
:rtype: bool
"""
pathways_in_resource = self.session.query(Pathway).filter(Pathway.resource_name == resource_name)
if not pathways_in_resource:
return False
pathways_in_resource.delete()
self.session.commit()
return True
def get_or_create_pathway(self, pathway_dict):
"""Get or create pathway.
:param dict pathway_dict: pathway info
:rtype: Pathway
"""
pathway = self.get_pathway_by_id(pathway_dict['pathway_id'], pathway_dict['resource_name'])
if pathway is None:
pathway = self.create_pathway(pathway_dict)
return pathway
def query_pathway_by_name(self, query, resource, limit=None):
"""Return all pathways having the query in their names.
:param str query: query string
:param Optional[int] limit: limit result query
:rtype: list[Pathway]
"""
q = self.session.query(Pathway).filter(Pathway.name.contains(query))
if limit:
q = q.limit(limit)
return q.all()
def query_pathway_by_name_and_resource(self, query, resource_name, limit=None):
"""Return all pathways having the query in their names.
:param str query: query string
:param str resource_name: database name
:param Optional[int] limit: limit result query
:rtype: list[Pathway]
"""
condition = and_(Pathway.name.contains(query), Pathway.resource_name == resource_name)
q = self.session.query(Pathway).filter(condition)
if limit:
q = q.limit(limit)
return q.all()
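# Usage sketch (assumption: not part of the original module; the connection string is
# a placeholder for illustration only).
if __name__ == '__main__':
    manager = Manager.from_connection('sqlite://')
    print(manager.count_pathways())
    print(manager.count_pathways_by_resource())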
``` |
{
"source": "1-0/neox_project",
"score": 2
} |
#### File: neox_project/api/serializers.py
```python
from rest_auth import serializers
from rest_framework import serializers as rf_serializers
from neox_project.models import CustomUser
from post import models
class PostSerializer(serializers.JWTSerializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class Meta:
fields = (
'id',
'title',
'user',
'content',
'pub_date',
)
model = models.Post
class RatingSerializer(rf_serializers.Serializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class Meta:
fields = (
'id',
'like',
'user',
'post',
)
model = models.Rating
class UserActivitySerializer(serializers.JWTSerializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class Meta:
fields = (
'id',
'username',
'date_joined',
)
model = CustomUser
class UserSerializer(rf_serializers.HyperlinkedModelSerializer):
class Meta:
model = CustomUser
fields = ('email', 'first_name', 'last_name', 'password')
        extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
password = validated_data.pop('password')
user = CustomUser(**validated_data)
user.set_password(password)
user.save()
return user
``` |
{
"source": "10nin/blo",
"score": 3
} |
#### File: blo/blo/DBControl.py
```python
import sqlite3
from datetime import datetime
from blo.BloArticle import BloArticle
class DBControl:
def __init__(self, db_name: str=":memory:"):
"""Initializer for blo blog engine database controller.
:param db_name: database file name connect to it database file. default use on memory database.
"""
self.db_conn = sqlite3.connect(db_name)
def create_tables(self):
"""Create tables for blo blog engine.
"""
c = self.db_conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS Articles (
id INTEGER PRIMARY KEY AUTOINCREMENT,
text TEXT,
digest TEXT UNIQUE,
updatedate TEXT);""")
c.execute("CREATE VIRTUAL TABLE IF NOT EXISTS Articles_fts USING fts4( words TEXT );")
self.db_conn.commit()
def close_connect(self):
"""Close database connect.
"""
self.db_conn.close()
self.db_conn = None
def is_exists(self, digest: str):
c = self.db_conn.cursor()
c.execute("SELECT * FROM Articles WHERE digest = ?;", (digest,))
ret = c.fetchall()
if ret is None:
return False
else:
return True
def insert_article(self, article: BloArticle, template_name: str=''):
"""Insert article html and wakati text to Article/Article_fts tables.
:param article: target article
:param template_name: template file name on template directory.
"""
assert(article is not None)
# if has not text data then no action on this method.
if article.has_text:
c = self.db_conn.cursor()
html = article.get_html(template_name)
digest = article.get_digest()
timestamp = self._get_timestamp()
wakati = article.get_wakati_txt()
if not self.is_exists(digest):
c.execute("INSERT INTO Articles (text, digest, updatedate) VALUES (?, ?, ?);", (html, digest, timestamp))
c.execute("INSERT INTO Articles_fts (words) VALUES (?);", (wakati,))
self.db_conn.commit()
def select_last_n(self, n) -> list:
assert(self.db_conn is not None)
c = self.db_conn.cursor()
c.execute("SELECT * FROM Articles ORDER BY updatedate DESC LIMIT ?;", (n,))
return c.fetchall()
@staticmethod
def _get_timestamp() -> str:
"""Get timestamp formatted yyyy/mm/dd hh:nn:ss"""
return datetime.now().strftime('%Y/%m/%d %H:%M:%S')
def _select_all(self, table_name: str) -> list:
"""Select all rows from table_name table. but this method is for testing only."""
c = self.db_conn.cursor()
c.execute("SELECT * FROM " + table_name)
return c.fetchall()
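# Minimal usage sketch (assumption: not part of the original module).
if __name__ == "__main__":
    _db = DBControl()  # defaults to an in-memory sqlite database
    _db.create_tables()
    print(_db.select_last_n(5))  # -> [] until articles are inserted
    _db.close_connect()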
``` |
{
"source": "10nin/wakame",
"score": 3
} |
#### File: wakame/tests/test_htmlConverter.py
```python
import os
from pathlib import Path
from unittest import TestCase
from wakame.html_converter import HtmlConverter
class TestHtmlConverter(TestCase):
def test_convert_to_html(self):
hc = HtmlConverter()
with open('test_files/test1.html', 'r') as f:
html = ''.join(f.readlines())
self.assertEqual(html, hc.convert_to_html('test_files/test1.rst', '../templates/article_template.html'))
def test_save_html(self):
hc = HtmlConverter()
p = 'test_files/test1.html'
html = hc.convert_to_html('test_files/test1.rst', '../templates/article_template.html')
hc.save_html(html, p)
self.assertTrue(Path(p).exists())
os.remove(p)
```
#### File: wakame/wakame/file_manager.py
```python
from hashlib import sha1
from os import path
from pathlib import Path
class FileManager:
def get_files(self, target_dir, ext):
return [str(e) for e in Path(target_dir).glob('*.' + ext)]
def get_hash(self, file_path):
with open(file_path, 'r') as f:
h = sha1()
h.update(''.join(f.readlines()).encode('utf-8'))
return h.hexdigest().upper()
def get_basename(self, file_path):
p = path.basename(file_path)
for e in Path(p).suffixes:
p = p.replace(e, '')
return p
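# Usage sketch (assumption: not part of the original module).
if __name__ == '__main__':
    fm = FileManager()
    for rst in fm.get_files('.', 'rst'):
        print(fm.get_basename(rst), fm.get_hash(rst))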
``` |
{
"source": "10notechnologies/troikajs_public",
"score": 2
} |
#### File: troikajs_public/game/utils.py
```python
def decode_bid (bid):
suit = bid[-1:]
if suit in ('C','H','D','S','N'):
bid = bid[:-1]
else:
suit = None
return bid, suit
def decode_card (card):
if card[0] == "3":
return 3, card[1]
if card[0] == "5":
return 5, card[1]
if card[0] == "7":
return 7, card[1]
if card[0] == "8":
return 8, card[1]
if card[0] == "9":
return 9, card[1]
if card[0] == "T":
return 10, card[1]
if card[0] == "J":
return 11, card[1]
if card[0] == "Q":
return 12, card[1]
if card[0] == "K":
return 13, card[1]
if card[0] == "A":
return 14, card[1]
return 0, ""
def get_better_bid (a,b):
value_a, suit_a = decode_bid(a)
value_b, suit_b = decode_bid(b)
# Convert "pass" to a value of 0
if value_a == "pass":
value_a = 0
if value_b == "pass":
value_b = 0
no_a = 0
if suit_a == 'N':
no_a = 1
no_b = 0
if suit_b == 'N':
no_b = 1
if int(value_a)*2+no_a > int(value_b)*2+no_b:
return a
else:
return b
def get_winner (first_suit, trump_suit, played_cards):
winner = 0
winner_score = 0
had_5 = False
had_3 = False
for i in range(0, 4):
i_score, i_suit = decode_card(played_cards[i])
if i_score == 3:
had_3 = True
if i_score == 5:
had_5 = True
if trump_suit == i_suit:
i_score += 200
elif first_suit == i_suit:
i_score += 100
if i_score > winner_score:
winner = i
winner_score = i_score
return winner
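# Usage sketch (assumption: not part of the original module): compare two bids and
# score a trick where hearts were led and spades are trump.
if __name__ == "__main__":
    print(get_better_bid("7N", "8H"))  # "8H": 8 tricks beats 7 no-trump here
    print(get_winner("H", "S", ["9H", "TH", "3S", "AH"]))  # 2: the lone trump wins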
``` |
{
"source": "10se1ucgo/cue_sdk",
"score": 3
} |
#### File: cue_sdk/cue_sdk/enumerations.py
```python
from enum import Enum, EnumMeta
__all__ = ['CLK', 'CDT', 'CPL', 'CLL', 'CDC', 'CAM', 'CE']
# Taken from six.py. I didn't use enough six functionality to justify requiring the entire module.
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
class CEnum(Enum):
"""ctypes compatible Enum"""
@classmethod
def from_param(cls, obj):
return obj.value
class KeywordMeta(EnumMeta):
"""god help me
things such as CLK_1 and CDC_None don't work as enums, as 1 and None are invalid Python identifiers.
    So, to make it more convenient, this maps CLK[1] to CLK._1, CDC[None] to CDC._None, etc.
    Also, it allows more convenient membership testing for string values.
"""
def __getitem__(self, item):
if not isinstance(item, str):
return super(KeywordMeta, self).__getitem__(str(item))
return super(KeywordMeta, self).__getitem__(item)
def __contains__(self, item):
contains = super(KeywordMeta, self).__contains__(item)
if contains is False:
return item in self.__members__
return contains
class CLK(with_metaclass(KeywordMeta, CEnum)):
"""
Enumeration containing available keys
"""
CLI_Invalid = 0 # dummy value
# keyboard leds
Escape = 1
F1 = 2
F2 = 3
F3 = 4
F4 = 5
F5 = 6
F6 = 7
F7 = 8
F8 = 9
F9 = 10
F10 = 11
F11 = 12
GraveAccentAndTilde = 13
_1 = 14
_2 = 15
_3 = 16
_4 = 17
_5 = 18
_6 = 19
_7 = 20
_8 = 21
_9 = 22
_0 = 23
MinusAndUnderscore = 24
Tab = 25
Q = 26
W = 27
E = 28
R = 29
T = 30
Y = 31
U = 32
I = 33
O = 34
P = 35
BracketLeft = 36
CapsLock = 37
A = 38
S = 39
D = 40
F = 41
G = 42
H = 43
J = 44
K = 45
L = 46
SemicolonAndColon = 47
ApostropheAndDoubleQuote = 48
LeftShift = 49
NonUsBackslash = 50
Z = 51
X = 52
C = 53
V = 54
B = 55
N = 56
M = 57
CommaAndLessThan = 58
PeriodAndBiggerThan = 59
SlashAndQuestionMark = 60
LeftCtrl = 61
LeftGui = 62
LeftAlt = 63
Lang2 = 64
Space = 65
Lang1 = 66
International2 = 67
RightAlt = 68
RightGui = 69
Application = 70
LedProgramming = 71
Brightness = 72
F12 = 73
PrintScreen = 74
ScrollLock = 75
PauseBreak = 76
Insert = 77
Home = 78
PageUp = 79
BracketRight = 80
Backslash = 81
NonUsTilde = 82
Enter = 83
International1 = 84
EqualsAndPlus = 85
International3 = 86
Backspace = 87
Delete = 88
End = 89
PageDown = 90
RightShift = 91
RightCtrl = 92
UpArrow = 93
LeftArrow = 94
DownArrow = 95
RightArrow = 96
WinLock = 97
Mute = 98
Stop = 99
ScanPreviousTrack = 100
PlayPause = 101
ScanNextTrack = 102
NumLock = 103
KeypadSlash = 104
KeypadAsterisk = 105
KeypadMinus = 106
KeypadPlus = 107
KeypadEnter = 108
Keypad7 = 109
Keypad8 = 110
Keypad9 = 111
KeypadComma = 112
Keypad4 = 113
Keypad5 = 114
Keypad6 = 115
Keypad1 = 116
Keypad2 = 117
Keypad3 = 118
Keypad0 = 119
KeypadPeriodAndDelete = 120
G1 = 121
G2 = 122
G3 = 123
G4 = 124
G5 = 125
G6 = 126
G7 = 127
G8 = 128
G9 = 129
G10 = 130
VolumeUp = 131
VolumeDown = 132
MR = 133
M1 = 134
M2 = 135
M3 = 136
G11 = 137
G12 = 138
G13 = 139
G14 = 140
G15 = 141
G16 = 142
G17 = 143
G18 = 144
International5 = 145
International4 = 146
Fn = 147
# Mouse leds
CLM_1 = 148
CLM_2 = 149
CLM_3 = 150
CLM_4 = 151
# Headset leds
CLH_LeftLogo = 152
CLH_RightLogo = 153
Logo = 154
CLI_Last = 154
CLK._member_map_['1'] = CLK._1
CLK._member_map_['2'] = CLK._2
CLK._member_map_['3'] = CLK._3
CLK._member_map_['4'] = CLK._4
CLK._member_map_['5'] = CLK._5
CLK._member_map_['6'] = CLK._6
CLK._member_map_['7'] = CLK._7
CLK._member_map_['8'] = CLK._8
CLK._member_map_['9'] = CLK._9
CLK._member_map_['0'] = CLK._0
class CDT(CEnum):
"""
Enumeration containing available device types.
"""
Unknown = 0
Mouse = 1
Keyboard = 2
Headset = 3
class CPL(CEnum):
"""
Enumeration containing available device types.
"""
Invalid = 0 # dummy value
# valid values for keyboard
US = 1
UK = 2
BR = 3
JP = 4
KR = 5
# valid values for mouse
Zones1 = 6
Zones2 = 7
Zones3 = 8
Zones4 = 9
class CLL(CEnum):
"""
Enumeration containing available logical layouts for keyboards.
"""
Invalid = 0 # dummy value
US_Int = 1
NA = 2
EU = 3
UK = 4
BE = 5
BR = 6
CH = 7
CN = 8
DE = 9
ES = 10
FR = 11
IT = 12
ND = 13
RU = 14
JP = 15
KR = 16
TW = 17
MEX = 18
# contains list of device capabilities
class CDC(with_metaclass(KeywordMeta, CEnum)):
_None = 0
Lighting = 1
CDC._member_map_['None'] = CDC._None
# contains list of available SDK access modes
class CAM(CEnum):
ExclusiveLightingControl = 0
# contains shared list of all errors which could happen during calling of Corsair* functions
class CE(CEnum):
# if previously called function completed successfully
Success = 0
# CUE is not running or was shut down or third-party control is disabled in CUE settings(runtime error)
ServerNotFound = 1
# if some other client has or took over exclusive control (runtime error)
NoControl = 2
# if developer did not perform protocol handshake(developer error)
ProtocolHandshakeMissing = 3
# if developer is calling the function that is not supported by the server (either because protocol has broken by
# server or client or because the function is new and server is too old. Check CorsairProtocolDetails for details)
# (developer error)
IncompatibleProtocol = 4
# if developer supplied invalid arguments to the function (for specifics look at function descriptions).
# (developer error)
InvalidArguments = 5
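# Quick self-check (assumption: not part of the original module) of the KeywordMeta
# conveniences documented above: integer and None lookups are remapped to the
# underscore-prefixed members.
if __name__ == "__main__":
    assert CLK[1] is CLK._1
    assert CLK["0"] is CLK._0
    assert CDC[None] is CDC._None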
```
#### File: cue_sdk/examples/color_pulse.py
```python
from __future__ import division
import win32api
import win32con
import time
import sys
from cue_sdk import *
def range_float(start, stop, step):
while start < stop:
yield start
start += step
def get_available_keys():
colors = []
for device_id in range(cue.get_device_count()):
device_info = cue.get_device_info(device_id)
if device_info.type == CDT.Mouse:
for x in range(device_info.physicalLayout.value - CPL.Zones1.value + 1):
led_id = CLK(CLK.CLM_1.value + x)
colors.append(CorsairLedColor(led_id, 0, 0, 0))
elif device_info.type == CDT.Keyboard:
led_positions = cue.get_led_positions(device_id)
for led in led_positions.pLedPosition:
colors.append(CorsairLedColor(led.ledId, 0, 0, 0))
elif device_info.type == CDT.Headset:
colors.append(CorsairLedColor(CLK.CLH_LeftLogo, 0, 0, 0))
colors.append(CorsairLedColor(CLK.CLH_RightLogo, 0, 0, 0))
return colors
def perform_pulse_effect(colors, wave_duration, time_per_frame):
for x in range_float(0, 2, time_per_frame / wave_duration):
val = (1 - pow(x - 1, 2)) * 255
for led in colors:
led.g = int(val)
cue.set_led_colors_async(colors)
time.sleep(time_per_frame / 1000)
def main():
wave_duration = 500
time_per_frame = 25
colors = get_available_keys()
if not colors:
return
print("Working... Use \"KP_PLUS\" or \"KP_MINUS\" to increase or decrease speed.\nPress Escape to close program")
while not win32api.GetAsyncKeyState(win32con.VK_ESCAPE):
perform_pulse_effect(colors, wave_duration, time_per_frame)
if win32api.GetAsyncKeyState(win32con.VK_ADD) and wave_duration > 100:
wave_duration -= 100
print(wave_duration)
if win32api.GetAsyncKeyState(win32con.VK_SUBTRACT) and wave_duration < 2000:
wave_duration += 100
print(wave_duration)
time.sleep(0.025)
if __name__ == "__main__":
# To determine whether or not we are using a 64-bit version of Python,
# we will check sys.maxsize. 64-bit Python will have a maxsize value of
    # 9223372036854775807, while 32-bit Python will have a maxsize value of
# 2147483647.
if sys.maxsize == 9223372036854775807:
cue = CUESDK("CUESDK.x64_2015.dll")
else:
cue = CUESDK("CUESDK_2015.dll")
cue.request_control(CAM.ExclusiveLightingControl)
main()
``` |
{
"source": "10se1ucgo/escapists-map-tools",
"score": 3
} |
#### File: 10se1ucgo/escapists-map-tools/run.py
```python
import sys
import Tkinter
import tkFileDialog
import maptools
try:
import _winreg as winreg
winmachine = True
except ImportError:
winmachine = False
try:
import argparse
argparse_enabled = True
except ImportError:
argparse_enabled = False
def main():
if argparse_enabled:
commandln()
else:
print "argparse not found, using GUI instead"
gui()
def commandln():
parser = argparse.ArgumentParser(description='Escapists Map Tools')
parsegroup = parser.add_mutually_exclusive_group()
parsegroup.add_argument("-dc", "--decompile", type=str, nargs=2, metavar=('map_file.map', 'encryption_key'),
help="Decompiles a map with a given encryption key")
parsegroup.add_argument("-d", "--decrypt", type=str, nargs=2, metavar=('map_file.map', 'encryption_key'),
help="Decrypts a map with a given encryption key")
parsegroup.add_argument("-e", "--encrypt", type=str, nargs=2, metavar=('map_file.map', 'encryption_key'),
help="Encrypts a map with a given encryption key")
args = parser.parse_args()
    if args.decompile is not None:
        maptools.decompilemap(args.decompile[0], args.decompile[1])
    elif args.decrypt is not None:
        maptools.decryptmap(args.decrypt[0], args.decrypt[1])
    elif args.encrypt is not None:
        maptools.encryptmap(args.encrypt[0], args.encrypt[1])
else:
print "No command line arguments, using GUI."
gui()
class maptoolsgui:
def __init__(self, winname):
self.winname = winname
winname.title("Escapists Map Tools")
self.label = Tkinter.Label(winname, text="Select an action")
self.label.pack(side="top")
self.decompmap = Tkinter.Button(winname, text="Decompile a map", command=lambda: self.enterkey(1))
self.decompmap.pack(side="left")
self.dcryptmap = Tkinter.Button(winname, text="Decrypt a map", command=lambda: self.enterkey(2))
self.dcryptmap.pack(side="left")
self.ecryptmap = Tkinter.Button(winname, text="Encrypt a map", command=lambda: self.enterkey(3))
self.ecryptmap.pack(side="left")
def enterkey(self, somevariable):
if somevariable == 1:
self.action = 1
elif somevariable == 2:
self.action = 2
elif somevariable == 3:
self.action = 3
self.keylevel = Tkinter.Toplevel()
self.keylevel.title("Enter encryption key")
self.keylevel.protocol("WM_DELETE_WINDOW", self.winname.destroy)
self.keylevel.focus_set()
self.winname.withdraw()
self.label = Tkinter.Label(self.keylevel, text="Enter an encryption key (Default: mothking)")
self.label.pack()
self.entry = Tkinter.Entry(self.keylevel)
self.entry.pack()
self.entry.insert(0, "mothking")
self.okay = Tkinter.Button(self.keylevel, text="OK", command=self.dotheactionyo)
self.okay.pack(side="left")
self.cancel = Tkinter.Button(self.keylevel, text="Cancel", command=sys.exit)
self.cancel.pack(side="right")
def dotheactionyo(self):
self.encryptkey = self.entry.get()
self.keylevel.destroy()
self.filepath = tkFileDialog.askopenfilename(defaultextension=".map",
filetypes=[('Escapists map file', '*.map'),
('Escapists project file', '*.proj'),
('All files', '*.*')],
initialdir=self.escapistspath())
if not self.filepath:
sys.exit()
elif self.action == 1:
maptools.decompilemap(self.filepath, self.encryptkey)
elif self.action == 2:
maptools.decryptmap(self.filepath, self.encryptkey)
elif self.action == 3:
maptools.encryptmap(self.filepath, self.encryptkey)
def escapistspath(self):
if winmachine:
try:
self.steam_reg_path = r"Software\Valve\Steam"
self.reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, self.steam_reg_path, 0, winreg.KEY_READ)
self.value, regtype = winreg.QueryValueEx(self.reg_key, r"SteamPath")
return self.value + r"/steamapps/common/The Escapists/Data/Maps"
except WindowsError:
return None
else:
return None
def gui():
window = Tkinter.Tk()
maptoolsgui(window)
window.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "10sr/archsh_py",
"score": 2
} |
#### File: archsh_py/archsh/execute.py
```python
handlers = []
from archsh.handler.tar import TAR, TGZ, TBZ, TXZ
handlers.extend([TAR, TGZ, TBZ, TXZ])
from archsh.handler.zip import ZIP
handlers.append(ZIP)
from archsh.handler.p7z import P7Z
handlers.append(P7Z)
from posixpath import normpath, join
from subprocess import call
from os.path import join as osjoin, basename as osbasename, dirname
from os import mkdir, makedirs, access, F_OK
from shutil import move
from tempfile import mkdtemp, TemporaryDirectory as TempDir
from random import random
try : # this function is available only after 3.3
from shutil import get_terminal_size
except ImportError:
def get_terminal_size():
from os import getenv
return (getenv("COLUMNS") or "80", getenv("LINES") or "24")
def mk_temp_dir(dir, prefix):
def randstr():
return hex(hash(random()))[2:7]
base = osjoin(dir, prefix)
while True:
try:
newdir = base + randstr()
mkdir(newdir)
return newdir
except OSError:
pass
class Execute():
handler = None
outdir = None
cachedir = None
def __init__(self, env):
for h in handlers:
for s in h.suffixes:
if env.find_suffix(s):
self.handler = h(env.file)
break
if self.handler:
break
if self.handler:
env.set_list(self.handler.get_list())
self.env = env
return
    def finalize(self):
        """Remove cache directory."""
return
def conv_path(self, args):
"""Convert pathes so that they can be passed to handler."""
r = [normpath(join(self.env.pwd(), f)).lstrip("/") for f in args]
return r
def run_cd(self, args):
if len(args) == 0:
self.env.set_dir()
elif self.env.set_dir(args[0]) == None:
print("cd: dir {} not found.".format(args[0]))
return
def run_ls(self, args, all=True):
flist = sorted(self.env.current) # copied list
if not all:
flist = [f for f in flist if not f.startswith(".")]
num = len(flist)
if num == 0:
return
size = get_terminal_size()
col = int(size[0]) - 1
m = max([len(e) for e in flist]) + 1
items = [f.ljust(m) for f in flist]
prow, rem = divmod(col, m)
rows = num // prow
if rem != 0 or rows == 0:
rows += 1
lines = [""] * rows # what is the best way to make list?
i = 0
for f in items:
lines[i] += f
if i == rows - 1:
i = 0
else:
i += 1
for l in lines:
print(l)
return
def run_get(self, files, path=False, force=False):
def make_file(path, dst, use_stream, arg, force=False):
"""Create file."""
try:
makedirs(dirname(dst))
except OSError:
pass
if access(dst, F_OK) and force == False:
                print("'{}' already exists.".format(dst))
else:
if use_stream:
fo = open(dst, mode="wb")
fo.write(arg.read())
fo.close()
else:
move(arg, dst)
print("'{}' -> '{}'".format(path, dst))
if use_stream:
arg.close()
return
def make_out_path(f, d=None):
if d:
return osjoin(d, f)
else:
return osjoin(".", osbasename(f))
afiles = self.conv_path(files)
with TempDir(prefix="archsh-") as tempdir:
try:
r = self.handler.extract_files(afiles, tempdir)
if r:
use_stream = False
except NotImplementedError:
try:
r = self.handler.cat_files(afiles)
if r:
use_stream = True
except NotImplementedError:
print("That operation is not supported for this archive.")
return
if path:
if not self.outdir:
self.outdir = mk_temp_dir(prefix=self.env.basename + "-",
dir=".")
outdir = self.outdir
else:
outdir = None
for f, out in r:
dst = make_out_path(f, outdir)
make_file(f, dst, use_stream, out, force)
return
def run_pager_from_stream(self, program ,l):
for f, out in l:
if program == "cat":
for l in out:
print(l.decode(), end="")
else:
call([program], stdin=out)
out.close()
return
def run_pager_from_file(self, program, l):
if program == "cat":
for f, out in l:
with open(out, "r") as fo:
for l in fo:
print(l, end="")
else:
call([program] + [i[1] for i in l])
return
def run_pager(self, files, program):
# should use temp file and open at once?
afiles = self.conv_path(files)
try:
r = self.handler.cat_files(afiles)
if r:
self.run_pager_from_stream(program, r)
except NotImplementedError:
with TempDir(prefix="archsh-") as tempdir:
try:
r = self.handler.extract_files(afiles, tempdir)
if r:
self.run_pager_from_file(program, r)
except NotImplementedError:
print("That operation is not supported for this archive.")
return
def run_editor(self, files, program):
print("Modifying file is not supported yet...")
return
```
#### File: archsh_py/archsh/__init__.py
```python
from os.path import isfile, isdir
from archsh.archcmd import ArchCmd
from archsh.execute import Execute
from archsh.environ import Environ
class Archsh():
def __init__(self, filename, type_=None):
if isdir(filename):
raise OSError("{} is a directory".format(filename))
elif not isfile(filename):
raise OSError("{} not found".format(filename))
self.filename = filename
self.e = Environ(filename, type_)
self.x = Execute(self.e)
self.c = ArchCmd(self.e, self.x)
# for e in self.e.list:
# print(e)
return
def main(self):
if self.x.handler:
try:
self.c.cmdloop()
finally:
self.x.finalize()
else:
print("No handler found for {}.".format(self.e.file))
return
def main(filename, type_=None):
a = Archsh(filename, type_)
return a.main()
__version__ = "0.0.4"
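# Usage sketch (assumption: not part of the original package): open an archsh prompt
# on an archive passed on the command line.
if __name__ == "__main__":
    import sys
    sys.exit(main(sys.argv[1]))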
``` |
{
"source": "10sr/dowwner",
"score": 2
} |
#### File: dowwner/tests/views.py
```python
from django.test import TestCase
from django.urls import reverse
class ViewTests(TestCase):
def test_index(self):
response = self.client.get(reverse("dowwner:index"))
self.assertEqual(response.status_code, 200)
return
``` |
{
"source": "10sr/flake8-no-implicit-concat",
"score": 2
} |
#### File: 10sr/flake8-no-implicit-concat/setup.py
```python
from setuptools import setup
def _get_version() -> str:
with open("flake8_no_implicit_concat/_version.py") as f:
for line in f:
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
setup(version=_get_version())
```
#### File: flake8-no-implicit-concat/tests/test_checker.py
```python
import ast
import tokenize
import unittest
from io import BytesIO
from typing import Iterable
from flake8_no_implicit_concat import Checker
def _tokenize(input_: str) -> Iterable[tokenize.TokenInfo]:
return tokenize.tokenize(BytesIO(input_.encode("utf-8")).readline)
class TestChecker(unittest.TestCase):
    """Test NIC Checker."""
def test_noerror(self) -> None:
"""Test checker with valid input."""
input_ = "a = 'aaa'"
checker = Checker(ast.parse(input_), _tokenize(input_))
result = checker.run()
self.assertEqual(len(list(result)), 0)
return
def test_error(self) -> None:
"""Test checker with invalid input."""
# Contents of results are checked in run_flake8/
input_ = "a = 'aaa' 'bbb'"
checker = Checker(ast.parse(input_), _tokenize(input_))
result = checker.run()
self.assertEqual(len(list(result)), 1)
return
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "10sr/junks",
"score": 3
} |
#### File: ansible/filter_plugins/decrypt.py
```python
import base64
from cryptography.fernet import Fernet
import hashlib
def _enc(data, key):
key_32 = base64.urlsafe_b64encode(hashlib.sha512(key).digest()[:32])
f = Fernet(key_32)
return f.encrypt(data)
def _dec(b, key):
key_32 = base64.urlsafe_b64encode(hashlib.sha512(key).digest()[:32])
f = Fernet(key_32)
return f.decrypt(b)
def decrypt_filter(data, key):
return _dec(data.encode("utf-8"), key.encode("utf-8")).decode("utf-8")
class FilterModule (object):
def filters(self):
return {
"decrypt": decrypt_filter
}
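# Round-trip sketch (assumption: not part of the original filter plugin): a value
# encrypted with _enc above can be recovered through the decrypt filter.
def _roundtrip_example(plaintext, key):
    token = _enc(plaintext.encode("utf-8"), key.encode("utf-8"))
    return decrypt_filter(token.decode("utf-8"), key)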
if __name__ == "__main__":
import sys
print(_enc(sys.argv[1].encode("utf-8"), sys.argv[2]))
```
#### File: ansible/library/yaml_file.py
```python
import os
import yaml
# import module snippets
from ansible.module_utils.basic import AnsibleModule
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
class _TaskFailedException(Exception):
def __init__(self, rc, msg):
self.rc = rc
self.msg = msg
return
def do_yaml(module, filename, data, indent_size=2, backup=False, create=False):
orig_data = None
if not os.path.exists(filename):
if not create:
raise _TaskFailedException(
257, 'Destination {} does not exist !'.format(filename)
)
destdir = os.path.dirname(filename)
if not os.path.exists(destdir) and not module.check_mode:
os.makedirs(destdir)
else:
try:
with open(filename) as f:
orig_data = yaml.safe_load(f)
except yaml.error.YAMLError:
# Ignore parse error (and possibly overwrite its content)
pass
if orig_data == data:
changed = False
msg = "OK"
else:
changed = True
msg = "Data changed"
backup_file = None
if changed and not module.check_mode:
if backup:
backup_file = module.backup_local(filename)
with open(filename, "w") as f:
yaml.dump(
data, f,
explicit_start=True,
default_flow_style=False,
indent=indent_size
)
return {
'changed': changed,
'msg': msg,
'backup_file': backup_file
}
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest'], type='path'),
data=dict(required=True, aliases=['content'], type='raw'),
indent_size=dict(default=2, type='int'),
backup=dict(default=False, type='bool'),
create=dict(default=True, type='bool')
),
add_file_common_args=True,
supports_check_mode=True
)
path = module.params['path']
data = module.params['data']
indent_size = module.params['indent_size']
backup = module.params['backup']
create = module.params['create']
try:
result = do_yaml(
module=module,
filename=path,
data=data,
indent_size=indent_size,
backup=backup,
create=create
)
if not module.check_mode and os.path.exists(path):
file_args = module.load_file_common_arguments(module.params)
result['changed'] = module.set_fs_attributes_if_different(
file_args, result['changed']
)
except _TaskFailedException as e:
module.fail_json(
rc=e.rc,
msg=e.msg
)
return
module.exit_json(
changed=result['changed'],
msg=result['msg'],
path=path,
backup_file=result['backup_file']
)
return
if __name__ == '__main__':
main()
```
#### File: .github/workflows/check-latest-actions.py
```python
import os
import sys
from contextlib import suppress
from pathlib import Path
from typing import cast, DefaultDict, List, NamedTuple, Optional, Set
import requests
import yaml
from termcolor import colored
API_URL = "https://api.github.com/graphql"
API_TOKEN = os.getenv("GITHUB_TOKEN")
GQL_LATEST_RELEASE = """
query getLatestRelease($owner: String!, $name: String!) {
repository(owner: $owner, name: $name) {
releases(last: 1) {
nodes {
tagName
}
}
}
}
"""
try:
YAML_LOADER = yaml.CSafeLoader
except AttributeError:
YAML_LOADER = yaml.SafeLoader # type: ignore
class VersionDiff(NamedTuple):
action: str
current_version: Optional[str]
latest_version: str
@property
def raw_action(self) -> str:
return (
f"{self.action}@{self.current_version}"
if self.current_version
else self.action
)
def check_latest_version(
session: requests.Session, *, value: str
) -> Optional[VersionDiff]:
current_version: Optional[str] = None
if "@" in value:
action, current_version = value.split("@", 1)
else:
action = value
if "/" not in value:
print(
colored(f"ERROR: Wrong action {value}. Skip...", "red"),
file=sys.stderr,
)
return None
owner, name = action.split("/", 1)
latest_version = get_latest_version(session, owner=owner, name=name)
if latest_version is None:
print(
colored(
f"ERROR: Unable to fetch latest version of {action}. Skip...",
"red",
),
file=sys.stderr,
)
return None
return VersionDiff(action, current_version, latest_version)
def get_latest_version(
session: requests.Session, *, owner: str, name: str
) -> Optional[str]:
with suppress(requests.RequestException, IndexError, KeyError, ValueError):
with session.post(
API_URL,
json={
"query": GQL_LATEST_RELEASE,
"variables": {"owner": owner, "name": name},
},
) as response:
response.raise_for_status()
data = response.json()
return cast(
str,
data["data"]["repository"]["releases"]["nodes"][0]["tagName"],
)
return None
def print_diff(diff: VersionDiff, paths: List[Path]) -> None:
if diff.current_version == diff.latest_version:
print(
f"{colored(diff.raw_action, 'green', attrs=['bold'])} is "
"up-to-date\n"
)
return
current_label = (
f" (current version: {colored(diff.current_version, 'red')})"
if diff.current_version
else ""
)
print(
f"{colored(diff.action, 'yellow', attrs=['bold'])} is outdated. "
f"Latest version: {colored(diff.latest_version, 'green')}"
f"{current_label}"
)
print("Affected files:")
for path in sorted(item.absolute() for item in paths):
print(f" - {path}")
print("")
def process_workflow_yaml(path: Path) -> Optional[Set[str]]:
if not path.exists():
return None
try:
content = yaml.load(path.read_bytes(), Loader=YAML_LOADER)
except ValueError:
return None
actions: Set[str] = set()
for job in (content.get("jobs") or {}).values():
for step in job.get("steps") or {}:
maybe_action = step.get("uses")
if maybe_action is None:
continue
actions.add(maybe_action)
return actions
def main(argv: Optional[List[str]] = None) -> int:
if argv is None:
argv = sys.argv[1:]
if not argv:
print(
colored(
"Usage: check-latest-actions.py WORKFLOW_YAML [WORKFLOW_YAML]",
"red",
),
file=sys.stderr,
)
return 1
if not API_TOKEN:
print(
colored("ERROR: GITHUB_TOKEN env var is empty. Exit...", "red"),
file=sys.stderr,
)
return 1
all_actions: Set[str] = set()
storage: DefaultDict[str, List[Path]] = DefaultDict(list)
for item in argv:
path = Path(item)
maybe_actions = process_workflow_yaml(path)
if maybe_actions is None:
print(
colored(
f"ERROR: Unable to process workflow config at {item}. "
"Exit...",
"red",
),
file=sys.stderr,
)
return 1
all_actions.update(maybe_actions)
for action in maybe_actions:
storage[action].append(path)
with requests.session() as session:
session.headers["Authorization"] = f"Bearer {API_TOKEN}"
for item in sorted(all_actions):
diff = check_latest_version(session, value=item)
if diff is None:
continue
print_diff(diff, storage[diff.raw_action])
return 0
if __name__ == "__main__":
raise SystemExit(main())
```
#### File: python/dataclass/main2.py
```python
from __future__ import annotations
from typing import Optional
import dataclasses
from pprint import pprint
import typing
@dataclasses.dataclass
class BClass():
aattr: str
battr: int = 2
def __init__(self):
self.aattr = "hoe"
return
b = BClass()
print(b)
```
#### File: management/commands/countsleep.py
```python
from django.core.management.base import BaseCommand, CommandError
from app.models import TimeOfSleep
class Command(BaseCommand):
help = "Count sleep record num"
def add_arguments(self, parser):
return
def handle(self, *args, **kargs):
        self.stdout.write(str(args))
        self.stdout.write(str(kargs))
        self.stdout.write(str(TimeOfSleep.objects.count()))
return
```
#### File: python/paramiko/main.py
```python
from paramiko import SSHClient, AutoAddPolicy
import os
import sys
HOST = "v133-130-91-27.a020.g.tyo1.static.cnode.io"
PORT = 22
USER = os.environ["USERNAME"]
COMMAND = "hostname; sleep 1; echo 1; echo 2 1>&2; sleep 1; echo 3"
def main():
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(HOST, PORT, USER)
chan = ssh.get_transport().open_session()
chan.set_combine_stderr(True)
chan.exec_command(COMMAND)
data = chan.recv(1)
while data:
sys.stdout.write(data)
data = chan.recv(1)
exit_status = chan.recv_exit_status()
print("Exit: {}".format(exit_status))
ssh.close()
return
if __name__ == "__main__":
sys.exit(main())
```
#### File: python/py_gtk/menu.py
```python
import pygtk
pygtk.require('2.0')
import gtk
class MenuExample:
def __init__(self):
# create a new window
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_size_request(200, 100)
window.set_title("GTK Menu Test")
window.connect("delete_event", lambda w,e: gtk.main_quit())
# Init the menu-widget, and remember -- never
# show() the menu widget!!
# This is the menu that holds the menu items, the one that
# will pop up when you click on the "Root Menu" in the app
menu = gtk.Menu()
# Next we make a little loop that makes three menu-entries for
# "test-menu". Notice the call to gtk_menu_append. Here we are
# adding a list of menu items to our menu. Normally, we'd also
# catch the "clicked" signal on each of the menu items and setup a
# callback for it, but it's omitted here to save space.
for i in range(3):
# Copy the names to the buf.
buf = "Test-undermenu - %d" % i
# Create a new menu-item with a name...
menu_items = gtk.MenuItem(buf)
# ...and add it to the menu.
menu.append(menu_items)
# Do something interesting when the menuitem is selected
menu_items.connect("activate", self.menuitem_response, buf)
# Show the widget
menu_items.show()
# This is the root menu, and will be the label
# displayed on the menu bar. There won't be a signal handler attached,
# as it only pops up the rest of the menu when pressed.
root_menu = gtk.MenuItem("Root Menu")
root_menu.show()
# Now we specify that we want our newly created "menu" to be the
# menu for the "root menu"
root_menu.set_submenu(menu)
# A vbox to put a menu and a button in:
vbox = gtk.VBox(False, 0)
window.add(vbox)
vbox.show()
# Create a menu-bar to hold the menus and add it to our main window
menu_bar = gtk.MenuBar()
vbox.pack_start(menu_bar, False, False, 2)
menu_bar.show()
# Create a button to which to attach menu as a popup
button = gtk.Button("press me")
button.connect_object("event", self.button_press, menu)
vbox.pack_end(button, True, True, 2)
button.show()
# And finally we append the menu-item to the menu-bar -- this is the
# "root" menu-item I have been raving about =)
menu_bar.append (root_menu)
# always display the window as the last step so it all splashes on
# the screen at once.
window.show()
def posfunc(menu, data=None):
pos = button.get_pointer()
return (pos[0], pos[1], True)
menu.popup(None, None, posfunc, 1, 0, None)
# Respond to a button-press by posting a menu passed in as widget.
#
# Note that the "widget" argument is the menu being posted, NOT
# the button that was pressed.
def button_press(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS:
widget.popup(None, None, None, event.button, event.time)
# Tell calling code that we have handled this event the buck
# stops here.
return True
# Tell calling code that we have not handled this event pass it on.
return False
# Print a string when a menu item is selected
def menuitem_response(self, widget, string):
print "%s" % string
def main():
gtk.main()
return 0
if __name__ == "__main__":
MenuExample()
main()
```
#### File: junks/python/reverse.py
```python
def reverse(sec):
return _reverse([], sec)
def _reverse(ret, rest):
if not rest:
return ret
first = rest[0]
rest = rest[1:]
return _reverse([first] + ret, rest)
def reverse_for(seq):
ret = []
for e in seq:
ret = [e] + ret
return ret
if __name__ == "__main__":
print(reverse_for([1, 2, 3, 45, 6]))
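    # Added example (not in the original): the recursive variant returns the same result.
    print(reverse([1, 2, 3, 45, 6]))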
```
#### File: python/tpg_try/tpg_example1.py
```python
import math
import operator
import string
import tpg
if tpg.__python__ == 3:
operator.div = operator.truediv
raw_input = input
def make_op(op):
return {
'+' : operator.add,
'-' : operator.sub,
'*' : operator.mul,
'/' : operator.div,
'%' : operator.mod,
'^' : lambda x,y:x**y,
'**' : lambda x,y:x**y,
'cos' : math.cos,
'sin' : math.sin,
'tan' : math.tan,
'acos': math.acos,
'asin': math.asin,
'atan': math.atan,
'sqr' : lambda x:x*x,
'sqrt': math.sqrt,
'abs' : abs,
'norm': lambda x,y:math.sqrt(x*x+y*y),
}[op]
class Calc(tpg.Parser, dict):
r"""
separator space '\s+' ;
token pow_op '\^|\*\*' $ make_op
token add_op '[+-]' $ make_op
token mul_op '[*/%]' $ make_op
token funct1 '(cos|sin|tan|acos|asin|atan|sqr|sqrt|abs)\b' $ make_op
token funct2 '(norm)\b' $ make_op
token real '(\d+\.\d*|\d*\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+' $ float
token integer '\d+' $ int
token VarId '[a-zA-Z_]\w*' ;
START/e ->
'vars' $ e=self.mem()
| VarId/v '=' Expr/e $ self[v]=e
| Expr/e
;
Var/$self.get(v,0)$ -> VarId/v ;
Expr/e -> Term/e ( add_op/op Term/t $ e=op(e,t)
)*
;
Term/t -> Fact/t ( mul_op/op Fact/f $ t=op(t,f)
)*
;
Fact/f ->
add_op/op Fact/f $ f=op(0,f)
| Pow/f
;
Pow/f -> Atom/f ( pow_op/op Fact/e $ f=op(f,e)
)?
;
Atom/a ->
real/a
| integer/a
| Function/a
| Var/a
| '\(' Expr/a '\)'
;
Function/y ->
funct1/f '\(' Expr/x '\)' $ y = f(x)
| funct2/f '\(' Expr/x1 ',' Expr/x2 '\)' $ y = f(x1,x2)
;
"""
def mem(self):
vars = sorted(self.items())
memory = [ "%s = %s"%(var, val) for (var, val) in vars ]
return "\n\t" + "\n\t".join(memory)
print("Calc (TPG example)")
calc = Calc()
while 1:
l = raw_input("\n:")
if l:
try:
print(calc(l))
except Exception:
print(tpg.exc())
else:
break
``` |
{
"source": "10sr/kvkv",
"score": 2
} |
#### File: kvkv/tests/tests.py
```python
import unittest
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from app import models
class TestFailure(TestCase):
@unittest.skip("demonstrating failing and skipping")
def test_failure(self):
self.assertEqual(True, False)
return
``` |
{
"source": "10sr/pyltsv",
"score": 2
} |
#### File: 10sr/pyltsv/setup.py
```python
from setuptools import setup
def _get_version():
with open("pyltsv/_version.py") as f:
for line in f:
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
setup(
version=_get_version(),
)
```
#### File: pyltsv/tests/test_read.py
```python
import unittest
from typing import List
from typing import Optional
from typing import Text
from typing import Tuple
from parameterized import parameterized
from six import BytesIO
from six import StringIO
import pyltsv
from pyltsv.read import BytesLineParser
from pyltsv.read import StrLineParser
class TestReader(unittest.TestCase):
"""Test reader."""
def test_iter(self):
# type: () -> None
"""Test basic usage of reader."""
f = StringIO(u"a:1\tb:2\n\na:3\tb:4\n")
ret = list(pyltsv.reader(f))
self.assertEqual(len(ret), 3)
self.assertEqual(list(ret[0]), [(u"a", u"1"), (u"b", u"2")])
self.assertEqual(list(ret[1]), [])
self.assertEqual(list(ret[2]), [(u"a", u"3"), (u"b", u"4")])
return
def test_readline(self):
# type: () -> None
"""Test readline of reader."""
f = StringIO(u"a:1\tb:2\n\na:3\tb:4\n")
r = pyltsv.reader(f)
self.assertEqual(list(r.readline() or []), [(u"a", u"1"), (u"b", u"2")])
self.assertEqual(list(r.readline() or []), [])
self.assertEqual(list(r.readline() or []), [(u"a", u"3"), (u"b", u"4")])
self.assertEqual(r.readline(), None) # end of file
return
def test_invalid_strict_setup(self):
# type: () -> None
"""Test invalid setup of strict mode."""
f = StringIO(u"a:1\tb:2\n\na:3\tb:4\n")
with self.assertRaises(pyltsv.ParserConfigError):
_ = pyltsv.reader(f, strict=True, delimiter=",")
return
class TestBreader(unittest.TestCase):
"""Test breader."""
def test_iter(self):
# type: () -> None
"""Test basic usage of breader."""
f = BytesIO(b"a:1\tb:2\n\na:3\tb:4\n")
ret = list(pyltsv.breader(f))
self.assertEqual(len(ret), 3)
self.assertEqual(list(ret[0]), [(b"a", b"1"), (b"b", b"2")])
self.assertEqual(list(ret[1]), [])
self.assertEqual(list(ret[2]), [(b"a", b"3"), (b"b", b"4")])
return
class TestStrLineParser(unittest.TestCase):
"""Test StrLineParser."""
@parameterized.expand(
[
("basic", u"a:1\tb:2\n", [(u"a", u"1"), (u"b", u"2")]),
("crlf", u"a:1\tb:2\r\n", [(u"a", u"1"), (u"b", u"2")]),
("empty", u"\n", []),
("labelonly", u"a\n", [(u"a", "")]),
("emptyvalue", u"a:\n", [(u"a", u"")]),
("emptykey", u":1\n", [(u"", u"1")]),
(
"blankfield",
u"\ta:1\t\tb:2\n",
[(u"a", u"1"), (u"b", u"2")],
),
# When directly passed newlines are allowed when strict=False
(
"newlineinside",
u"\ta:1\n\t\tb:2\n",
[(u"a", u"1\n"), (u"b", u"2")],
),
]
)
def test_parse(self, name, input, expected):
# type: (str, Text, List[Tuple[Text, Optional[Text]]]) -> None
"""Test basic usage of parse.
:param name: Name of this parameter
:param input: Input LTSV line
:param expected: Expected parsed result
"""
actual = StrLineParser().parse(input)
self.assertEqual(list(actual), expected)
return
@parameterized.expand(
[
("basic", u"a=1,b=3|", [(u"a", u"1"), (u"b", u"3")]),
]
)
def test_parse_custom_params(self, name, input, expected):
# type: (str, Text, List[Tuple[Text, Optional[Text]]]) -> None
"""Test parser with custom parameters.
:param name: Name of this parameter
:param input: Input line
:param expected: Expected parsed result
"""
actual = StrLineParser(delimiter=u",", labeldelimiter=u"=", eols=(u"|",)).parse(
input
)
self.assertEqual(list(actual), expected)
return
def test_parse_strict_invalid_config(self):
# type: () -> None
"""Test parser with strict mode enabled and invalid config given."""
with self.assertRaises(StrLineParser.ParserConfigError):
_ = StrLineParser(strict=True, delimiter=u",")
return
@parameterized.expand(
[
("basic", u"a:1\tb:2\n", [(u"a", u"1"), (u"b", u"2")]),
("crlf", u"a:1\tb:2\r\n", [(u"a", u"1"), (u"b", u"2")]),
("empty", u"\n", []),
("emptyvalue", u"a:\n", [(u"a", u"")]),
("colonvalue", u"a::\n", [(u"a", u":")]),
]
)
def test_parse_strict_ok(self, name, input, expected):
# type: (str, Text, List[Tuple[Text, Optional[Text]]]) -> None
"""Test parser with strict mode enabled and invalid config given.
:param name: Name of this parameter
:param input: Input line
:param expected: Expected parsed result
"""
actual = StrLineParser(strict=True).parse(input)
self.assertEqual(list(actual), expected)
return
@parameterized.expand(
[
("emptyfield", u"a:1\t\tb:2\n", StrLineParser.EmptyFieldParseError),
("labelonly", u"a:1\tb\n", StrLineParser.LabelOnlyParseError),
("startswithtab", u"\ta:1\t\tb:2\n", StrLineParser.EmptyFieldParseError),
("emptylabel", u"a:1\t:2\n", StrLineParser.InvalidLabelParseError),
("invalidlabel", u"^:1\tb:2\n", StrLineParser.InvalidLabelParseError),
("invalidlabel", u"a:1\tあ:2\n", StrLineParser.InvalidLabelParseError),
("invalidvalue", u"a:1\n\tb:2\n", StrLineParser.InvalidValueParseError),
]
)
def test_parse_strict_err(self, name, input, expected_err):
# type: (str, Text, StrLineParser.ParseError) -> None
"""Test parser with strict mode enabled and invalid input given.
:param name: Name of this parameter
:param input: Input line
:param expected_err: Expected Error
"""
parser = StrLineParser(strict=True)
with self.assertRaises(expected_err): # type: ignore
_ = parser.parse(input)
return
class TestBytesLineParser(unittest.TestCase):
"""Test BytesLineParser."""
def test_parse(self):
# type: () -> None
"""Test basic usage of parse."""
actual = BytesLineParser().parse(b"a:1\tb:2\n")
self.assertEqual(list(actual), [(b"a", b"1"), (b"b", b"2")])
return
@parameterized.expand(
[
("emptyfield", b"a:1\t\tb:2\n", BytesLineParser.EmptyFieldParseError),
("labelonly", b"a:1\tb\n", BytesLineParser.LabelOnlyParseError),
("startswithtab", b"\ta:1\t\tb:2\n", BytesLineParser.EmptyFieldParseError),
("emptylabel", b"a:1\t:2\n", BytesLineParser.InvalidLabelParseError),
("invalidlabel", b"^:1\tb:2\n", BytesLineParser.InvalidLabelParseError),
(
"invalidlabel",
u"a:1\tあ:2\n".encode("utf-8"),
BytesLineParser.InvalidLabelParseError,
),
("invalidvalue", b"a:1\n\tb:2\n", BytesLineParser.InvalidValueParseError),
]
)
def test_parse_strict_err(self, name, input, expected_err):
# type: (str, bytes, BytesLineParser.ParseError) -> None
"""Test parser with strict mode enabled and invalid input given.
:param name: Name of this parameter
:param input: Input line
:param expected_err: Expected Error
"""
parser = BytesLineParser(strict=True)
with self.assertRaises(expected_err): # type: ignore
_ = parser.parse(input)
return
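# --- Usage sketch (not part of the original test suite) ---
# Assumes only the pyltsv API exercised above: reader() yields one parsed line per
# LTSV record, and StrLineParser.parse() returns an iterable of (label, value) pairs.
def _example_usage():
    # type: () -> None
    f = StringIO(u"host:example.com\tstatus:200\n")
    for parsed in pyltsv.reader(f):
        print(list(parsed))  # [(u'host', u'example.com'), (u'status', u'200')]
    print(list(StrLineParser().parse(u"a:1\tb:2\n")))  # [(u'a', u'1'), (u'b', u'2')]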
``` |
{
"source": "10sr/server-provisions",
"score": 3
} |
#### File: server-provisions/sakura/check_ansible_version.py
```python
import subprocess
import sys
import toml
def get_version_installed(module: str) -> str:
out = subprocess.check_output(
["pipenv", "run", "python", "-m", "pip", "freeze"], encoding="utf-8"
)
for l in out.split("\n"):
if l.startswith(module + "=="):
return l.split("==")[1]
raise Exception(f"Module {module} not found")
def get_version_required(module: str) -> str:
    with open("Pipfile") as f:
        obj = toml.load(f)
    # Look up the requested module instead of hard-coding "ansible"
    return obj["packages"][module].lstrip("=")
def main():
assert get_version_installed("ansible") == get_version_required("ansible")
return 0
if __name__ == "__main__":
sys.exit(main())
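# Illustrative note (not from the original repo): this script assumes a Pipfile that
# pins ansible under [packages], e.g.
#   [packages]
#   ansible = "==2.9.10"
# get_version_required() then returns "2.9.10" after lstrip("="), and main()'s assert
# passes only when `pipenv run pip freeze` reports the same ansible version.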
``` |
{
"source": "10sr/tbar",
"score": 3
} |
#### File: tbar/tbar/__init__.py
```python
__version__ = "0.1"
import sys
from tbar.tbar import TBar
from tbar.reader import Reader
def main(infile, comment, sep, field, regexp,
max, length, vertical):
infile = infile or sys.stdin
r = Reader(infile=infile, comment=comment, sep=sep, field=field,
regexp=regexp)
b = TBar(_max=max, length=length, vertical=vertical)
b.add_data_itr(r.data)
s = str(b)
if s:
print(s)
else:
print("No data.")
return
```
#### File: tbar/tbar/tbar.py
```python
class TBar():
_max = -1
length = 50
infile = None
rawdata = None
normdata = None
vertical = False
def __init__(self, _max=0, length=0, vertical=False):
if _max:
self._max = _max
if length:
self.length = length
self.vertical = vertical
self.rawdata = []
self.normdata = []
return
def add_data_itr(self, itr):
# itr: iterable of (key, value), where key is str, value is float
self.rawdata.extend(itr)
return
def __str__(self):
if len(self.rawdata) == 0:
return ""
self.__set_normdata()
bars = []
maxkeylen = max(len(k) for k, v in self.normdata)
fillspace = " " * maxkeylen
if self.vertical:
sep = "-"
else:
sep = "|"
for k, v in self.normdata:
if self.vertical:
# reverse the string of key
k = k[::-1]
bars.append(
(fillspace + k)[-maxkeylen:] +
sep +
("*" * int(self.length * v) + " " * self.length)[:self.length] +
sep
)
# transpose
if self.vertical:
bars = zip(*bars)
bars = list("".join(e) for e in reversed(tuple(bars)))
# add scale strings
if self.vertical:
scalestr = str(self._max)
leftspaces = " " * len(scalestr)
for i in range(len(bars)):
if i == 0:
bars[i] = scalestr + bars[i]
else:
bars[i] = leftspaces + bars[i]
else:
bars.insert(0,
(" " * (maxkeylen + 1 + self.length)) + str(self._max))
return str("\n".join(bars))
def __set_max_from_data(self):
self._max = max(tuple(zip(*self.rawdata))[1])
return self._max
def __set_normdata(self):
if self._max == -1:
self.__set_max_from_data()
for k, v in self.rawdata:
self.normdata.append((k, v / self._max))
return
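# Minimal usage sketch (illustrative, not part of the original module), using only the
# API defined above: feed (key, value) pairs into a TBar and render it with str().
if __name__ == "__main__":
    bar = TBar(length=20)
    bar.add_data_itr([("mon", 3.0), ("tue", 7.5), ("wed", 5.0)])
    print(bar)  # horizontal bars scaled against the data maximum (7.5)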
``` |
{
"source": "10sr/webtools",
"score": 2
} |
#### File: webtools/export_as_bookmark/apps.py
```python
from django.apps import AppConfig
from django.conf import settings
from .redis import Redis
class ExportAsBookmarkConfig(AppConfig): # type: ignore # disallow_subclassing_any
"""export_as_bookmark app definition."""
name = "export_as_bookmark"
label = "export_as_bookmark"
def ready(self) -> None:
"""Initialize app."""
Redis.get_instance().ready(settings.EXPORT_AS_BOOKMARK_REDIS_URL)
return
```
#### File: tests/export_as_bookmark/views.py
```python
from unittest import mock
from django.test import TestCase
from django.urls import reverse
from export_as_bookmark.redis import Redis
class ViewTests(TestCase):
"""View test."""
def _request_get(self, name, reverse_args=None, *args, **kargs):
return self.client.get(
reverse(f"export_as_bookmark:{name}", args=reverse_args), *args, **kargs
)
def _request_post(self, name, reverse_args=None, *args, **kargs):
return self.client.post(
reverse(f"export_as_bookmark:{name}", args=reverse_args), *args, **kargs
)
def test_index(self):
response = self._request_get("index")
self.assertEqual(response.status_code, 200)
return
@mock.patch.object(Redis, "get_instance")
def test_post(self, redis_mock):
body = """http://google.com
http://yahoo.co.jp
"""
response = self._request_post("post", None, {"body": body})
self.assertEqual(response.status_code, 302)
redis_mock.return_value.set.assert_called_once()
return
@mock.patch.object(Redis, "get_instance")
def test_done(self, redis_mock):
redis_mock.return_value.pttl.return_value = 1
response = self._request_get("done", ("12345", "export-name"))
self.assertEqual(response.status_code, 200)
redis_mock.return_value.pttl.assert_called_once_with("12345")
return
@mock.patch.object(Redis, "get_instance")
def test_download(self, redis_mock):
redis_mock.return_value.get.return_value = b"hoehoe"
response = self._request_get("download", ("12345", "export-name"))
self.assertEqual(response.status_code, 200)
redis_mock.return_value.get.assert_called_once_with("12345")
return
@mock.patch.object(Redis, "get_instance")
def test_download_404(self, redis_mock):
redis_mock.return_value.get.return_value = None
        # TODO: Suppress WARNING:django.request:Not Found: warning
response = self._request_get("download", ("12345", "export-name"))
self.assertEqual(response.status_code, 404)
redis_mock.return_value.get.assert_called_once_with("12345")
return
```
#### File: tests/lggr/views.py
```python
import logging
from django.test import TestCase
from django.urls import reverse
logging.basicConfig(level=logging.ERROR)
class ViewTests(TestCase):
"""View tests."""
def _request_get(self, name, *args, **kargs):
return self.client.get(reverse(f"lggr:{name}"), *args, **kargs)
def _request_post(self, name, *args, **kargs):
return self.client.post(reverse(f"lggr:{name}"), *args, **kargs)
def test_index(self):
response = self._request_get("index")
self.assertEqual(response.status_code, 200)
return
def test_get(self):
response = self._request_get("get", {"key1": "v1", "key2": "value2"})
self.assertEqual(response.status_code, 200)
return
def test_post(self):
response = self._request_post("post", {"key1": "v1", "key2": "value2"})
self.assertEqual(response.status_code, 200)
return
``` |
{
"source": "10sujitkhanal/forzza",
"score": 3
} |
#### File: admin/common/forms.py
```python
from django import forms
from accounts.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegistrationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({'placeholder': 'Enter Username'})
self.fields['email'].widget.attrs.update({'placeholder': 'Enter Email'})
self.fields['password1'].widget.attrs.update({'placeholder': 'Enter password'})
self.fields['password2'].widget.attrs.update({'placeholder': 'Repeat your password'})
# self.fields['email'].widget.attrs['placeholder'] = self.fields['email'].label or '<EMAIL>'
class Meta:
model = User
fields = ("username",
"email",
"gender",
"is_superuser",
"is_staff",
"<PASSWORD>",
"password2")
# widgets = {
# 'password1': forms.TextInput(attrs={'placeholder': 'Password'}),
# 'password2': forms.TextInput(attrs={'placeholder': 'Repeat your password'}),
# }
def clean_username(self):
username = self.cleaned_data['username']
print(username)
if ' ' in username:
raise forms.ValidationError("Username can't contain spaces.")
return username
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.username = self.cleaned_data['username']
user.email = self.cleaned_data['email']
if commit:
user.save()
return user
```
#### File: admin/common/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import TemplateView, CreateView
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib import messages
from deposit.models import Deposit
from withdraw.models import Withdraw
import json
from notifications.models import Notification
from notifications.signals import notify
from accounts.models import User
from django.views.generic import CreateView, FormView, RedirectView
from .forms import *
from django.db.models import Q
from core.models import Feedback
from userprofile.models import Profile
def deposit_status(request):
print("Sujit")
if request.method == 'POST':
id = request.POST['id']
print(id)
update = Deposit.objects.get(id=id, status=False)
update.status = True
update.save()
notify.send(request.user, recipient=update.user, verb='complete a deposit',
level=Notification.LEVELS.success)
return HttpResponse("Complete")
def complete_user(request):
if request.method == 'POST':
id = request.POST['id']
forzzaUser = request.POST['forzzaUser']
forzzaPass = request.POST['forzzaPass']
update = User.objects.get(id=id)
update.complete = True
update.save()
profile = Profile.objects.get(user__id=id)
profile.forzza_username = forzzaUser
        profile.forzza_password = forzzaPass
profile.save()
notify.send(request.user, recipient=update, verb='Your Forzza Username is '+forzzaUser+' and Password is '+forzzaPass+ ' <a href="https://www.forzza.com/" target="_blank">click here</a> to login in Forzza',
level=Notification.LEVELS.success)
return HttpResponse("Complete")
def add_user(request):
    if not request.user.is_superuser:
        return redirect('common:dashboard')
users = User.objects.filter(Q(is_staff=True) & Q(is_active=True))
context = {
'users':users,
'page_url' : '/admins/add/user'
}
if request.method == 'POST':
if User.objects.filter(email=request.POST['email']).exists():
messages.warning(request, 'This email is already taken')
return redirect('common:add-user')
user_form = UserRegistrationForm(data=request.POST)
if user_form.is_valid():
user = user_form.save(commit=False)
            password = user_form.cleaned_data.get("password1")
user.set_password(password)
user.save()
return redirect('common:add-user')
else:
print(user_form.errors)
return render(request, 'accounts/register.html', {'form': user_form})
return render(request, 'common/user/add_user.html', context)
def delete_user(request):
if request.method == 'POST':
id = request.POST['id']
update = User.objects.get(id=id)
update.is_active = False
update.save()
return redirect('common:add-user')
def withdraw_status(request):
print("Sujit")
if request.method == 'POST':
id = request.POST['id']
print(id)
update = Withdraw.objects.get(id=id, status=False)
update.status = True
update.save()
notify.send(request.user, recipient=update.user, verb='complete a withdraw',
level=Notification.LEVELS.success)
return HttpResponse("Complete")
def deposit(request):
context ={
'page_url' : '/admins/deposit'
}
return render(request, 'common/deposit/deposit.html', context)
def request_user(request):
context ={
'page_url' : '/admins/request/user'
}
return render(request, 'common/request_user/user.html', context)
def request_pending(request):
pendings = User.objects.filter(complete=False)
context = {
'pendings':pendings,
}
return render(request, 'common/request_user/request_pending.html', context)
def request_complete(request):
completes = User.objects.filter(complete=True).order_by('-date_joined')
context = {
'completes':completes,
}
return render(request, 'common/request_user/request_complete.html', context)
def deposit_pending(request):
pendings = Deposit.objects.filter(status=False)
context = {
'pendings':pendings,
}
return render(request, 'common/deposit/deposit_pending.html', context)
def deposit_complete(request):
completes = Deposit.objects.filter(status=True).order_by('-date')
context = {
'completes':completes,
}
return render(request, 'common/deposit/deposit_complete.html', context)
def withdraw(request):
context ={
'page_url' : '/admins/withdraw'
}
return render(request, 'common/withdraw/withdraw.html', context)
def withdraw_pending(request):
pendings = Withdraw.objects.filter(status=False)
context = {
'pendings':pendings,
}
return render(request, 'common/withdraw/withdraw_pending.html', context)
def withdraw_complete(request):
completes = Withdraw.objects.filter(status=True).order_by('-date')
context = {
'completes':completes,
}
return render(request, 'common/withdraw/withdraw_complete.html', context)
class DashboardView(LoginRequiredMixin, TemplateView):
template_name = 'common/dashboard.html'
login_url = reverse_lazy('home')
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
deposit = Deposit.objects.all().count()
deposit_pending = Deposit.objects.filter(status=False).count()
deposit_complete = Deposit.objects.filter(status=True).count()
withdraw = Withdraw.objects.all().count()
withdraw_pending = Withdraw.objects.filter(status=False).count()
withdraw_complete = Withdraw.objects.filter(status=True).count()
user = User.objects.exclude(is_superuser = True).count()
context['deposit'] = deposit
context['deposit_pending'] = deposit_pending
context['deposit_complete'] = deposit_complete
context['withdraw'] = withdraw
context['withdraw_pending'] = withdraw_pending
context['withdraw_complete'] = withdraw_complete
context['user'] = user
context['page_url'] = '/admins/dashboard'
return context
def feedback(request):
    if not request.user.is_superuser:
        return redirect('common:dashboard')
feedback = Feedback.objects.all()
context = {
'feedback':feedback,
'page_url' : '/admins/feedback'
}
return render(request, 'common/feedback/feedback.html', context)
```
#### File: forzza/deposit/views.py
```python
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.safestring import mark_safe
import json
from django.http import HttpResponse
from deposit.models import Deposit
import json
from django.contrib import messages
from notifications.models import Notification
from notifications.signals import notify
from accounts.models import User
from userprofile.models import Profile
def deposit(request):
if not request.user.is_authenticated:
return redirect(reverse_lazy('accounts:login'))
histories = Deposit.objects.filter(user=request.user).order_by('-date')
allAdmin = User.objects.filter(is_superuser = True)
profiles = Profile.objects.get(user=request.user)
if request.method == 'POST':
deposit = request.POST['deposit']
username = request.POST['username']
review = request.POST['review']
photo = request.FILES['photo']
        for admin in allAdmin:
            notify.send(request.user, recipient=admin, verb='deposit a balance',
                        level=Notification.LEVELS.success)
add = Deposit.objects.create(deposit_amount=deposit, frozza_username = username, review=review, photo= photo, user=request.user )
        messages.warning(request, 'Deposit Successful')
context = {
'histories':histories,
'page_url' : '/deposit',
'profiles' : profiles
}
return render(request, 'deposit/deposit.html', context)
```
#### File: forzza/userprofile/views.py
```python
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import DetailView, UpdateView
from django.contrib import messages
from accounts.models import User
from userprofile.models import Profile
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
class ProfileEditView(UpdateView):
model = Profile
template_name = "profile/edit-my-profile.html"
context_object_name = "profile"
object = None
fields = "__all__"
def get_object(self, queryset=None):
return self.request.user.profile
def post(self, request, *args, **kwargs):
print(request.POST.get('first_name'))
user = request.user
user.first_name = request.POST.get('first_name')
user.last_name = request.POST.get('last_name')
user.about = request.POST.get('about')
if request.POST.get('gender') == "male":
user.gender = "male"
else:
user.gender = "female"
user.save()
profile = user.profile
profile.country = request.POST.get('country')
profile.city = request.POST.get('city')
profile.phone = request.POST.get('phone')
profile.save()
return redirect(reverse_lazy('profile:edit-profile'))
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your password was successfully updated!')
return redirect('profile:change-password')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'profile/change_password.html', {
'form': form
})
``` |
{
"source": "10thJanuary/subtype_scz",
"score": 3
} |
#### File: subtype_scz/data_process/pgrs_process.py
```python
import os
import CONFIG
import pandas as pd
from general_io import dump_obj_pkl, load_obj_pkl
def create_pgrs_df():
cached_path = os.path.join(os.path.abspath('.'), 'cached_objects')
if not os.path.exists(cached_path):
os.mkdir(cached_path)
cached_objects = ['SZ_PGRS.pkl', 'NC_PGRS.pkl']
for obj in cached_objects:
if not os.path.exists(os.path.join(cached_path, obj)):
_load_subject_pgrs()
break
CONFIG.SZ_PGRS = load_obj_pkl(file_name='SZ_PGRS')
CONFIG.NC_PGRS = load_obj_pkl(file_name='NC_PGRS')
def _load_subject_pgrs():
encoding = 'ISO-8859-15'
index_name = 'FID'
df = pd.read_csv(CONFIG.pgrs_path, encoding=encoding).set_index(index_name)
nc_subjs = [subj for subj in df.index if subj.startswith('NC')]
df_nc = df.loc[nc_subjs]
sz_subjs = [subj for subj in df.index if subj.startswith('SZ')]
df_sz = df.loc[sz_subjs]
dump_obj_pkl(obj=df_nc, file_name='NC_PGRS')
dump_obj_pkl(obj=df_sz, file_name='SZ_PGRS')
```
#### File: subtype_scz/data_process/subject_info_process.py
```python
import os
import CONFIG
import pandas as pd
import numpy as np
from general_io import dump_obj_pkl, load_obj_pkl
from scz_center_973 import get_center_info
def create_info_df(cached_path=os.path.join(os.path.abspath('.'), 'cached_objects'), from_cached=True):
if not os.path.exists(cached_path):
os.mkdir(cached_path)
create_new_flag = False
if from_cached:
cached_objects = ['SZ_INFO.pkl', 'NC_INFO.pkl']
for obj in cached_objects:
if not os.path.exists(os.path.join(cached_path, obj)):
create_new_flag = True
break
else:
create_new_flag = True
if create_new_flag:
_create_subj_info()
CONFIG.SZ_INFO = load_obj_pkl(file_name='SZ_INFO', dir_path=cached_path)
CONFIG.NC_INFO = load_obj_pkl(file_name='NC_INFO', dir_path=cached_path)
def _create_subj_info():
# read SZ NC subj info csv
encoding = 'gb2312'
index_name = 'subject'
df_nc = pd.read_csv(CONFIG.NC_info_path, encoding=encoding).set_index(index_name)
df_sz = pd.read_csv(CONFIG.SZ_info_path, encoding=encoding).set_index(index_name)
# filter SZ NC: subject missing basic info
df_nc, df_sz = _filter_subject_info(df_nc=df_nc, df_sz=df_sz)
# read center info: center_no, center_info
subject_center_info = df_nc.index.map(get_center_info)
df_nc = df_nc.assign(center_no=[c['center_no_string'] for c in subject_center_info])
df_nc = df_nc.assign(center_name=[c['center_name'] for c in subject_center_info])
subject_center_info = df_sz.index.map(get_center_info)
df_sz = df_sz.assign(center_no=[c['center_no_string'] for c in subject_center_info])
df_sz = df_sz.assign(center_name=[c['center_name'] for c in subject_center_info])
dump_obj_pkl(obj=df_nc, file_name='NC_INFO')
dump_obj_pkl(obj=df_sz, file_name='SZ_INFO')
def _filter_subject_info(df_nc, df_sz):
# sex in ['1', '2']
df_nc = df_nc.loc[(df_nc['sex'] == '1') | (df_nc['sex'] == '2')]
df_sz = df_sz.loc[(df_sz['sex'] == '1') | (df_sz['sex'] == '2')]
# 0 < age_research < 100
mask = (df_nc['age_research'].replace(' ', '-1').astype(np.float32) > 0) & \
(df_nc['age_research'].replace(' ', '-1').astype(np.float32) < 100)
df_nc = df_nc.loc[mask]
mask = (df_sz['age_research'].replace(' ', '-1').astype(np.float32) > 0) & \
(df_sz['age_research'].replace(' ', '-1').astype(np.float32) < 100)
df_sz = df_sz.loc[mask]
# education != ' '
df_nc = df_nc.loc[df_nc['education'] != ' ']
df_sz = df_sz.loc[df_sz['education'] != ' ']
# sz panss total != ' ', nc has no panss
df_sz = df_sz.loc[df_sz['PANSS_Total'] != ' ']
return df_nc, df_sz
```
#### File: 10thJanuary/subtype_scz/general_io.py
```python
import os
import pickle
CACHED_DIR = os.path.join(os.path.abspath('.'), 'cached_objects')
def _check_pkl_name(file_name):
if not file_name:
raise ValueError('pkl object file name invalid!')
if not file_name.endswith('.pkl'):
file_name = file_name + '.pkl'
return file_name
def dump_obj_pkl(obj, file_name, dir_path=CACHED_DIR):
file_name = _check_pkl_name(file_name=file_name)
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj_pkl(file_name, dir_path=CACHED_DIR):
file_name = _check_pkl_name(file_name=file_name)
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'rb') as f:
return pickle.load(f)
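# Usage sketch (illustrative, not from the original repo): round-trip an object through
# the pickle helpers above; dir_path="." avoids requiring the cached_objects directory.
if __name__ == "__main__":
    dump_obj_pkl(obj={"answer": 42}, file_name="demo", dir_path=".")
    print(load_obj_pkl(file_name="demo", dir_path="."))  # {'answer': 42}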
```
#### File: 10thJanuary/subtype_scz/scz_center_973.py
```python
CENTER_NO_NAME_973 = {
'01': 'PKU6',
'02': 'HLG',
'04': 'ANHUI',
'05_01': 'XINXIANG_01',
'05_02': 'XINXIANG_02',
'07': 'XIAN',
'08': 'GUANGZHOU',
'09': 'HUBEI',
'10': 'ZMD',
}
def get_center_info(subj_name):
subj_name_split = subj_name.split('_')
assert len(subj_name_split) >= 3
subj_type_string = subj_name_split[0]
center_no_string = subj_name_split[1]
subj_no_number = int(subj_name_split[2])
if subj_type_string == 'SZ':
res = _get_sz_center(center_no_string, subj_no_number)
elif subj_type_string == 'NC':
res = _get_nc_center(center_no_string, subj_no_number)
elif subj_type_string == 'HR':
res = _get_hr_center(center_no_string, subj_no_number)
else:
res = {'center_no_string': '', 'center_name': ''}
return res
def _get_sz_center(center_no_string, subj_no_number):
if center_no_string == '05':
if subj_no_number > 86 or subj_no_number == 42:
center_no_string += '_02'
else:
center_no_string += '_01'
res = {'center_no_string': center_no_string, 'center_name': CENTER_NO_NAME_973[center_no_string]}
return res
def _get_nc_center(center_no_string, subj_no_number):
if center_no_string == '05':
if subj_no_number > 99:
center_no_string += '_02'
else:
center_no_string += '_01'
res = {'center_no_string': center_no_string, 'center_name': CENTER_NO_NAME_973[center_no_string]}
return res
def _get_hr_center(center_no_string, subj_no_number):
if center_no_string == '05':
if subj_no_number > 97:
center_no_string += '_02'
else:
center_no_string += '_01'
res = {'center_no_string': center_no_string, 'center_name': CENTER_NO_NAME_973[center_no_string]}
return res
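# Illustrative sketch (not part of the original module): subject names are expected to
# look like "<TYPE>_<CENTER>_<NUMBER>"; center "05" is split into XINXIANG_01/_02 by
# subject number as implemented above. The example IDs below are made up.
if __name__ == "__main__":
    print(get_center_info("SZ_01_003"))  # {'center_no_string': '01', 'center_name': 'PKU6'}
    print(get_center_info("NC_05_120"))  # {'center_no_string': '05_02', 'center_name': 'XINXIANG_02'}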
``` |
{
"source": "10tus/web-scraping-bigboi",
"score": 3
} |
#### File: web-scraping-bigboi/scripts/stocktifyprocess.py
```python
from bs4 import BeautifulSoup
import requests
import time
import json
import os.path
import concurrent.futures
from kivymd.uix.card import MDCardSwipe
from kivy.properties import StringProperty
from kivymd.utils import asynckivy
#make this asynchronous
class SwipeStockItem(MDCardSwipe):
sCode=StringProperty()
def Tick(self,*args):
async def Update():
req = requests.get("https://www.investagrams.com/Stock/PSE:"+self.sCode)
s=BeautifulSoup(req.text,"lxml")
lp=s.find('span',{'id':'lblStockLatestLastPrice'}).text
self.ids.curr_price.text=f'{lp}'
asynckivy.start(Update())
class ProcessManager():
stocksList=[]
def WriteToJSON(self,data,file='data.json'):
with open(file,'w') as f:
json.dump(data, f, indent=4)
"""
def Tick(self):
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as executor:
executor.map(self.AnalyzeInput,self.stocksList)"""
def ReadJSON(self):
return 0
def CreateJSON(self,sCard,resPrice,supPrice,file="data.json"):
data=''
if not(os.path.exists(file)): #checks if data.json is not yet created
data = {"stock":[{sCard:{
"resist":resPrice,
"support":supPrice
}
}
]
}
else:
with open('data.json') as json_file:
data = json.load(json_file)
temp = data['stock']
# python object to be appended
obj = {sCard:{
"resist":resPrice,
"support":supPrice
}
}
# appending data to emp_details
temp.append(obj)
self.stocksList.append(sCard)
return data
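# Illustrative note (not from the original file): with a hypothetical stock card,
# ProcessManager().CreateJSON("JFC", "30.0", "25.0") produces or extends a structure
# shaped like
#   {"stock": [{"JFC": {"resist": "30.0", "support": "25.0"}}, ...]}
# which WriteToJSON then serialises to data.json.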
``` |
{
"source": "1-0/weatherx",
"score": 3
} |
#### File: 1-0/weatherx/weather.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# http://rp5.ua/archive.php?wmo_id=34300&lang=ru
# lt_kh - local Kharkov Time, a30 - end
COLUM_NAME = [
"lt_kh","T","Po","P","Pa","U","DD","Ff","ff10","ff3","N","WW","W1","W2","Tn","Tx","Cl","Nh","H","Cm","Ch","VV","Td","RRR","tR","E","Tg","E'","sss","a30"
]
def show_extrems_column(df, freq, column, maximum):
dataframe = df.groupby(pd.Grouper(key="lt_kh", freq=freq)).mean()
if maximum:
data = np.max(dataframe,axis=0)
else:
data = np.min(dataframe,axis=0)
arr = dataframe[column]
d = np.where(arr==data[column])
try:
res1, res2 = int(d[-1]), data[column]
except TypeError:
res1, res2 = d, data[column]
return res1, res2
# 1. Find the windiest month - (month and average wind speed)
# 2. Find the coldest month - (month and average temperature)
# 3. Find the coldest day - (day and average temperature)
# 4. Find the warmest month - (month and average temperature)
# 5. Find the warmest day - (day and average temperature)
# 6. Find the rainiest week - (period and total precipitation)
if __name__ == "__main__":
d = pd.read_csv(
'34300.01.01.2016.01.01.2017.1.0.0.ru.csv.gz',
comment='#',
parse_dates=[0, ],
skiprows=[0, 1, 2, 3 , 4, 5, 6],
delimiter=';',
names = COLUM_NAME,
compression='gzip',
error_bad_lines=False
)
print("#1. Найти самый ветреный месяц - (месяц и средняя скорость ветра)", show_extrems_column(d, '1M', 'Ff', maximum = True))
print("#2. Найти самый холодный месяц - (месяц и средняя температура)", show_extrems_column(d, '1M', 'T', maximum = False))
print("#3. Найти самый холодный день - (день и средняя температура)", show_extrems_column(d, '1D', 'T', maximum = False))
print("#4. Найти самый тёплый месяц - (месяц и средняя температура)", show_extrems_column(d, '1M', 'T', maximum = True))
print("#5. Найти самый тёплый день - (день и средняя температура)", show_extrems_column(d, '1D', 'T', maximum = True))
print("#6. Найти самую дождливую неделю - (период и количество осадков)", show_extrems_column(d, '1W', 'tR', maximum = True))
``` |
{
"source": "10xbeta/tb2s",
"score": 2
} |
#### File: follow_behavior/scripts/behavior_service.py
```python
import rospy
import actionlib
import behavior_common.msg
import time
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from math import radians, degrees
import tf
import os, thread
# for talking
# import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
from geometry_msgs.msg import PointStamped, Point, PoseStamped, Pose, Pose2D
from body_tracker_msgs.msg import BodyTracker
from enum import Enum
import tf
from playsound import playsound
# SHELDON ONLY
#from dynamixel_controllers.srv import TorqueEnable, SetServoTorqueLimit, SetSpeed
#from sheldon_servos.servo_joint_list import all_joints, head_joints, right_arm_joints
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.standard_servo_positions import *
#from sheldon_servos.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
from tb2s_pantilt.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
# TB2S ONLY
head_pan_pub = rospy.Publisher('/head_pan_controller/command', Float64, queue_size=1)
head_tilt_pub = rospy.Publisher('/head_tilt_controller/command', Float64, queue_size=1)
move_pub = rospy.Publisher('/cmd_vel_mux/behavior', Twist, queue_size=5)
class BehaviorAction(object):
def __init__(self, name):
self._action_name = name
rospy.loginfo('%s: Initializing Python behavior service' % (self._action_name))
self._as = actionlib.SimpleActionServer(self._action_name,
behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
# Behavior Settings
# TODO - How to get these parameters passed from launch file to here?
# for now, just set here.
self.enable_body_tracking = True # rospy.get_param('~enable_body_tracking', True)
# rospy.loginfo('%s: PARAM: enable_body_tracking = %d'.(self._action_name),
# self.enable_body_tracking)
self.enable_random_head_movement = False # rospy.get_param('~enable_random_head_movement', True)
# rospy.loginfo('%s: PARAM: enable_random_head_movement = %d'.(self._action_name),
# self.enable_random_head_movement)
self.camera_link = 'camera_link' # rospy.get_param('~camera_link', 'camera_link')
self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
self.resource_dir = rospy.get_param('resource_dir',
"/home/system/catkin_robot/src/tb2s/tb2s_behaviors/follow_behavior/scripts/resources/")
self.ding_path = os.path.join(self.resource_dir, "ding.wav")
rospy.loginfo("DBG: DING PATH: %s", self.ding_path)
# constants
self.MAX_PAN = 1.5708 # 90 degrees
self.MAX_TILT = 0.60 # Limit vertical to assure good tracking
        self.DEADBAND_ANGLE = 0.0872665 # 5 deg deadband in middle to prevent oscillation
self.FIRST_TARGET_TIMEOUT_SECONDS = 5.0
self.TRACKING_TIMEOUT_SECONDS = 3.0
self.GESTURE_TIMEOUT_SECONDS = 10.0
self.DEFAULT_TILT_ANGLE = -0.35 # tilt head up slightly to find people more easily
# Timeout timers
self.first_target_timer = 0
self.tracking_timer = 0
self.gesture_timer = 0
self.joint_state = JointState() # for reading servo positions
#self.astra_target = list()
self.id_to_track = 0 # 0 = not tracking anyone
# Initialize State Machine
self.TrackingState = Enum('TrackingState',
'WAITING_FOR_FIRST_ID TRACKING WAITING_FOR_GESTURE')
self.tracking_state = self.TrackingState.WAITING_FOR_FIRST_ID
rospy.loginfo("%s: init complete", self._action_name)
playsound(self.ding_path)
#------------------------------------------------------------------------
def Clamp(self, value, max_value): # clamp between pos and neg max_value
return max(min(value, max_value), -max_value)
def MoveRobot(self, tracking_angle, tracking_distance):
rospy.loginfo("%s: MoveRobot: Angle = %f, Distance = %f",
self._action_name, tracking_angle, tracking_distance)
TURN_MULTIPLIER = 3.0
FORWARD_ACCELERATION_MULTIPLIER = 0.2
TURN_DEADBAND = 0.01
FOWARD_DEADBAND = 1.2 #1.6
BACKWARD_DEADBAND = 0.9 # 1.3
BACKWARD_ACCELERATION_MULTIPLIER = 0.1
speed = 0.0
turn = 0.0
# Set Turn
turn = tracking_angle * TURN_MULTIPLIER;
# Set Speed
if tracking_distance > FOWARD_DEADBAND:
speed = FORWARD_ACCELERATION_MULTIPLIER * (tracking_distance * tracking_distance);
elif (tracking_distance < BACKWARD_DEADBAND) and tracking_distance > 0:
speed = -BACKWARD_ACCELERATION_MULTIPLIER * (1 / (tracking_distance * tracking_distance));
twist = Twist()
twist.linear.x = speed; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = turn
move_pub.publish(twist)
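    # Worked example (illustrative): with tracking_angle = 0.2 rad and
    # tracking_distance = 2.0 m, turn = 0.2 * 3.0 = 0.6 and, because 2.0 exceeds the
    # forward deadband of 1.2, speed = 0.2 * 2.0**2 = 0.8; a person closer than the
    # 0.9 m backward deadband instead yields a negative (backing-up) speed.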
def StopRobot(self):
twist = Twist()
twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
move_pub.publish(twist)
#------------------------------------------------------------------------
# Callbacks
def joint_state_cb(self, msg):
#rospy.loginfo("%s: joint_state_cb called", self._action_name)
try:
test = msg.name.index(self.head_pan_joint)
self.joint_state = msg
except:
return
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)]
except:
return
#rospy.loginfo("%s: joint_state_cb: Current Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
#------------------------------------------------------------------------
# 3D Pose Tracking: Message contains xyz of person
# position is relative to the robot
# NOT USED CURRENTLY. May use later with stationary camera or Astra SDK
def body_pose_cb(self, msg):
rospy.loginfo('%s: ERROR ERROR got 3D body_pose message' % (self._action_name))
return
if person_id != self.id_to_track:
# not the right person, skip
rospy.loginfo("%s: Body Tracker: Tracking ID is %d, skipping pose2D for ID %d",
self._action_name, self.id_to_track, person_id )
return
# position component of the target pose stored as a PointStamped() message.
# create a PointStamped structure to transform via transformPoint
target = PointStamped()
target.header.frame_id = msg.header.frame_id
target.point = msg.pose.position
if target.point.z == 0.0:
rospy.loginfo('%s: skipping blank message' % (self._action_name))
return
rospy.loginfo("%s: Body Tracker: Tracking person at %f, %f, %f", self._action_name,
target.point.x, target.point.y, target.point.z)
# convert from xyz to pan tilt angles
# TODO: 1) Handle Children - currently assumes everyone is 6 foot tall!
# 2) What happens if robot bows?
if target.point.x < 0.2: # min range of most depth cameras
#rospy.loginfo("%s: Body Tracker: Bad Distance (x) value! %f",
# self._action_name, target.point.x)
return
# math shortcut for approx radians
pan_angle = target.point.y / target.point.x
# OPTION 1: Track actual target height
#person_head_offset_radians = 0.52 # TB2S value - TODO Tune this
#tilt_angle = (target.point.z / target.point.x) + person_head_offset_radians
# OPTION 2: Guess height, based upon distance to person
# FUTURE: combine the two, use "guess" when person is too close?
tilt_angle = 0.4 / target.point.x # SHELDON, CHEST MOUNTED camera
rospy.loginfo("%s: Body Tracker: Pan = %f (%f), Tilt = %f (%f)", self._action_name,
pan_angle, degrees(pan_angle), tilt_angle, degrees(tilt_angle))
# Send servo commands
        if abs(pan_angle) > self.MAX_PAN: # just over 45 degrees - TODO put in actual limits here!
rospy.loginfo("%s: Body Tracker: Pan %f exceeds MAX", self._action_name, pan_angle)
return
        if abs(tilt_angle) > self.MAX_TILT: # Limit vertical to assure good tracking
rospy.loginfo("%s: Body Tracker: Tilt %f exceeds MAX", self._action_name, tilt_angle)
return
head_pan_pub.publish(pan_angle)
head_tilt_pub.publish(-tilt_angle)
# SHELDON ONLY
#sidetiltAmt = 0.0
#head_sidetilt_pub.publish(sidetiltAmt)
#------------------------------------------------------------------------
# 2D Tracking: Message contains person horizontal (x) and vertical (y)
# position is relative to the depth image.
# we use this to control the head tracking (less oscillation?)
def position_cb(self, msg):
#rospy.loginfo('%s: got position_cb message' % (self._action_name))
delta_angle_x = msg.position2d.x # position in radians from center of camera lens
delta_angle_y = msg.position2d.y
person_tracking_distance = msg.position2d.z
person_id = msg.body_id
gesture = msg.gesture
if self.tracking_state == self.TrackingState.WAITING_FOR_FIRST_ID:
# this is the first tracking frame we have received
self.id_to_track = person_id # no id assigned yet, so use this one
self.tracking_timer = rospy.Time.now() # start tracking timer
self.tracking_state = self.TrackingState.TRACKING
rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
elif self.tracking_state == self.TrackingState.WAITING_FOR_GESTURE:
# lost person, waiting to restart tracking with a gesture
if gesture > -1:
playsound(self.ding_path)
self.id_to_track = person_id # got a gesture, so use this ID
self.tracking_timer = rospy.Time.now() # start tracking timer
self.tracking_state = self.TrackingState.TRACKING
rospy.loginfo("%s: ---------------------> Person_ID %d Gesture detected: %d",
self._action_name, person_id, gesture)
rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
# say something
rospy.loginfo("Talking")
goal = audio_and_speech_common.msg.speechGoal(
text_to_speak="ok, i see you")
self.speech_client.send_goal(goal)
#result = self.speech_client.wait_for_result() # wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
else:
return # continue waiting
elif self.tracking_state != self.TrackingState.TRACKING:
rospy.logwarn("%s: TRACKING STATE ERROR, unknown state: %d",
self._action_name, self.tracking_state)
return
if person_id != self.id_to_track:
# not the right person, skip
time_since_last_target = rospy.Time.now() - self.tracking_timer
rospy.loginfo("%s: Skipping ID %d, tracking ID is %d, timer is %f",
self._action_name, self.id_to_track, person_id, time_since_last_target.to_sec() )
return
# --------------------------------------------------------------------
# GOT A PERSON TO TRACK
rospy.loginfo("%s: Body Tracker: Person %d 2D Delta: x = %f, y = %f",
self._action_name, person_id, delta_angle_x, delta_angle_y )
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)] * -1.0
except:
return
#rospy.loginfo("%s: Body Tracker: Current Servo: Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
# add target position to current servo position
pan_angle = current_pan + (delta_angle_x * 0.75) #shoot for less
tilt_angle = current_tilt + (delta_angle_y * 0.75)
#rospy.loginfo("%s: Before Clamp: Servo Command: Pan = %f, Tilt = %f",
# self._action_name, pan_angle, tilt_angle)
# limit to valid servo range
pan_angle = self.Clamp(pan_angle, self.MAX_PAN)
tilt_angle = self.Clamp(tilt_angle, self.MAX_TILT)
#rospy.loginfo("%s: After Clamp: Servo Command: Pan = %f, Tilt = %f",
# self._action_name, pan_angle, tilt_angle)
# Save value to steer the robot toward
person_tracking_angle = pan_angle
# command servos to move to target, if not in deadband
pan_on_target = True
tilt_on_target = True
if abs(delta_angle_x) > self.DEADBAND_ANGLE:
head_pan_pub.publish(pan_angle)
pan_on_target = False
if abs(delta_angle_y) > self.DEADBAND_ANGLE:
head_tilt_pub.publish(-tilt_angle)
tilt_on_target = False
#if pan_on_target and tilt_on_target:
# rospy.loginfo("%s: Body Track On target ID %d",
# self._action_name, person_id)
#else:
# rospy.loginfo("%s: Body Track ID %d: Pan delta = %f, Tilt Delta = %f",
# self._action_name, person_id, delta_angle_x, delta_angle_y)
# SHELDON ONLY
#side_tilt_angle = 0.0
#head_sidetilt_pub.publish(side_tilt_angle)
# Move the robot to follow person
self.MoveRobot(person_tracking_angle, person_tracking_distance)
self.tracking_timer = rospy.Time.now() # reset tracking timer
#------------------------------------------------------------------------
# Execute Behavior - Starts when this behavior goes active
def execute_cb(self, goal):
#r = rospy.Rate(1)
        # Initialize state engine
self.tracking_state = self.TrackingState.WAITING_FOR_FIRST_ID
# initialize Speech
rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
self.speech_client = actionlib.SimpleActionClient("/speech_service",
audio_and_speech_common.msg.speechAction)
self.speech_client.wait_for_server()
# Subscribers - begin listening for tracking messages
if self.enable_body_tracking:
position_sub = rospy.Subscriber("/body_tracker/position", BodyTracker, self.position_cb, queue_size=1)
#rospy.Subscriber("/body_tracker/pose", PoseStamped, self.body_pose_cb, queue_size=1)
#pose2d_sub = rospy.Subscriber("/body_tracker/pose2d", Pose2D, self.pose_2d_cb, queue_size=1)
servo_sub = rospy.Subscriber('/joint_states', JointState, self.joint_state_cb) # for servos
#gesture_sub = rospy.Subscriber('/body_tracker/gesture', Pose2D, self.gesture_cb)
# Set servos speed and torque
# TODO SetServoTorque(0.5, head_joints)
SetServoSpeed(0.5, head_joints)
# Center Camera Head
#head_pan_pub.publish(0.0)
#head_tilt_pub.publish(self.DEFAULT_TILT_ANGLE) # tilt head up to find people more easily
#head_sidetilt_pub.publish(0.0) # SHELDON ONLY
# start with robot stopped
self.StopRobot()
# say something
rospy.loginfo("Talking")
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="ok, i will follow you")
self.speech_client.send_goal(goal)
#result = self.speech_client.wait_for_result() # wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
# Initialize timers
self.first_target_timer = rospy.Time.now()
self.tracking_timer = rospy.Time.now()
self.gesture_timer = rospy.Time.now()
rospy.loginfo('==================================================')
rospy.loginfo('==================================================')
rospy.loginfo('==================================================')
rospy.loginfo('%s: waiting to spot first person...' % (self._action_name))
# -------- LOOP --------
while True:
rospy.loginfo('==================================================')
rospy.loginfo('%s: Tracking State: %s', self._action_name, self.tracking_state.name)
rospy.loginfo('==================================================')
if self._as.is_preempt_requested():
break # higher priority behavior requested
if self.tracking_state == self.TrackingState.WAITING_FOR_FIRST_ID:
time_waiting_for_first_target = rospy.Time.now() - self.first_target_timer
if time_waiting_for_first_target > rospy.Duration.from_sec(
self.FIRST_TARGET_TIMEOUT_SECONDS):
rospy.logwarn("%s: time_waiting_for_first_target: I DONT SEE ANYONE TO TRACK!",
self._action_name)
# say something
rospy.loginfo("Talking")
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="darn, I dont see anyone to follow")
self.speech_client.send_goal(goal)
#result = self.speech_client.wait_for_result() # wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
break # did not find a person to track
elif self.tracking_state == self.TrackingState.TRACKING:
time_since_last_target = rospy.Time.now() - self.tracking_timer
rospy.loginfo("%s: State = TRACKING. Time since last frame: %d",
self._action_name, time_since_last_target.to_sec() )
if time_since_last_target > rospy.Duration.from_sec(
self.TRACKING_TIMEOUT_SECONDS):
# target timed out! Lost User!
head_tilt_pub.publish(self.DEFAULT_TILT_ANGLE) # Set tilt for optimal capture
rospy.loginfo("%s: LOST USER! waiting for gesture...", self._action_name)
self.gesture_timer = rospy.Time.now()
self.tracking_state = self.TrackingState.WAITING_FOR_GESTURE
# say something
rospy.loginfo("Talking")
goal = audio_and_speech_common.msg.speechGoal(
text_to_speak="darn, I lost you. please wave or something")
self.speech_client.send_goal(goal)
#result = self.speech_client.wait_for_result() # wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
elif self.tracking_state == self.TrackingState.WAITING_FOR_GESTURE:
time_waiting_for_gesture = rospy.Time.now() - self.gesture_timer
if time_waiting_for_gesture > rospy.Duration.from_sec(
self.GESTURE_TIMEOUT_SECONDS):
rospy.logwarn("%s: time_waiting_for_gesture: I DONT SEE ANY GESTURES!",
self._action_name)
# say something
rospy.loginfo("Talking")
goal = audio_and_speech_common.msg.speechGoal(
text_to_speak="i have stopped following, now what")
self.speech_client.send_goal(goal)
#result = self.speech_client.wait_for_result() # wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
break # did not find a gesture to restart tracking
else:
rospy.logwarn("%s: BAD STATE!", self._action_name )
time.sleep(0.5)
#----------------------------------------------------------------
# Behavior Exit
# Stop wheels before exiting
self.StopRobot()
rospy.loginfo('%s: Behavior Exiting', self._action_name)
position_sub.unregister()
#pose2d_sub.unregister()
servo_sub.unregister()
#gesture_sub.unregister()
# Report exit status
if self._as.is_preempt_requested():
self._as.set_preempted();
else:
self._as.set_succeeded();
if __name__ == '__main__':
rospy.init_node('follow_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
```
#### File: idle_behavior/scripts/behavior_service.py
```python
import rospy
import actionlib
import behavior_common.msg
import time
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
import random
from math import radians, degrees
import tf
import os, thread
from playsound import playsound
# SHELDON ONLY
#from dynamixel_controllers.srv import TorqueEnable, SetServoTorqueLimit, SetSpeed
#from sheldon_servos.servo_joint_list import all_joints, head_joints, right_arm_joints
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.standard_servo_positions import *
#from sheldon_servos.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
from tb2s_pantilt.set_servo_speed import *
#from sheldon_servos.set_servo_torque import *
from body_tracker_msgs.msg import BodyTracker
#from sensor_msgs.msg import Joy
from geometry_msgs.msg import PointStamped, Point, PoseStamped, Pose, Pose2D
#import geometry_msgs.msg
import tf
# TB2S ONLY
head_pan_pub = rospy.Publisher('/head_pan_controller/command', Float64, queue_size=1)
head_tilt_pub = rospy.Publisher('/head_tilt_controller/command', Float64, queue_size=1)
class BehaviorAction(object):
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name,
behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
rospy.loginfo('%s: Initializing Python behavior service' % (self._action_name))
#====================================================================
# Behavior Settings
# TODO - How to get these parameters passed from launch file to here?
# for now, just set here.
self.enable_body_tracking = True # rospy.get_param('~enable_body_tracking', True)
# rospy.loginfo('%s: PARAM: enable_body_tracking = %d'.(self._action_name),
# self.enable_body_tracking)
self.enable_random_head_movement = True # rospy.get_param('~enable_random_head_movement', True)
# rospy.loginfo('%s: PARAM: enable_random_head_movement = %d'.(self._action_name),
# self.enable_random_head_movement)
self.camera_link = 'camera_link' # rospy.get_param('~camera_link', 'camera_link')
self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
self.resource_dir = rospy.get_param('resource_dir',
"/home/system/catkin_robot/src/tb2s/tb2s_behaviors/follow_behavior/scripts/resources/")
self.ding_path = os.path.join(self.resource_dir, "ding.wav")
rospy.loginfo("DBG: DING PATH: %s", self.ding_path)
#====================================================================
# constants
self.MAX_PAN = 1.5708 # 90 degrees
self.MAX_TILT = 0.60 # Limit vertical to assure good tracking
self.DEADBAND_ANGLE = 0.0872665 # 5 deg deadband in middle to prevent osc
self.DEFAULT_TILT_ANGLE = -0.35 # tilt head up slightly to find people more easily
self.IDLE_TILT_MIN = 0.2
self.IDLE_TILT_MAX = 0.3
self.IDLE_PAN_MAX = 0.5
self.tracking = False
self.joint_state = JointState() # for reading servo positions
#self.astra_target = list()
        # Remember which person to track (so the camera does not oscillate)
self.id_to_track = 0 # 0 = not tracking anyone
self.last_target_time = rospy.Time.now() # start timer
# Initialize tf listener
#self.tf = tf.TransformListener()
# Allow tf to catch up
#rospy.sleep(2)
#------------------------------------------------------------------------
def Clamp(self, value, max_value): # clamp between pos and neg max_value
return max(min(value, max_value), -max_value)
def joint_state_cb(self, msg):
#rospy.loginfo("%s: joint_state_cb called", self._action_name)
try:
test = msg.name.index(self.head_pan_joint)
self.joint_state = msg
except:
return
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)]
except:
return
#rospy.loginfo("%s: joint_state_cb: Current Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
def gesture_cb(self, msg):
rospy.loginfo('%s: ERROR ERROR got gesture_cb message' % (self._action_name))
return
gesture = msg.x # position in radians from center of camera lens
# msg.y not used
person_id = int(msg.theta)
# whenever we get a gesture, force this to be the current user
# TODO decode the gestures to what kind they are...
self.id_to_track = person_id
self.last_target_time = rospy.Time.now() # reset timer
#====================================================================
# 3D Pose Tracking: Message contains xyz of person
# position is relative to the robot
def body_pose_cb(self, msg):
rospy.loginfo('%s: ERROR! ERROR! got body_pose message' % (self._action_name))
return # THIS CB IS DISABLED
# position component of the target pose stored as a PointStamped() message.
# create a PointStamped structure to transform via transformPoint
target = PointStamped()
target.header.frame_id = msg.header.frame_id
target.point = msg.pose.position
if target.point.z == 0.0:
rospy.loginfo('%s: skipping blank message' % (self._action_name))
return
# frame should be "camera_depth_frame"
#target = self.tf.transformPoint(self.camera_link, raw_target)
rospy.loginfo("%s: Body Tracker: Tracking person at %f, %f, %f", self._action_name,
target.point.x, target.point.y, target.point.z)
# convert from xyz to pan tilt angles
# TODO: 1) Handle Children - currently assumes everyone is 6 foot tall!
# 2) What happens if robot bows?
if target.point.x < 0.2: # min range of most depth cameras
#rospy.loginfo("%s: Body Tracker: Bad Distance (x) value! %f",
# self._action_name, target.point.x)
return
# math shortcut for approx radians
pan_angle = target.point.y / target.point.x
# OPTION 1: Track actual target height
#person_head_offset_radians = 0.52 # TB2S value - TODO Tune this
#tilt_angle = (target.point.z / target.point.x) + person_head_offset_radians
# OPTION 2: Guess height, based upon distance to person
# FUTURE: combine the two, use "guess" when person is too close?
tilt_angle = 0.4 / target.point.x # SHELDON, CHEST MOUNTED camera
rospy.loginfo("%s: Body Tracker: Pan = %f (%f), Tilt = %f (%f)", self._action_name,
pan_angle, degrees(pan_angle), tilt_angle, degrees(tilt_angle))
# Send servo commands
        if abs(pan_angle) > self.MAX_PAN: # just over 45 degrees - TODO put in actual limits here!
rospy.loginfo("%s: Body Tracker: Pan %f exceeds MAX", self._action_name, pan_angle)
return
        if abs(tilt_angle) > self.MAX_TILT: # Limit vertical to assure good tracking
rospy.loginfo("%s: Body Tracker: Tilt %f exceeds MAX", self._action_name, tilt_angle)
return
head_pan_pub.publish(pan_angle)
head_tilt_pub.publish(-tilt_angle)
# SHELDON ONLY
#sidetiltAmt = 0.0
#pub_head_sidetilt.publish(sidetiltAmt)
self.tracking = True # don't do idle movements
#====================================================================
# 2D Tracking: Message contains person horizontal (x) and vertical (y)
# position is relative to the depth image.
def position_cb(self, msg):
#rospy.loginfo('%s: got position_cb message' % (self._action_name))
delta_angle_x = msg.position2d.x # position in radians from center of camera lens
delta_angle_y = msg.position2d.y
person_id = msg.body_id
gesture = msg.gesture
if self.id_to_track == 0:
self.id_to_track = person_id # no id assigned yet, so use this one
rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
elif gesture > -1:
playsound(self.ding_path) # indicate gesture recognized with a sound
self.id_to_track = person_id # got a gesture, so use this ID
rospy.loginfo("%s: ---------------------> Person_ID %d Gesture detected: %d",
self._action_name, person_id, gesture)
rospy.loginfo("%s: Tracking Person_ID %d", self._action_name, person_id)
elif person_id != self.id_to_track:
# not the right person, see if the old one timed out
time_since_last_target = rospy.Time.now() - self.last_target_time
if time_since_last_target > rospy.Duration.from_sec(3.0):
# target timed out, use this one TODO - see which target is closest?
rospy.loginfo("%s: Body Tracker: ID %d Timed out, changing to ID %d",
self._action_name, self.id_to_track, person_id )
self.id_to_track = person_id
else:
rospy.loginfo("%s: Body Tracker: Tracking ID %d, so skipping pose2D for ID %d",
self._action_name, self.id_to_track, person_id )
return
self.last_target_time = rospy.Time.now() # reset timer
# Calculate amount to move
rospy.loginfo("%s: Body Tracker: Person %d 2D Delta: x = %f, y = %f",
self._action_name, person_id, delta_angle_x, delta_angle_y )
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)] * -1.0
except:
return
#rospy.loginfo("%s: Body Tracker: Current Servo: Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
# add target position to current servo position
pan_angle = current_pan + (delta_angle_x * 0.75) #shoot for less
tilt_angle = current_tilt + (delta_angle_y * 0.75)
#rospy.loginfo("%s: Before Clamp: Servo Command: Pan = %f, Tilt = %f",
# self._action_name, pan_angle, tilt_angle)
# limit to valid servo range
pan_angle = self.Clamp(pan_angle, self.MAX_PAN)
tilt_angle = self.Clamp(tilt_angle, self.MAX_TILT)
#rospy.loginfo("%s: After Clamp: Servo Command: Pan = %f, Tilt = %f",
# self._action_name, pan_angle, tilt_angle)
# command servos to move to target, if not in deadband
pan_on_target = True
tilt_on_target = True
if abs(delta_angle_x) > self.DEADBAND_ANGLE:
head_pan_pub.publish(pan_angle)
pan_on_target = False
if abs(delta_angle_y) > self.DEADBAND_ANGLE:
head_tilt_pub.publish(-tilt_angle)
tilt_on_target = False
if pan_on_target and tilt_on_target:
rospy.loginfo("%s: Body Track On target ID %d",
self._action_name, person_id)
else:
rospy.loginfo("%s: Body Track ID %d: Pan delta = %f, Tilt Delta = %f",
self._action_name, person_id, delta_angle_x, delta_angle_y)
# SHELDON ONLY
#side_tilt_angle = 0.0
#pub_head_sidetilt.publish(side_tilt_angle)
self.tracking = True # don't do idle movements
#====================================================================
# Main loop
def execute_cb(self, goal):
#r = rospy.Rate(1)
# Idle Behavior has gone Active!
# Set servos speed and torque
# TODO SetServoTorque(0.5, head_joints)
SetServoSpeed(0.5, head_joints)
# Center Camera Head
head_pan_pub.publish(0.0)
head_tilt_pub.publish(self.DEFAULT_TILT_ANGLE) # tilt head up to find people more easily
#pub_head_sidetilt.publish(0.0) # SHELDON ONLY
if self.enable_random_head_movement:
rospy.loginfo('%s: random head movements enabled...' % (self._action_name))
else:
rospy.loginfo('%s: random head movements DISABLED' % (self._action_name))
if self.enable_body_tracking:
rospy.loginfo('%s: waiting for person tracking...' % (self._action_name))
else:
rospy.loginfo('%s: body tracking DISABLED' % (self._action_name))
if self.enable_body_tracking:
# Enable Subscribers
#rospy.Subscriber("/body_tracker/pose", PoseStamped, self.body_pose_cb, queue_size=1)
position_sub = rospy.Subscriber("/body_tracker/position", BodyTracker, self.position_cb, queue_size=1)
# pose2d_sub = rospy.Subscriber("/body_tracker/pose2d", Pose2D, self.pose_2d_cb, queue_size=1)
servo_sub = rospy.Subscriber('/joint_states', JointState, self.joint_state_cb) # servos
#gesture_sub = rospy.Subscriber('/body_tracker/gesture', Pose2D, self.gesture_cb)
while True:
if self._as.is_preempt_requested():
break
if not self.tracking and self.enable_random_head_movement:
# Idle: Move head to constrained random location, at random intervals
tiltAmt = random.uniform(self.IDLE_TILT_MIN, self.IDLE_TILT_MAX) # 0.3
head_tilt_pub.publish(-tiltAmt)
# rospy.loginfo('%s: Doing Random Movement' % (self._action_name))
panAmt = random.uniform(-self.IDLE_PAN_MAX, self.IDLE_PAN_MAX)
head_pan_pub.publish(panAmt)
# SHELDON ONLY
#sidetiltAmt = 0.0 # TODO random.uniform(-0.05, 0.05)
#pub_head_sidetilt.publish(sidetiltAmt)
else:
rospy.loginfo('%s: Main Loop sleep...' % (self._action_name))
self.tracking = False # do Idle if tracking gets lost
ranTime = random.uniform(1.0, 3.5)
time.sleep(ranTime)
# Behavior Exit / Cleanup
position_sub.unregister()
#pose2d_sub.unregister()
servo_sub.unregister()
#gesture_sub.unregister()
# Idle behavior runs forever until interrupted, so this behavior
        # can only get preempted/cancelled.
        self._as.set_preempted()
if __name__ == '__main__':
rospy.init_node('idle_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
```
#### File: tb2s_joybuttons/scripts/joy_buttons.py
```python
import rospy
import logging
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from behavior_common.msg import CommandState
def callback(data):
msg = CommandState()
# we only allow one behavior state at a time
if data.buttons[2] == 1: # Red B button - Stop current behavior
msg.commandState = "STOP"
pub_behavior.publish(msg)
elif data.buttons[1] == 1: # Green A button
msg.commandState = "FOLLOW_ME"
pub_behavior.publish(msg)
elif data.buttons[0] == 1: # Blue X button
msg.commandState = "TURN_LEFT"
pub_behavior.publish(msg)
elif data.buttons[3] == 1: # Yellow Y button
        msg.commandState = "TURN_RIGHT"
        pub_behavior.publish(msg)
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("/joy", Joy, callback)
rospy.spin()
if __name__ == '__main__':
# Publish an action for the behavior engine to handle
pub_behavior = rospy.Publisher('behavior/cmd', CommandState, queue_size=2)
listener()
```
#### File: tb2s_pantilt/scripts/joy_to_dynamixel.py
```python
import rospy
from sensor_msgs.msg import Joy
from std_msgs.msg import Float64
def callback(data):
if data.buttons[5] == 1:
pub_pan.publish(data.axes[2])
pub_tilt.publish(data.axes[3])
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("/joy", Joy, callback)
rospy.spin()
if __name__ == '__main__':
pub_pan = rospy.Publisher('/head_pan_controller/command', Float64, queue_size=1)
pub_tilt = rospy.Publisher('/head_tilt_controller/command', Float64, queue_size=1)
listener()
``` |
{
"source": "10x-Engineers/cva6",
"score": 2
} |
#### File: 10x-Engineers/cva6/config_pkg_generator.py
```python
import sys
import os
import argparse
import re
def setup_parser_config_generator():
parser = argparse.ArgumentParser()
parser.add_argument("--default_config", type=str, default="cv64a6_imafdc_sv39", required=True,
choices=["cv32a6_imac_sv0","cv32a6_imac_sv32","cv32a6_imafc_sv32","cv64a6_imafdc_sv39"],
help="Default configuration is one of the 4 preexisting configuration: \
cv32a6_imac_sv0, cv32a6_imac_sv32, cv32a6_imafc_sv32, cv64a6_imafdc_sv39")
parser.add_argument("--isa", type=str, default=None, required=True,
help="RISC-V ISA subset")
parser.add_argument("--fpu", type=int, default=None, choices=[0,1],
help="FPU enable ? 1 : enable, 0 : disable")
parser.add_argument("--cvxif", type=int, default=None, choices=[0,1],
help="CoreV-X-Interface enable ? 1 : enable, 0 : disable")
parser.add_argument("--c_ext", type=int, default=None, choices=[0,1],
help="C extension enable ? 1 : enable, 0 : disable")
parser.add_argument("--iuser_en", type=int, default=None, choices=[0,1],
help="Fetch User enable ? 1 : enable, 0 : disable")
parser.add_argument("--iuser_w", type=int, default=None, choices=list(range(1,64)),
help="Fetch User Width ? [1-64]")
parser.add_argument("--duser_en", type=int, default=None, choices=[0,1],
help="Data User enable ? 1 : enable, 0 : disable")
parser.add_argument("--duser_w", type=int, default=None, choices=list(range(1,64)),
help="Data User Width ? [1-64]")
return parser
ISA = ""
MABI = ""
Config = dict()
MapArgsToParameter={
"xlen" : "CVA6ConfigXlen",
"fpu" : "CVA6ConfigFpuEn",
"cvxif" : "CVA6ConfigCvxifEn",
"c_ext" : "CVA6ConfigCExtEn",
"iuser_en" : "CVA6ConfigFetchUserEn",
"iuser_w" : "CVA6ConfigFetchUserWidth",
"duser_en" : "CVA6ConfigDataUserEn",
"duser_w" : "CVA6ConfigDataUserWidth"
}
MapParametersToArgs = {i:k for k, i in MapArgsToParameter.items()} #reverse map
def generate_config(argv):
parser = setup_parser_config_generator()
args = parser.parse_args(argv)
Args = vars(args) # give a dictionary of args
print("\n[Generating configuration ... ]")
print("Default configuration is "+Args['default_config'])
print("Make sure to compile your code with this ISA :", Args["isa"], "!")
ISA = Args["isa"]
if "cv32" in Args['default_config']:
gen = "gen32"
Args['xlen'] = 32
MABI = "ilp32"
else:
gen = "gen64"
Args['xlen'] = 64
MABI = "lp64"
os.system("cp core/Flist."+Args['default_config']+" core/Flist."+gen) #copy Flist
os.system("cp core/include/"+Args['default_config']+"_config_pkg.sv core/include/"+gen+"_config_pkg.sv") # copy package
Flistfile = open("core/Flist."+gen, "r")
Flistlines = []
for line in Flistfile :
line = re.sub(r"(\${CVA6_REPO_DIR}/core/include/)"+Args['default_config']+"(_config_pkg.sv)", r"\g<1>"+gen+"\g<2>", line) # change package name in Flist to the one generated
Flistlines.append(line)
Flistfile = open("core/Flist."+gen, "w")
Flistfile.writelines(Flistlines)
    Flistfile.close()
for i in Args:
configfile = open("core/include/"+gen+"_config_pkg.sv", "r")
if i not in ['default_config', 'isa', 'xlen']:
if Args[i] != None:
print("setting", i, "to", Args[i])
alllines = []
lineXlen = None
for line in configfile :
lineXlen = re.match(r"( localparam CVA6ConfigXlen = )(?P<value>.*)(;)", line) if lineXlen == None else lineXlen
line = re.sub(r"( localparam "+MapArgsToParameter[i]+" = )(.*)(;)", r"\g<1>"+str(Args[i])+"\g<3>", line) # change parameter if required by Args
alllines.append(line)
linematch = re.match(r"( localparam (CVA6Config)(?P<param>.*) = )(?P<value>.*)(;)", line) # and read the modified line to know which configuration we are creating
if linematch:
Param = MapParametersToArgs['CVA6Config'+linematch.group('param')]
Config[Param] = lineXlen.group('value') if linematch.group('value') == "CVA6ConfigXlen" else linematch.group('value')
for k in Config.keys():
Config[k] = int(Config[k]) # Convert value from str to int
configfile = open("core/include/"+gen+"_config_pkg.sv", "w")
configfile.writelines(alllines)
                configfile.close()
print("[Generating configuration Done]")
return [ISA, MABI, gen, Config] # return ISA, MABI, gen (for testharness), Config for cva6.py in core-v-verif
if __name__ == "__main__":
generate_config(sys.argv[1:])
``` |
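For a sense of how the generator above is driven, here is a minimal sketch that calls `generate_config` programmatically rather than from the shell. The specific argument values (`cv32a6_imac_sv32`, `rv32imac`, the user-field overrides) are only examples, and the script has to run from the CVA6 repository root so the relative `core/` paths it copies actually exist.
```python
# Hypothetical driver for the generator above; assumes the current working
# directory is the CVA6 repository root so core/Flist.* and the config
# packages under core/include/ can be copied.
from config_pkg_generator import generate_config

isa, mabi, gen, config = generate_config([
    "--default_config", "cv32a6_imac_sv32",  # one of the four preexisting configs
    "--isa", "rv32imac",                     # ISA string handed back to the caller
    "--iuser_en", "1",                       # example parameter overrides
    "--iuser_w", "2",
])

print(isa, mabi, gen)  # e.g. rv32imac ilp32 gen32
print(config)          # CVA6Config* values read back from the generated package
```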
{
"source": "10XGenomics/lariat",
"score": 2
} |
#### File: lariat/go/check.py
```python
import tenkit.bam
import subprocess
import os
import sys
import numpy as np
import itertools
rfa = os.path.join(os.environ["GOPATH"], "bin", "RFA")
test_fq = os.path.join(os.environ["GOPATH"], "src/test/inputs", "segdups.fastq.gz")
hg19 = os.path.join(os.environ['TENX_REFDATA_PATH'], "fasta/hg19/hg19.fa")
if len(sys.argv) < 2:
print "Running RFA"
args = [rfa, "-reads", test_fq, "-genome", hg19]
print args
subprocess.check_call(args)
bam_fn = "default.bam"
else:
print "Skipping RFA. Analyzing %s" % sys.argv[1]
bam_fn = sys.argv[1]
try:
subprocess.check_call("go tool pprof -dot bin/RFA cpu.pprof > prof.dot", shell=True)
subprocess.check_call("dot -Tpng cpu.dot > dot.png", shell=True)
except Exception:
print "failed making profiling graph. continuing"
#subprocess.check_call(['samtools','sort','default.bam','default_sorted'])
#subprocess.check_call(['samtools','index','default_sorted.bam'])
bam = tenkit.bam.create_bam_infile(bam_fn)
reads = list(bam)
logf = open("check_log.txt", "a")
date = subprocess.check_output("date")
git = subprocess.check_output("git describe --tags --dirty", shell=True)
log_lines = ['', '----------', date, git]
def show_frac(label, r, f):
n_match = len([x for x in r if f(x)])
frac = float(n_match) / len(r)
s = "{0:15}: {1:3f}".format(label, frac)
log_lines.append(s)
show_frac("Unmapped", reads, lambda x: x.is_unmapped)
show_frac("Proper pair", reads, lambda x: x.is_proper_pair)
show_frac("mapq = 0", reads, lambda x: x.mapq == 0)
show_frac("mapq < 30", reads, lambda x: x.mapq < 30)
show_frac("mapq >= 30", reads, lambda x: x.mapq >= 30)
def correct_alignment(bam,r):
# qname = mol:blah:chr3:number:number:80771341:80771523
parts = r.qname.split(":")
al_pos = int(parts[5])
return parts[2] == bam.references[r.tid] and abs(r.pos - al_pos) < 200
def round_closest(val, opts):
diff = np.abs(val - opts)
return opts[np.argmin(diff)]
def report_mapq(res):
mapqs = np.array([x[1] for x in res])
correct = np.array([int(x[2]) for x in res])
return {'med_map': np.median(mapqs), 'emp_mapq': -10.0*np.log10(1.0-correct.mean()), 'n': len(res)}
def analyze_mapqs(bam):
opts = np.array([5, 15, 30, 45])
obs = []
bam.reset()
for r in bam:
rqv = round_closest(r.mapq, opts)
obs.append((rqv, r.mapq, correct_alignment(bam, r)))
obs.sort()
print obs[:5]
print obs[-5:]
# Split into 5 groups and report mapqs
results = []
for (k, vals) in itertools.groupby(obs, lambda x: x[0]):
stats = report_mapq(list(vals))
results.append(stats)
return results
rname_parts = reads[0].qname.split(":")
if len(rname_parts) == 7 and rname_parts[0] == 'mol':
r = analyze_mapqs(bam)
print r
for l in r:
log_lines.append("{}".format(l))
logf.writelines(log_lines)
for l in log_lines:
print l
``` |
{
"source": "10XGenomics/napari",
"score": 3
} |
#### File: experimental/monitor/_monitor.py
```python
import errno
import json
import logging
import os
import sys
from pathlib import Path
from typing import Optional
LOGGER = logging.getLogger("napari.monitor")
# If False monitor is disabled even if we meet all other requirements.
ENABLE_MONITOR = True
def _load_config(path: str) -> dict:
"""Load the JSON formatted config file.
Parameters
----------
path : str
The path of the JSON file we should load.
Return
------
dict
The parsed data from the JSON file.
"""
path = Path(path).expanduser()
if not path.exists():
raise FileNotFoundError(
errno.ENOENT, f"Monitor: Config file not found: {path}"
)
with path.open() as infile:
return json.load(infile)
def _load_monitor_config() -> Optional[dict]:
"""Return the MonitorService config file data, or None.
Return
------
Optional[dict]
The parsed config file data or None if no config.
"""
# We shouldn't even call into this file unless NAPARI_MON is defined
# but check to be sure.
value = os.getenv("NAPARI_MON")
if value in [None, "0"]:
return None
return _load_config(value)
def _setup_logging(config: dict) -> None:
"""Log "napari.monitor" messages to the configured file.
Parameters
----------
config : dict
Monitor configuration
"""
try:
log_path = config['log_path']
except KeyError:
return # No log file.
# Nuke/reset log for now.
# Path(log_path).unlink()
fh = logging.FileHandler(log_path)
LOGGER.addHandler(fh)
LOGGER.setLevel(logging.DEBUG)
LOGGER.info("Writing to log path %s", log_path)
def _get_monitor_config() -> Optional[dict]:
"""Create and return the configuration for the MonitorService.
    The routine might return None for one of several reasons:
    1) We're not running under Python 3.9 or newer.
    2) The monitor is explicitly disabled, ENABLE_MONITOR is False.
3) The NAPARI_MON environment variable is not defined.
4) The NAPARI_MON config file cannot be found and parsed.
Return
------
Optional[dict]
The configuration for the MonitorService.
"""
if sys.version_info[:2] < (3, 9):
# We require Python 3.9 for now. The shared memory features we need
# were added in 3.8, but the 3.8 implemention was buggy. It's
# possible we could backport to or otherwise fix 3.8 or even 3.7,
# but for now we're making 3.9 a requirement.
print("Monitor: not starting, requires Python 3.9 or newer")
return None
if not ENABLE_MONITOR:
print("Monitor: not starting, disabled")
return None
# The NAPARI_MON environment variable points to our config file.
config = _load_monitor_config()
if config is None:
print("Monitor: not starting, no usable config file")
return None
return config
class Monitor:
"""Wraps the monitor service.
We can't start the monitor service at import time. Under the hood the
multiprocessing complains about a "partially started process".
Instead someone must call our start() method explicitly once the
process has fully started.
"""
def __init__(self):
# Both are set when start() is called, and only if we have
# a parseable config file, have Python 3.9, etc.
self._service = None
self._api = None
self._running = False
    def __bool__(self) -> bool:
"""Return True if the service is running.
So that callers can do:
if monitor:
monitor.add(...)
"""
return self._running
@property
def run_command_event(self):
"""The MonitorAPI fires this event for commands from clients."""
return self._api.events.run_command
def start(self) -> bool:
"""Start the monitor service, if it hasn't been started already.
Return
------
bool
True if we started the service or it was already started.
"""
if self._running:
return True # It was already started.
config = _get_monitor_config()
if config is None:
return False # Can't start without config.
_setup_logging(config)
# Late imports so no multiprocessing modules are even
# imported unless we are going to start the service.
from ._api import MonitorApi
from ._service import MonitorService
# Create the API first. It will register our callbacks, then
# we start the manager that will serve those callbacks.
self._api = MonitorApi()
# Now we can start our service.
self._service = MonitorService(config, self._api.manager)
self._running = True
return True # We started the service.
def stop(self) -> None:
"""Stop the monitor service."""
if not self._running:
return
self._api.stop()
self._api = None
self._service.stop()
self._service = None
self._running = False
def on_poll(self, event=None) -> None:
"""The QtPoll object polls us.
Probably we could get rid of polling by creating a thread that
blocks waiting for client messages. Then it posts those messages as
Qt Events. So the GUI doesn't block, but gracefully handles
incoming messages as Qt events.
"""
if self._running:
self._api.poll()
# Handle the event to say "keep polling us".
event.handled = True
def add_data(self, data) -> None:
"""Add data to the monitor service.
Caller should use this pattern:
if monitor:
monitor.add(...)
So no time wasted assembling the dict unless the monitor is running.
"""
if self._running:
self._api.add_napari_data(data)
def send_message(self, message: dict) -> None:
"""Send a message to shared memory clients.
Parameters
----------
message : dict
Post this message to clients.
"""
if self._running:
self._api.send_napari_message(message)
monitor = Monitor()
```
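A minimal usage sketch for the module above, assuming the package re-exports the `monitor` singleton defined at the bottom of the file and that Python 3.9+ is available. Only the `log_path` key is read by this file itself; everything else in the config is handed to `MonitorService`. The temp-file paths are illustrative.
```python
# Illustrative only: write a small NAPARI_MON config, point the environment
# variable at it, then start the monitor once the process is fully up
# (never at import time, per the class docstring above).
import json
import os
import tempfile

cfg_path = os.path.join(tempfile.gettempdir(), "napari_mon.json")
with open(cfg_path, "w") as f:
    json.dump({"log_path": os.path.join(tempfile.gettempdir(), "napari_mon.log")}, f)
os.environ["NAPARI_MON"] = cfg_path

# Assumes the monitor package re-exports the `monitor` singleton shown above.
from napari.components.experimental.monitor import monitor

if monitor.start():  # returns False without a config or on Python < 3.9
    monitor.add_data({"example": {"frame": 0}})
    monitor.send_message({"hello": "clients"})
    monitor.stop()
```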
#### File: experimental/remote/_manager.py
```python
import logging
from ....utils.events import Event
from ...layerlist import LayerList
from ._commands import RemoteCommands
from ._messages import RemoteMessages
LOGGER = logging.getLogger("napari.monitor")
class RemoteManager:
"""Interacts with remote clients.
The monitor system itself purposely does not depend on anything else in
napari except for utils.events.
However RemoteManager and its children RemoteCommands and
RemoteMessages do very much depend on napari. RemoteCommands
executes commands sent to napari by clients. RemoteMessages
    sends messages to remote clients, such as the current state
of the layers.
TODO_OCTREE: We expect this class to grow, if not we can nuke it
and let ViewerModel just create both of these directly.
Parameters
----------
layers : LayerList
The viewer's layers.
"""
def __init__(self, layers: LayerList):
self._commands = RemoteCommands(layers)
self._messages = RemoteMessages(layers)
def process_command(self, event: Event) -> None:
"""Process this command from a remote client.
Parameters
----------
event : Event
Contains the command to process.
"""
return self._commands.process_command(event)
def on_poll(self, _event: Event) -> None:
"""Send out messages when polled."""
self._messages.on_poll()
```
#### File: napari/napari/conftest.py
```python
import os
import sys
import warnings
from functools import partial
from typing import List
import numpy as np
import pytest
from napari import Viewer
from napari.components import LayerList
from napari.layers import Image, Labels, Points, Shapes, Vectors
from napari.plugins._builtins import (
napari_write_image,
napari_write_labels,
napari_write_points,
napari_write_shapes,
)
from napari.utils import io
from napari.utils.config import async_loading
try:
from skimage.data import image_fetcher
except ImportError:
from skimage.data import data_dir
class image_fetcher:
def fetch(data_name):
if data_name.startswith("data/"):
data_name = data_name[5:]
path = os.path.join(data_dir, data_name)
if not os.path.exists(path):
raise ValueError(
f"Legacy skimage image_fetcher cannot find file: {path}"
)
return path
def pytest_addoption(parser):
"""Add napari specific command line options.
--show-viewer
Show viewers during tests, they are hidden by default. Showing viewers
decreases test speed by around 20%.
    --async_only
Run only asynchronous tests, not sync ones.
Notes
-----
Due to the placement of this conftest.py file, you must specifically name
the napari folder such as "pytest napari --show-viewer"
"""
parser.addoption(
"--show-viewer",
action="store_true",
default=False,
        help="show viewer during tests",
)
parser.addoption(
"--async_only",
action="store_true",
default=False,
help="run only asynchronous tests",
)
@pytest.fixture
def qtbot(qtbot):
"""A modified qtbot fixture that makes sure no widgets have been leaked."""
from qtpy.QtWidgets import QApplication
initial = QApplication.topLevelWidgets()
prior_exception = getattr(sys, 'last_value', None)
yield qtbot
# if an exception was raised during the test, we should just quit now and
# skip looking for leaked widgets.
if getattr(sys, 'last_value', None) is not prior_exception:
return
QApplication.processEvents()
leaks = set(QApplication.topLevelWidgets()).difference(initial)
# still not sure how to clean up some of the remaining vispy
# vispy.app.backends._qt.CanvasBackendDesktop widgets...
if any([n.__class__.__name__ != 'CanvasBackendDesktop' for n in leaks]):
raise AssertionError(f'Widgets leaked!: {leaks}')
if leaks:
warnings.warn(f'Widgets leaked!: {leaks}')
@pytest.fixture(scope="function")
def make_test_viewer(qtbot, request):
viewers: List[Viewer] = []
def actual_factory(*model_args, viewer_class=Viewer, **model_kwargs):
model_kwargs['show'] = model_kwargs.pop(
'show', request.config.getoption("--show-viewer")
)
viewer = viewer_class(*model_args, **model_kwargs)
viewers.append(viewer)
return viewer
yield actual_factory
for viewer in viewers:
viewer.close()
@pytest.fixture(
params=['image', 'labels', 'points', 'points-with-properties', 'shapes']
)
def layer_writer_and_data(request):
"""Fixture that supplies layer io utilities for tests.
Parameters
----------
request : _pytest.fixtures.SubRequest
The pytest request object
Returns
-------
tuple
``(writer, layer_data, extension, reader, Layer)``
- writer: a function that can write layerdata to a path
- layer_data: the layerdata tuple for this layer
- extension: an appropriate extension for this layer type
- reader: a function that can read this layer type from a path and
returns a ``(data, meta)`` tuple.
- Layer: the Layer class
"""
if request.param == 'image':
data = np.random.rand(20, 20)
Layer = Image
layer = Image(data)
writer = napari_write_image
extension = '.tif'
def reader(path):
return (io.imread(path), {}, 'image') # metadata
elif request.param == 'labels':
data = np.random.randint(0, 16000, (32, 32), 'uint64')
Layer = Labels
layer = Labels(data)
writer = napari_write_labels
extension = '.tif'
def reader(path):
return (io.imread(path), {}, 'labels') # metadata
elif request.param == 'points':
data = np.random.rand(20, 2)
Layer = Points
layer = Points(data)
writer = napari_write_points
extension = '.csv'
reader = partial(io.csv_to_layer_data, require_type='points')
elif request.param == 'points-with-properties':
data = np.random.rand(20, 2)
Layer = Points
layer = Points(data, properties={'values': np.random.rand(20)})
writer = napari_write_points
extension = '.csv'
reader = partial(io.csv_to_layer_data, require_type='points')
elif request.param == 'shapes':
np.random.seed(0)
data = [
np.random.rand(2, 2),
np.random.rand(2, 2),
np.random.rand(6, 2),
np.random.rand(6, 2),
np.random.rand(2, 2),
]
shape_type = ['ellipse', 'line', 'path', 'polygon', 'rectangle']
Layer = Shapes
layer = Shapes(data, shape_type=shape_type)
writer = napari_write_shapes
extension = '.csv'
reader = partial(io.csv_to_layer_data, require_type='shapes')
else:
return None, None, None, None, None
layer_data = layer.as_layer_data_tuple()
return writer, layer_data, extension, reader, Layer
@pytest.fixture
def layer_data_and_types():
"""Fixture that provides some layers and filenames
Returns
-------
tuple
``layers, layer_data, layer_types, filenames``
- layers: some image and points layers
- layer_data: same as above but in LayerData form
- layer_types: list of strings with type of layer
- filenames: the expected filenames with extensions for the layers.
"""
layers = [
Image(np.random.rand(20, 20), name='ex_img'),
Image(np.random.rand(20, 20)),
Points(np.random.rand(20, 2), name='ex_pts'),
Points(
np.random.rand(20, 2), properties={'values': np.random.rand(20)}
),
]
extensions = ['.tif', '.tif', '.csv', '.csv']
layer_data = [layer.as_layer_data_tuple() for layer in layers]
layer_types = [layer._type_string for layer in layers]
filenames = [layer.name + e for layer, e in zip(layers, extensions)]
return layers, layer_data, layer_types, filenames
@pytest.fixture(
params=[
'image',
'labels',
'points',
'shapes',
'shapes-rectangles',
'vectors',
]
)
def layer(request):
"""Parameterized fixture that supplies a layer for testing.
Parameters
----------
request : _pytest.fixtures.SubRequest
The pytest request object
Returns
-------
napari.layers.Layer
The desired napari Layer.
"""
np.random.seed(0)
if request.param == 'image':
data = np.random.rand(20, 20)
return Image(data)
elif request.param == 'labels':
data = np.random.randint(10, size=(20, 20))
return Labels(data)
elif request.param == 'points':
data = np.random.rand(20, 2)
return Points(data)
elif request.param == 'shapes':
data = [
np.random.rand(2, 2),
np.random.rand(2, 2),
np.random.rand(6, 2),
np.random.rand(6, 2),
np.random.rand(2, 2),
]
shape_type = ['ellipse', 'line', 'path', 'polygon', 'rectangle']
return Shapes(data, shape_type=shape_type)
elif request.param == 'shapes-rectangles':
data = np.random.rand(7, 4, 2)
return Shapes(data)
elif request.param == 'vectors':
data = np.random.rand(20, 2, 2)
return Vectors(data)
else:
return None
@pytest.fixture()
def layers():
"""Fixture that supplies a layers list for testing.
Returns
-------
napari.components.LayerList
The desired napari LayerList.
"""
np.random.seed(0)
list_of_layers = [
Image(np.random.rand(20, 20)),
Labels(np.random.randint(10, size=(20, 2))),
Points(np.random.rand(20, 2)),
Shapes(np.random.rand(10, 2, 2)),
Vectors(np.random.rand(10, 2, 2)),
]
return LayerList(list_of_layers)
@pytest.fixture
def two_pngs():
return [image_fetcher.fetch(f'data/{n}.png') for n in ('moon', 'camera')]
@pytest.fixture
def rgb_png():
return [image_fetcher.fetch('data/astronaut.png')]
@pytest.fixture
def single_png():
return [image_fetcher.fetch('data/camera.png')]
@pytest.fixture
def irregular_images():
return [image_fetcher.fetch(f'data/{n}.png') for n in ('camera', 'coins')]
@pytest.fixture
def single_tiff():
return [image_fetcher.fetch('data/multipage.tif')]
# Currently we cannot run sync and async in the same invocation of pytest
# because we get a segfault for unknown reasons. So for now:
# "pytest" runs sync_only
# "pytest napari --async_only" runs async only
@pytest.fixture(scope="session", autouse=True)
def configure_loading(request):
    """Configure async/sync loading."""
async_mode = request.config.getoption("--async_only")
if async_mode:
# Late import so we don't import experimental code unless using it.
from napari.components.experimental.chunk import synchronous_loading
with synchronous_loading(False):
yield
else:
yield # Sync so do nothing.
def _is_async_mode() -> bool:
"""Return True if we are currently loading chunks asynchronously
Return
------
bool
True if we are currently loading chunks asynchronously.
"""
if not async_loading:
return False # Not enabled at all.
else:
# Late import so we don't import experimental code unless using it.
from napari.components.experimental.chunk import chunk_loader
return not chunk_loader.force_synchronous
@pytest.fixture(autouse=True)
def skip_sync_only(request):
    """Skip sync_only tests if running async."""
sync_only = request.node.get_closest_marker('sync_only')
if _is_async_mode() and sync_only:
pytest.skip("running with --async_only")
@pytest.fixture(autouse=True)
def skip_async_only(request):
"""Skip async_only tests if running sync."""
async_only = request.node.get_closest_marker('async_only')
if not _is_async_mode() and async_only:
pytest.skip("not running with --async_only")
# _PYTEST_RAISE=1 will prevent pytest from handling exceptions.
# Use with a debugger that's set to break on "unhandled exceptions".
# https://github.com/pytest-dev/pytest/issues/7409
if os.getenv('_PYTEST_RAISE', "0") != "0":
@pytest.hookimpl(tryfirst=True)
def pytest_exception_interact(call):
raise call.excinfo.value
@pytest.hookimpl(tryfirst=True)
def pytest_internalerror(excinfo):
raise excinfo.value
def pytest_collection_modifyitems(session, config, items):
put_at_end = ('test_trace_on_start',)
at_end = []
for i, item in enumerate(items):
if item.name in put_at_end:
at_end.append(items.pop(i))
items.extend([x for _, x in sorted(zip(put_at_end, at_end))])
```
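To show how these fixtures are meant to be consumed, here is a hedged sketch of a test that uses `make_test_viewer` and the `sync_only` marker handled by `skip_sync_only` above. The layer data is arbitrary; `add_image` is the standard napari viewer method.
```python
# Sketch of a test built on the fixtures above: make_test_viewer creates a
# viewer (hidden unless --show-viewer is passed) and closes it on teardown,
# and the sync_only marker makes skip_sync_only skip it under --async_only.
import numpy as np
import pytest


@pytest.mark.sync_only
def test_add_image_layer(make_test_viewer):
    viewer = make_test_viewer()
    viewer.add_image(np.random.rand(16, 16))  # any small array will do
    assert len(viewer.layers) == 1
```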
#### File: image/experimental/_chunked_image_loader.py
```python
import logging
from typing import Optional
from .._image_loader import ImageLoader
from ._chunked_slice_data import ChunkedSliceData
from ._image_location import ImageLocation
LOGGER = logging.getLogger("napari.loader")
class ChunkedImageLoader(ImageLoader):
"""Load images using the Chunkloader: synchronously or asynchronously.
Attributes
----------
_current : Optional[ImageLocation]
The location we are currently loading or showing.
"""
def __init__(self):
# We're showing nothing to start.
self._current: Optional[ImageLocation] = None
def load(self, data: ChunkedSliceData) -> bool:
"""Load this ChunkedSliceData (sync or async).
Parameters
----------
data : ChunkedSliceData
The data to load
Return
------
bool
True if load happened synchronously.
"""
location = ImageLocation(data.layer, data.indices)
LOGGER.debug("ChunkedImageLoader.load")
if self._current is not None and self._current == location:
            # We are already showing this slice, or it's being loaded
# asynchronously.
return False
# Now "showing" this slice, even if it hasn't loaded yet.
self._current = location
if data.load_chunks():
return True # Load was sync, load is done.
return False # Load was async, so not loaded yet.
def match(self, data: ChunkedSliceData) -> bool:
"""Return True if slice data matches what we are loading.
Parameters
----------
data : ChunkedSliceData
Does this data match what we are loading?
Return
------
bool
Return True if data matches.
"""
location = data.request.location
if self._current == location:
LOGGER.debug("ChunkedImageLoader.match: accept %s", location)
return True
# Data was for a slice we are no longer looking at.
LOGGER.debug("ChunkedImageLoader.match: reject %s", location)
return False
```
#### File: image/experimental/octree_level.py
```python
import logging
import math
from typing import Dict, List, Optional
import numpy as np
from ....types import ArrayLike
from .octree_chunk import OctreeChunk, OctreeChunkGeom, OctreeLocation
from .octree_util import OctreeMetadata
LOGGER = logging.getLogger("napari.octree")
class OctreeLevelInfo:
"""Information about one level of the octree.
This should be a NamedTuple.
Parameters
----------
meta : OctreeMetadata
Information about the entire octree.
level_index : int
The index of this level within the whole tree.
"""
def __init__(self, meta: OctreeMetadata, level_index: int):
self.meta = meta
self.level_index = level_index
self.scale = 2 ** self.level_index
base = meta.base_shape
self.image_shape = (
int(base[0] / self.scale),
int(base[1] / self.scale),
)
tile_size = meta.tile_size
scaled_size = tile_size * self.scale
self.rows = math.ceil(base[0] / scaled_size)
self.cols = math.ceil(base[1] / scaled_size)
self.shape_in_tiles = [self.rows, self.cols]
self.num_tiles = self.rows * self.cols
class OctreeLevel:
"""One level of the octree.
An OctreeLevel is "sparse" in that it only contains a dict of
OctreeChunks for the portion of the octree that is currently being
rendered. So even if the full level contains hundreds of millions of
    chunks, this class only contains a few dozen OctreeChunks.
This was necessary because even having a null reference for every
OctreeChunk in a level would use too much space and be too slow to
construct.
Parameters
----------
slice_id : int
The id of the OctreeSlice we are in.
data : ArrayLike
The data for this level.
meta : OctreeMetadata
The base image shape and other details.
level_index : int
Index of this specific level (0 is full resolution).
Attributes
----------
info : OctreeLevelInfo
Metadata about this level.
_tiles : Dict[tuple, OctreeChunk]
Maps (row, col) tuple to the OctreeChunk at that location.
"""
def __init__(
self,
slice_id: int,
data: ArrayLike,
meta: OctreeMetadata,
level_index: int,
):
self.slice_id = slice_id
self.data = data
self.info = OctreeLevelInfo(meta, level_index)
self._tiles: Dict[tuple, OctreeChunk] = {}
def get_chunk(
self, row: int, col: int, create=False
) -> Optional[OctreeChunk]:
"""Return the OctreeChunk at this location if it exists.
If create is True, an OctreeChunk will be created if one
does not exist at this location.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
create : bool
If True, create the OctreeChunk if it does not exist.
Return
------
Optional[OctreeChunk]
The OctreeChunk if one existed or we just created it.
"""
try:
return self._tiles[(row, col)]
except KeyError:
if not create:
return None # It didn't exist so we're done.
rows, cols = self.info.shape_in_tiles
if row < 0 or row >= rows or col < 0 or col >= cols:
# The coordinates are not in the level. Not an exception because
# callers might be trying to get children just over the edge
# for non-power-of-two base images.
return None
# Create a chunk at this location and return it.
octree_chunk = self._create_chunk(row, col)
self._tiles[(row, col)] = octree_chunk
return octree_chunk
def _create_chunk(self, row: int, col: int) -> OctreeChunk:
"""Create a new OctreeChunk for this location in the level.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
Return
------
OctreeChunk
The newly created chunk.
"""
level_index = self.info.level_index
meta = self.info.meta
layer_ref = meta.layer_ref
location = OctreeLocation(
layer_ref, self.slice_id, level_index, row, col
)
scale = self.info.scale
tile_size = self.info.meta.tile_size
scaled_size = tile_size * scale
pos = np.array(
[col * scaled_size, row * scaled_size], dtype=np.float32
)
data = self._get_data(row, col)
# Create OctreeChunkGeom used by the visual for rendering this
# chunk. Size it based on the base image pixels, not based on the
# data in this level, so it's exact.
        base = np.array(meta.base_shape[::-1], dtype=float)
remain = base - pos
size = np.minimum(remain, [scaled_size, scaled_size])
geom = OctreeChunkGeom(pos, size)
# Return the newly created chunk.
return OctreeChunk(data, location, geom)
def _get_data(self, row: int, col: int) -> ArrayLike:
"""Get the chunk's data at this location.
Parameters
----------
row : int
The row coordinate.
col : int
The column coordinate.
Return
------
ArrayLike
The data at this location.
"""
tile_size = self.info.meta.tile_size
array_slice = (
slice(row * tile_size, (row + 1) * tile_size),
slice(col * tile_size, (col + 1) * tile_size),
)
if self.data.ndim == 3:
array_slice += (slice(None),) # Add the colors.
return self.data[array_slice]
def log_levels(levels: List[OctreeLevel], start_level: int = 0) -> None:
"""Log the dimensions of each level nicely.
We take start_level so we can log the "extra" levels we created but
with their correct level numbers.
Parameters
----------
levels : List[OctreeLevel]
Print information about these levels.
start_level : int
Start the indexing at this number, shift the indexes up.
"""
from ...._vendor.experimental.humanize.src.humanize import intword
def _dim_str(dim: tuple) -> None:
return f"({dim[0]}, {dim[1]}) = {intword(dim[0] * dim[1])}"
for index, level in enumerate(levels):
level_index = start_level + index
image_str = _dim_str(level.info.image_shape)
tiles_str = _dim_str(level.info.shape_in_tiles)
LOGGER.info(
"Level %d: %s pixels -> %s tiles",
level_index,
image_str,
tiles_str,
)
```
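To make the tiling arithmetic in `OctreeLevelInfo` concrete, the sketch below feeds it a stand-in metadata object. The real code constructs it from an `OctreeMetadata` instance defined in `octree_util`; the `SimpleNamespace` here only mimics the three attributes this file actually reads (`base_shape`, `tile_size`, `layer_ref`), and the numbers are arbitrary.
```python
# Rough illustration of the level geometry math only; a SimpleNamespace
# stands in for OctreeMetadata, which is not defined in this file.
from types import SimpleNamespace

meta = SimpleNamespace(base_shape=(4096, 4096), tile_size=256, layer_ref=None)

info = OctreeLevelInfo(meta, level_index=2)
print(info.scale)           # 4: each level doubles the downsampling factor
print(info.image_shape)     # (1024, 1024): base shape divided by the scale
print(info.shape_in_tiles)  # [4, 4]: ceil(4096 / (256 * 4)) per axis
print(info.num_tiles)       # 16
```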
#### File: napari/plugins/__init__.py
```python
import os
import sys
from inspect import isclass, signature
from typing import (
TYPE_CHECKING,
Callable,
Dict,
List,
Sequence,
Tuple,
Type,
Union,
)
from warnings import warn
from magicgui import magicgui
from napari_plugin_engine import HookImplementation, PluginManager
from ..types import AugmentedFunction, AugmentedWidget
from ..utils._appdirs import user_site_packages
from ..utils.misc import camel_to_spaces, running_as_bundled_app
from . import _builtins, hook_specifications
if sys.platform.startswith('linux') and running_as_bundled_app():
sys.path.append(user_site_packages())
if TYPE_CHECKING:
from qtpy.QtWidgets import QWidget
# the main plugin manager instance for the `napari` plugin namespace.
plugin_manager = PluginManager(
'napari', discover_entry_point='napari.plugin', discover_prefix='napari_'
)
with plugin_manager.discovery_blocked():
plugin_manager.add_hookspecs(hook_specifications)
plugin_manager.register(_builtins, name='builtins')
dock_widgets: Dict[Tuple[str, str], Tuple[Type['QWidget'], dict]] = dict()
function_widgets: Dict[Tuple[str, str], Tuple[Callable, dict, dict]] = dict()
def register_dock_widget(
args: Union[AugmentedWidget, List[AugmentedWidget]],
hookimpl: HookImplementation,
):
from qtpy.QtWidgets import QWidget
plugin_name = hookimpl.plugin_name
hook_name = '`napari_experimental_provide_dock_widget`'
for arg in args if isinstance(args, list) else [args]:
if isinstance(arg, tuple):
if not arg:
warn(
f'Plugin {plugin_name!r} provided an invalid tuple to '
f'{hook_name}. Skipping'
)
continue
_cls = arg[0]
kwargs = arg[1] if len(arg) > 1 else {}
else:
_cls, kwargs = (arg, {})
if not (isclass(_cls) and issubclass(_cls, QWidget)):
warn(
f'Plugin {plugin_name!r} provided an invalid '
f'widget type to {hook_name}: {_cls!r}. Widget ignored.'
)
continue
if not isinstance(kwargs, dict):
warn(
f'Plugin {plugin_name!r} provided invalid kwargs '
f'to {hook_name} for class {_cls.__name__}. Widget ignored.'
)
continue
# Get widget name
name = str(kwargs.get('name', '')) or camel_to_spaces(_cls.__name__)
key = (plugin_name, name)
if key in dock_widgets:
warn(
"Plugin '{}' has already registered a dock widget '{}' "
'which has now been overwritten'.format(*key)
)
dock_widgets[key] = (_cls, kwargs)
magicgui_sig = {
name
for name, p in signature(magicgui).parameters.items()
if p.kind is p.KEYWORD_ONLY
}
def register_function_widget(
args: Union[AugmentedFunction, List[AugmentedFunction]],
hookimpl: HookImplementation,
):
plugin_name = hookimpl.plugin_name
hook_name = '`napari_experimental_provide_function_widget`'
for arg in args if isinstance(args, list) else [args]:
if isinstance(arg, tuple):
if not arg:
warn(
f'Plugin {plugin_name!r} provided an invalid tuple to '
f'{hook_name}. Skipping'
)
continue
func = arg[0]
magic_kwargs = arg[1] if len(arg) > 1 else {}
dock_kwargs = arg[2] if len(arg) > 2 else {} # type: ignore
else:
func, magic_kwargs, dock_kwargs = (arg, {}, {})
if not callable(func):
msg = (
f'Plugin {plugin_name!r} provided a non-callable type to '
f'{hook_name}: {type(func)!r}. Function widget ignored.'
)
if isinstance(func, tuple):
msg += (
" To provide multiple function widgets please use "
"a LIST of tuples"
)
warn(msg)
continue
if not isinstance(magic_kwargs, dict):
warn(
f'Plugin {plugin_name!r} provided invalid type '
f'({type(magic_kwargs)}) for 2nd argument to {hook_name} '
f'for function {func.__name__!r}. Widget ignored.'
)
continue
valid_magic_kwargs = set(signature(func).parameters) | magicgui_sig
extra_kwargs = set(magic_kwargs) - valid_magic_kwargs
if extra_kwargs:
warn(
f'Plugin {plugin_name!r} provided invalid magicgui kwargs '
f'to {hook_name} for function {func.__name__!r}: '
f'{extra_kwargs}. Valid kwargs are {valid_magic_kwargs}.'
'Widget ignored.'
)
continue
if not isinstance(dock_kwargs, dict):
warn(
f'Plugin {plugin_name!r} provided invalid type '
                f'({type(dock_kwargs)}) for 3rd argument to {hook_name} '
f'for function {func.__name__!r}. Widget ignored.'
)
continue
# Get function name
name = str(dock_kwargs.get('name', '')) or func.__name__.replace(
'_', ' '
)
key = (plugin_name, name)
if key in function_widgets:
warn(
"Plugin '{}' has already registered a function widget '{}' "
'which has now been overwritten'.format(*key)
)
function_widgets[key] = (func, magic_kwargs, dock_kwargs)
plugin_manager.hook.napari_experimental_provide_dock_widget.call_historic(
result_callback=register_dock_widget, with_impl=True
)
plugin_manager.hook.napari_experimental_provide_function_widget.call_historic(
result_callback=register_function_widget, with_impl=True
)
#: Template to use for namespacing a plugin item in the menu bar
menu_item_template = '{}: {}'
__all__ = ["PluginManager", "plugin_manager", 'menu_item_template']
``` |
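For a concrete picture of the shapes `register_dock_widget` accepts, here is a hedged sketch of the plugin side: the hook may return a bare widget class, a `(class, kwargs)` tuple, or a list of such tuples, and the `name` key is what ends up in the `dock_widgets` registry. The widget class is a stand-in, and the decorator follows the usual `napari_plugin_engine` marker.
```python
# Illustrative plugin-side code; MyWidget is a stand-in QWidget subclass.
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtWidgets import QWidget


class MyWidget(QWidget):
    """Placeholder dock widget."""


@napari_hook_implementation
def napari_experimental_provide_dock_widget():
    # A bare class, a (class, kwargs) tuple, or a list of tuples are all
    # accepted by register_dock_widget above.
    return [(MyWidget, {'name': 'My Widget'})]
```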
{
"source": "10yogi/Django-Csong",
"score": 2
} |
#### File: website/music/models.py
```python
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
# Create your models here.
class Album(models.Model):
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.FileField()
def get_absolute_url(self):
return reverse("detail", kwargs = { "pk": self.pk})
def __str__(self):
return self.album_title+' -- '+self.artist
class Song(models.Model):
album = models.ForeignKey(Album, on_delete = models.CASCADE)
file_type = models.CharField(max_length= 10)
song_title = models.CharField(max_length= 250)
is_favorite = models.BooleanField(default = False)
def __str__(self):
        return self.song_title
``` |
{
"source": "10-zin/Synthesizer",
"score": 3
} |
#### File: synth/synthesizer/modules.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
__author__ = "<NAME>, <NAME>"
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1e9)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
# template for attention synthesizers
class DenseAttention(nn.Module):
def __init__(self, max_seq_len, d_k, d_hid = 64, attn_dropout = 0.1):
#d_hid = 8*(128/8)/2
super(DenseAttention, self).__init__()
self.w_1 = nn.Linear(d_k, d_hid)
self.w_2 = nn.Linear(d_hid, max_seq_len)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, v, len_q, mask=None):
# b x n x lq x dq -> b x n x lq x lq #
dense_attn = self.w_2(self.relu(self.w_1(q)))[:,:,:,:len_q]
# print('Attn: ', dense_attn.shape)
# print('Mask: ', mask.shape)
# print('V: ', v.shape)
if mask is not None:
dense_attn = dense_attn.masked_fill(mask == 0, -1e9)
dense_attn = self.dropout(F.softmax(dense_attn, dim=-1))
output = torch.matmul(dense_attn, v)
return output, dense_attn
class FactorizedDenseAttention(nn.Module):
def __init__(self, max_seq_len, d_k, f, attn_dropout = 0.1):
#d_hid = 8*(128/8)/2
        super(FactorizedDenseAttention, self).__init__()
self.f = f
self.max_seq_len = max_seq_len
self.f_a = nn.Linear(d_k, f)
        self.f_b = nn.Linear(d_k, max_seq_len // f)
# self.relu = nn.ReLU()
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, v, len_q, mask=None, factorize=False):
        h_a = torch.repeat_interleave(self.f_a(q), self.max_seq_len // self.f, -1)[:,:,:,:len_q]
h_b = torch.repeat_interleave(self.f_b(q), self.f, -1)[:,:,:,:len_q]
dense_attn = torch.matmul(h_a, h_b.transpose(2, 3))
if mask is not None:
dense_attn = dense_attn.masked_fill(mask == 0, -1e9)
dense_attn = self.dropout(F.softmax(dense_attn, dim=-1))
output = torch.matmul(dense_attn, v)
return output, dense_attn
class RandomAttention(nn.Module):
def __init__(self, batch_size, n_head, max_seq_len, attn_dropout = 0.1):
super(RandomAttention, self).__init__()
#device = torch.device("GPU"),
self.random_attn = torch.randn(batch_size, n_head, max_seq_len, max_seq_len, requires_grad = True)
self.dropout = nn.Dropout(attn_dropout)
def forward(self, v, len_q, mask=None):
# b x n x max_len x max_len -> b x n x lq x lq
random_attn = self.random_attn[:mask.shape[0],:,:len_q,:len_q]
random_attn = random_attn.to(torch.device('cuda' if mask.is_cuda else 'cpu'))
if mask is not None:
random_attn = random_attn.masked_fill(mask == 0, -1e9)
random_attn = self.dropout(F.softmax(random_attn, dim=-1))
output = torch.matmul(random_attn, v)
return output, random_attn
class FactorizedRandomAttention(nn.Module):
def __init__(self, batch_size, n_head, f, max_seq_len, attn_dropout = 0.1):
        super(FactorizedRandomAttention, self).__init__()
#device = torch.device("GPU"),
self.random_attn_1 = torch.randn(batch_size, n_head, max_seq_len, f, requires_grad = True)
self.random_attn_2 = torch.randn(batch_size, n_head, f, max_seq_len, requires_grad = True)
self.dropout = nn.Dropout(attn_dropout)
def forward(self, v, len_q, mask=None, factorize=False):
# b x n x max_len x max_len -> b x n x lq x lq #[:,:,:len_q,:len_q]
random_attn = torch.matmul(self.random_attn_1, self.random_attn_2)[:mask.shape[0],:,:len_q,:len_q]
if mask is not None:
random_attn = random_attn.masked_fill(mask == 0, -1e9)
random_attn = self.dropout(F.softmax(random_attn, dim=-1))
output = torch.matmul(random_attn, v)
return output, random_attn
``` |
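A quick shape check for the modules above, using random tensors. The batch, head, length and width values are arbitrary; the point is that `ScaledDotProductAttention` consumes `(batch, heads, length, d_k)` tensors plus a broadcastable mask, and that `DenseAttention` predicts the attention map from the queries alone before slicing it to the actual sequence length.
```python
# Shape sanity check only; all sizes are arbitrary.
import torch

b, n_head, lq, d_k, d_v = 2, 8, 16, 64, 64
q = torch.randn(b, n_head, lq, d_k)
k = torch.randn(b, n_head, lq, d_k)
v = torch.randn(b, n_head, lq, d_v)
mask = torch.ones(b, 1, 1, lq)  # broadcast over heads and query positions

sdpa = ScaledDotProductAttention(temperature=d_k ** 0.5)
out, attn = sdpa(q, k, v, mask=mask)
print(out.shape, attn.shape)  # [2, 8, 16, 64] and [2, 8, 16, 16]

dense = DenseAttention(max_seq_len=32, d_k=d_k)
out, attn = dense(q, v, len_q=lq, mask=mask)
print(out.shape, attn.shape)  # [2, 8, 16, 64] and [2, 8, 16, 16]
```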
{
"source": "10zinten/tibetan-spellchcker",
"score": 3
} |
#### File: app/routes/tokens.py
```python
from app.schemas import Text, Token
from botok.tokenizers.wordtokenizer import WordTokenizer
from fastapi import APIRouter
from typing import List
router = APIRouter()
@router.post("/token", response_model=List[Token])
def tokenize_text(text: Text):
word_tokenizer = WordTokenizer()
tokens = word_tokenizer.tokenize(text.content)
token_list = []
for token in tokens:
cur_token = Token(form=token.text, pos=token.pos)
token_list.append(cur_token)
return token_list
``` |
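A hedged sketch of exercising this route with FastAPI's test client. The import path `app.main` is a guess based on the repository layout and is not shown above; what is grounded in the handler is that the request body needs a `content` field and the response is a list of `{form, pos}` objects.
```python
# Hypothetical client call; the app import path is an assumption, not
# something defined in the file above.
from fastapi.testclient import TestClient

from app.main import app  # assumed location of the FastAPI app

client = TestClient(app)
resp = client.post("/token", json={"content": "བཀྲ་ཤིས་བདེ་ལེགས།"})
print(resp.status_code)
print(resp.json())  # e.g. [{"form": "བཀྲ་ཤིས་", "pos": "NOUN"}, ...]
```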
{
"source": "1103928458/meiduo_drf",
"score": 2
} |
#### File: apps/carts/views.py
```python
from django.shortcuts import render
from django.views import View
import json, pickle, base64
from django import http
from django_redis import get_redis_connection
from goods.models import SKU
from meiduo_mall.utils.response_code import RETCODE
class CartsView(View):
    """Shopping cart"""
    def post(self, request):
        # Add a product to the shopping cart
        # 1. Receive sku_id and count from the request body
        json_dict = json.loads(request.body.decode())
        sku_id = json_dict.get("sku_id")
        count = json_dict.get("count")
        selected = json_dict.get("selected", True)
        # 2. Validate
        if all([sku_id, count]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        try:
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden("Invalid sku_id")
        if isinstance(count, int) is False:
            return http.HttpResponseForbidden("Invalid type")
        # 3. Get the user from the current request
        user = request.user
        # 4. Check whether the user is logged in
        if user.is_authenticated:
            # 5. For a logged-in user, operate on the data in redis
            # """
            # hash:{sku_id_1:1,sku_id_16:2}
            # set:{sku_id_1}
            # """
            # Create a redis connection object
            redis_conn = get_redis_connection("carts")
            pl = redis_conn.pipeline()
            # Add sku_id and count to the hash; hincrby increments, creating the field if needed
            pl.hincrby('carts_%s' % user.id, sku_id, count)
            # Add the sku_id of the current product to the set with sadd
            if selected:
                pl.sadd('selected_%s' % user.id, sku_id)
            pl.execute()
            # Respond
            return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'Added to cart successfully'})
        else:
            # 6. For an anonymous user, operate on the cookie data
            """
            {
                sku_id_1:{'count':1,'selected':True},
                sku_id_2:{'count':2,'selected':True},
            }
            """
            # First get the cart data from the cookie
            cart_str = request.COOKIES.get("carts")
            # Check whether the cookie contains cart data
            if cart_str:
                # If there is data, convert the string back into a dict
                # First convert the string to bytes
                cart_str_bytes = cart_str.encode()
                # Use base64 to decode the bytes string into raw bytes
                cart_bytes = base64.b64decode(cart_str_bytes)
                # Use pickle to turn the bytes back into a dict
                cart_dict = pickle.loads(cart_bytes)
            else:
                # If there is no data, prepare an empty dict to hold the cart data
                cart_dict = {}
            # If the product being added already exists, do an incremental update of its count
            if sku_id in cart_dict:
                # Get the original count of the existing product
                origin_count = cart_dict[sku_id]['count']
                count += origin_count
            # A new product is simply added to the dict
            cart_dict[sku_id] = {
                'count': count,
                'selected': selected,
            }
            # Convert the cookie cart dict into a string
            cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()
            # Create the response object
            response = http.JsonResponse({"code": RETCODE.OK, "errmsg": "Added to cart successfully"})
            # Set the cookie
            response.set_cookie("carts", cart_str)
            # Respond
            return response
    def get(self, request):
        """Show the shopping cart"""
        # Check whether the user is logged in
        user = request.user
        if user.is_authenticated:
            # For a logged-in user, operate on the redis cart data
            # Fetch the hash and set data from redis
            redis_conn = get_redis_connection("carts")
            # Get all the hash dict data from redis
            redis_dict = redis_conn.hgetall("carts_%s" % user.id)
            # Get all the set data
            selected_ids = redis_conn.smembers("selected_%s" % user.id)
            # Convert the data into the same format as the cookie cart data so it can be handled uniformly later
            # Define a dict variable to hold all the redis cart data
            cart_dict = {}
            # Iterate over the hash data and add it to cart_dict
            for sku_id_bytes in redis_dict:
                cart_dict[int(sku_id_bytes)] = {
                    'count': int(redis_dict[sku_id_bytes]),
                    'selected': sku_id_bytes in selected_ids
                }
        else:
            # For an anonymous user, operate on the cookie cart data
            # Get the cookie data
            cart_str = request.COOKIES.get('carts')
            if cart_str:
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))
            else:
                # If the cart has no data yet, respond early and just render the cart page
                return render(request, "cart.html")
        # Use the keys (sku_id) of cart_dict to fetch the sku models
        sku_qs = SKU.objects.filter(id__in=cart_dict.keys())
        sku_list = []  # holds a dict for every cart product
        # Pack the sku model and the other product data into the same dict
        for sku_model in sku_qs:
            sku_list.append({
                'id': sku_model.id,
                'name': sku_model.name,
                'price': str(sku_model.price),
                'default_image_url': sku_model.default_image.url,
                'selected': str(cart_dict[sku_model.id]['selected']),
                'count': cart_dict[sku_model.id]['count'],
                'amount': str(sku_model.price * cart_dict[sku_model.id]['count'])
            })
        return render(request, 'cart.html', {"cart_skus": sku_list})
    def put(self, request):
        """Update the shopping cart"""
        # Receive sku_id, count, selected
        json_dict = json.loads(request.body.decode())
        sku_id = json_dict.get('sku_id')
        count = json_dict.get('count')
        selected = json_dict.get('selected')
        # Validate
        if all([sku_id, count]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        try:
            sku_model = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden("sku_id does not exist")
        try:
            count = int(count)
        except Exception:
            return http.HttpResponseForbidden("Invalid type")
        if not isinstance(selected, bool):
            return http.HttpResponseForbidden("Invalid type")
        # Check whether the user is logged in
        user = request.user
        if user.is_authenticated:
            # A logged-in user operates on redis
            redis_conn = get_redis_connection("carts")
            pl = redis_conn.pipeline()
            pl.hset('carts_%s' % user.id, sku_id, count)
            if selected:
                pl.sadd('selected_%s' % user.id, sku_id)
            else:
                pl.srem('selected_%s' % user.id, sku_id)
            pl.execute()
            # Pack a dict with the new data of the product that was just updated
            cart_sku = {
                'id': sku_model.id,
                'name': sku_model.name,
                'price': str(sku_model.price),  # price is a Decimal; convert to str so the frontend can parse it
                'default_image_url': sku_model.default_image.url,
                'selected': selected,  # when updating, selected is not converted to a string
                'count': count,
                'amount': str(sku_model.price * count)  # subtotal
            }
            # Respond
            response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'Cart updated successfully', 'cart_sku': cart_sku})
            return response
        else:
            # An anonymous user operates on the cookie
            # Get the cookie data
            cart_str = request.COOKIES.get('carts')
            if cart_str:
                # Convert cart_str into a dict
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))
            else:
                return http.JsonResponse({'code': RETCODE.OK, "errmsg": 'Cookie data not found'})
            # Replace the old data with the new data
            cart_dict[sku_id] = {
                'count': count,
                'selected': selected
            }
            # Convert the dict into a string
            cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()
            cart_sku = {
                'id': sku_model.id,
                'name': sku_model.name,
                'price': str(sku_model.price),  # price is a Decimal; convert to str so the frontend can parse it
                'default_image_url': sku_model.default_image.url,
                'selected': selected,  # when updating, selected is not converted to a string
                'count': count,
                'amount': str(sku_model.price * count)  # subtotal
            }
            # Set the cookie
            response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'Cart updated successfully', 'cart_sku': cart_sku})
            response.set_cookie("carts", cart_str)
            # Respond
            return response
    def delete(self, request):
        """Remove a product from the shopping cart"""
        # Receive sku_id from the request body
        json_dict = json.loads(request.body.decode())
        sku_id = json_dict.get('sku_id')
        # Validate
        try:
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('Invalid sku_id')
        # Check whether the user is logged in
        user = request.user
        if user.is_authenticated:
            # A logged-in user operates on the redis data
            redis_conn = get_redis_connection('carts')
            pl = redis_conn.pipeline()
            # Delete the corresponding field from the hash
            pl.hdel("carts_%s" % user.id, sku_id)
            # Remove the current sku_id from the set
            pl.srem("selected_%s" % user.id, sku_id)
            pl.execute()
            # Respond
            return http.JsonResponse({"code": RETCODE.OK, "errmsg": "Removed from cart successfully"})
        else:
            # An anonymous user operates on the cookie data
            # First get the cookie data
            cart_str = request.COOKIES.get('carts')
            # Check whether the cookie data was found
            if cart_str:
                # If found, convert the string into a dict
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))
            else:
                # If not found, respond early
                return http.JsonResponse({"code": RETCODE.DBERR, "errmsg": "Cookie data not found"})
            # Delete the key-value pair for the given sku_id
            if sku_id in cart_dict:
                del cart_dict[sku_id]
            response = http.JsonResponse({"code": RETCODE.OK, "errmsg": "Removed from cart successfully"})
            # If the dict is now empty, delete the cookie
            if not cart_dict:
                response.delete_cookie('carts')
                return response
            # Convert the dict into a string
            cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()
            # Set the cookie
            response.set_cookie("carts", cart_str)
            # Respond
            return response
class CartsSelectedAllView(View):
    """Select or deselect all items in the cart"""
    def put(self, request):
        # Receive the request body data
        json_dict = json.loads(request.body.decode())
        selected = json_dict.get("selected")
        # Validate
        if isinstance(selected, bool) is False:
            return http.HttpResponseForbidden("Invalid type")
        # Check whether the user is logged in
        user = request.user
        if user.is_authenticated:
            # A logged-in user operates on redis
            redis_conn = get_redis_connection("carts")
            # Get the hash data
            redis_dict = redis_conn.hgetall('carts_%s' % user.id)
            # When selecting all, add every key of the hash to the set
            if selected:
                redis_conn.sadd('selected_%s' % user.id, *redis_dict.keys())
            # When deselecting all, delete the set
            else:
                # redis_conn.srem('selected_%s' % user.id, *redis_dict.keys())
                redis_conn.delete('selected_%s' % user.id)
            # Respond
            return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
        else:
            # An anonymous user operates on the cookie
            # First get the cookie cart data
            cart_str = request.COOKIES.get('carts')
            # Check whether it was found
            if cart_str:
                # If found, convert the string into a dict
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))
            else:
                # If not found, respond early
                return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': 'Cookie not found'})
            # Iterate over the cookie cart dict and set every selected flag to True or False
            for sku_id in cart_dict:
                cart_dict[sku_id]['selected'] = selected
            # Convert the dict into a string
            cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()
            # Set the cookie
            response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
            response.set_cookie("carts", cart_str)
            # Respond
            return response
class CartsSimpleView(View):
    """Simple cart shown in the upper-right corner of product pages"""
    def get(self, request):
        user = request.user
        if user.is_authenticated:
            # For a logged-in user, operate on the redis cart data
            # Fetch the hash and set data from redis
            redis_conn = get_redis_connection("carts")
            # Get all the hash dict data from redis
            redis_dict = redis_conn.hgetall("carts_%s" % user.id)
            # Get all the set data
            selected_ids = redis_conn.smembers("selected_%s" % user.id)
            # Convert the data into the same format as the cookie cart data so it can be handled uniformly later
            # Define a dict variable to hold all the redis cart data
            cart_dict = {}
            # Iterate over the hash data and add it to cart_dict
            for sku_id_bytes in redis_dict:
                cart_dict[int(sku_id_bytes)] = {
                    'count': int(redis_dict[sku_id_bytes]),
                    'selected': sku_id_bytes in selected_ids
                }
        else:
            # For an anonymous user, operate on the cookie cart data
            # Get the cookie data
            cart_str = request.COOKIES.get('carts')
            if cart_str:
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))
            else:
                # If the cart has no data yet, respond early and just render the cart page
                return render(request, "cart.html")
        # Use the keys (sku_id) of cart_dict to fetch the sku models
        sku_qs = SKU.objects.filter(id__in=cart_dict.keys())
        sku_list = []  # holds a dict for every cart product
        # Pack the sku model and the other product data into the same dict
        for sku_model in sku_qs:
            sku_list.append({
                'id': sku_model.id,
                'name': sku_model.name,
                # 'price': str(sku_model.price),
                'default_image_url': sku_model.default_image.url,
                # 'selected': str(cart_dict[sku_model.id]['selected']),
                'count': cart_dict[sku_model.id]['count'],
                # 'amount': str(sku_model.price * cart_dict[sku_model.id]['count'])
            })
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'ok', 'cart_skus': sku_list})
```
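Both anonymous-cart branches above rely on the same cookie encoding, so here is a small standalone sketch of that round trip: the cart dict is pickled, base64-encoded into a cookie-safe string, and later decoded back. The sku ids and counts are made up.
```python
# Standalone illustration of the cookie cart encoding used by the views above.
import base64
import pickle

cart_dict = {1: {'count': 2, 'selected': True}, 16: {'count': 1, 'selected': False}}

# dict -> cookie string (what the views store with response.set_cookie)
cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()

# cookie string -> dict (what the views rebuild from request.COOKIES)
restored = pickle.loads(base64.b64decode(cart_str.encode()))
assert restored == cart_dict
```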
#### File: meiduo_admin/serializers/user_login_serializers.py
```python
from rest_framework import serializers
from django.contrib.auth import authenticate
from rest_framework_jwt.utils import jwt_encode_handler,\
jwt_payload_handler
# Define a serializer that implements the traditional username/password authentication logic
class UserLoginSerializer(serializers.Serializer):
    # Class attributes defining the fields to validate
    username = serializers.CharField(max_length=20)
    password = serializers.CharField()
    def validate(self, attrs):
        # attrs contains all the data sent by the frontend
        # username = attrs.get("username")
        # password = attrs.get("password")
        # 1. Authenticate the user with the username and password
        # authenticate(username=username, password=password)
        user = authenticate(**attrs)
        # 2. If authentication fails, data validation fails
        if not user:
            raise serializers.ValidationError("User authentication failed!")
        # 3. If authentication succeeds, issue a JWT token
        # 3.1 Build the payload
        payload = jwt_payload_handler(user)
        # 3.2 Generate the token
        jwt_token = jwt_encode_handler(payload)
return {
"user": user,
"jwt_token": jwt_token
}
```
#### File: meiduo_mall/scripts/regenerate_detail_html.py
```python
# from django.shortcuts import render
# import os
# from django.conf import settings
# from goods.models import SKU
# from contents.utils import get_categories
# from goods.utils import get_breadcrumb
#
# def generate_static_sku_detail_html(sku_id):
#
# sku = SKU.objects.get(id=sku_id)
#
# category = sku.category # third-level category data
# spu = sku.spu
#
# """1. Prepare the spec option list of the current product, e.g. [8, 11]"""
# # Get the spec option id list of the sku currently being displayed
# current_sku_spec_qs = sku.specs.order_by('spec_id')
# current_sku_option_ids = [] # [8, 11]
# for current_sku_spec in current_sku_spec_qs:
# current_sku_option_ids.append(current_sku_spec.option_id)
#
# """2. Build the spec selection lookup table
# {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}
# """
# # Build the spec selection lookup table
# temp_sku_qs = spu.sku_set.all() # get all skus under the current spu
# # Big dict used as the option lookup table
# spec_sku_map = {} # {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}
# for temp_sku in temp_sku_qs:
# # Query the spec data of each sku
# temp_spec_qs = temp_sku.specs.order_by('spec_id')
# temp_sku_option_ids = [] # used to hold the option values of each sku
# for temp_spec in temp_spec_qs:
# temp_sku_option_ids.append(temp_spec.option_id)
# spec_sku_map[tuple(temp_sku_option_ids)] = temp_sku.id
#
# """3. Combine the options, look up the sku_id and bind it"""
# spu_spec_qs = spu.specs.order_by('id') # get all specs of the current spu
#
# for index, spec in enumerate(spu_spec_qs): # iterate over all the specs
# spec_option_qs = spec.options.all() # get all options of the current spec
# temp_option_ids = current_sku_option_ids[:] # copy the option id list of the currently displayed product
# for option in spec_option_qs: # iterate over all options of the current spec
# temp_option_ids[index] = option.id # [8, 12]
# option.sku_id = spec_sku_map.get(tuple(temp_option_ids)) # bind a sku_id attribute to every option object
#
# # spec.spec_options = spec_option_qs # bind all options of the spec to its spec_options attribute
#
# context = {
# 'categories': get_categories(), # product categories
# 'breadcrumb': get_breadcrumb(category), # breadcrumb navigation
# 'sku': sku, # the sku model object currently being displayed
# 'category': category, # the third-level category of the displayed sku
# 'spu': spu, # the spu the sku belongs to
# 'spec_qs': spu_spec_qs, # all spec data of the current product
# }
#
# response = render(None, 'detail.html', context)
# html_text = response.content.decode()
# file_path = os.path.join(settings.STATICFILES_DIRS[0], 'detail/' + str(sku_id) + '.html')
# with open(file_path, 'w') as f:
# f.write(html_text)
#
#
# if __name__ == '__main__':
# skus = SKU.objects.all()
# for sku in skus:
# print(sku.id)
# generate_static_sku_detail_html(sku.id)
``` |
{
"source": "1104028/StrativNewsFeed",
"score": 2
} |
#### File: StrativNewsFeed/Newsapi/apps.py
```python
from django.apps import AppConfig
class NewsapiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Newsapi'
def ready(self):
from JobScheduler import jobupdater
jobupdater.start()
``` |
{
"source": "1105042987/Dominant-Patterns",
"score": 2
} |
#### File: Dominant-Patterns/utils/get_dp.py
```python
import torch,cv2
import sys,os,json5
base = sys.path[0]
sys.path.append(os.path.abspath(os.path.join(base, "..")))
from glob import glob
from tqdm import tqdm
import torchvision.models as models
from dataset.general_image_datasets import dataloader
from docker.tool import yellow
dev = 'cpu'
std = torch.Tensor((0.229, 0.224, 0.225)).reshape(-1,1,1).float().to(dev)
mean = torch.Tensor((0.485, 0.456, 0.406)).reshape(-1,1,1).float().to(dev)
softmax = torch.nn.Softmax(-1).to(dev)
net_dic = {}
with open('RootPath.json') as f:
RF = json5.load(f)
root = RF[r'%RESULT%']
data_root = RF[r'%DATA%']
with open(os.path.join(data_root,'imagenet_class_index.json')) as f:
num2idx = json5.load(f)
def draw(img_id,img,basepath,target_net,prefix='dp'):
if target_net not in net_dic:
net_dic[target_net] = getattr(models,target_net)(pretrained=True).to(dev)
net_dic[target_net].eval()
result = softmax(net_dic[target_net]((img-mean)/std))
prob,idx = result.max(1)
if img_id>-1:
name = f'{prefix}_ID{img_id}_CLS({num2idx[str(int(idx))][1]})_Prob{int(prob*100+0.5)}.jpg'
else:
name = f'{prefix}_{num2idx[str(int(idx))][1]}_Prob{int(prob*100+0.5)}.jpg'
if prefix == 'dp':
temp = ((img[0]-img[0].min()).cpu().numpy().transpose((1,2,0)))
temp = temp/temp.max()
else:
temp = img[0].cpu().numpy().transpose((1,2,0))[...,::-1]
cv2.imwrite(os.path.join(basepath,'save', name),(temp*255))
return name
assert len(sys.argv)>1, 'Project Name Needed!'
if len(sys.argv)==3 and sys.argv[2] == 'img':
num_img = 1
cfg={
"dataset_name": "ImageNet",
"num_workers": 2,
"direction": data_root,
"batch_size": 1,
"shuffle":False,
"enhancement":{
"Resize":[256],
"CenterCrop":[224],
}
}
data = iter(dataloader(cfg,'test'))
tars = glob(os.path.join(root,sys.argv[1],'*'))
tars.sort()
for path in tqdm(tars,ascii=True,desc='EXP NUM',ncols=130):
if len(glob(os.path.join(path,'*.txt')))!=0:
w_path = os.path.join(path,'ckp','best','weight.pth')
if os.path.exists(w_path):
try:
weight = torch.load(w_path, map_location=lambda storage, loc:storage)
dp = weight['dp'].to(dev)
with open(glob(os.path.join(path,'code','config','*'))[0]) as f:
target_net = json5.load(f)["system"]["net_param"]["target_net"]
except:
print('error at',yellow(w_path))
continue
name = draw(-1,dp,path,target_net,'dp')
if len(sys.argv)==3 and sys.argv[2] == 'img':
for idx in tqdm(range(num_img),ascii=True,desc=path.split('/')[-1],ncols=130):
img,label = next(data)
img = img.to(dev)
draw(idx,dp+img,path,target_net,'per')
draw(idx,img,path,target_net,'ori')
``` |
{
"source": "1105042987/NAG_Pytorch",
"score": 3
} |
#### File: 1105042987/NAG_Pytorch/vgg.py
```python
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG_F(nn.Module):
"""
Main Class
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
self.block_size = [2, 2, 3, 3, 3]
self.conv1_1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.fc6 = nn.Linear(512 * 7 * 7, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, 2622)
def forward(self, x):
""" Pytorch forward
Args:
x: input image (224x224)
Returns: class logits
"""
x = F.relu(self.conv1_1(x))
x = F.relu(self.conv1_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = F.relu(self.conv3_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv4_1(x))
x = F.relu(self.conv4_2(x))
x = F.relu(self.conv4_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv5_1(x))
x = F.relu(self.conv5_2(x))
x = F.relu(self.conv5_3(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc6(x))
x = F.dropout(x, 0.5, self.training)
x = F.relu(self.fc7(x))
x = F.dropout(x, 0.5, self.training)
return self.fc8(x)
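# Minimal usage sketch of the class above (weights are randomly initialised;
# this class does not load a pretrained checkpoint by itself):
#   net = VGG_F()
#   logits = net(torch.randn(1, 3, 224, 224))  # -> logits.shape == (1, 2622)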
``` |
{
"source": "110805/TOC-Project",
"score": 3
} |
#### File: 110805/TOC-Project/fsm.py
```python
from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
def say_hi(self, update):
text = update.message.text
return text.lower() == 'hello'
def say_name(self, update):
text = update.message.text
return text.lower() == 'name'
def reply(self,update):
return 1
def choice(self, update):
text = update.message.text
return text.lower() == 'recommend'
def go_sport(self,update):
text=update.message.text
return text.lower()=='sport'
def go_movie(self,update):
text=update.message.text
return text.lower()=='movie'
def go_news(self,update):
text=update.message.text
return text.lower()=='news'
def go_exit(self,update):
text=update.message.text
return text.lower()=='exit'
def on_enter_hello(self, update):
update.message.reply_text("Hi!")
self.go_back(update)
def on_enter_name(self, update):
update.message.reply_text("My name is Jibot, an interesting name?")
update.message.reply_text("What's your name?")
def on_enter_reply_name(self,update):
update.message.reply_text("A great name, nice to meet you")
self.go_back(update)
def on_enter_recommend(self,update):
update.message.reply_text("sport,movie or news")
def on_enter_sport(self,update):
update.message.reply_text("http://www.espn.com/")
self.go_recommend(update)
def on_enter_movie(self,update):
update.message.reply_text("https://movies.yahoo.com.tw/")
self.go_recommend(update)
def on_enter_news(self,update):
update.message.reply_text("http://edition.cnn.com/")
self.go_recommend(update)
def on_enter_exit(self,update):
update.message.reply_text("See you next time")
self.go_back(update)
``` |
{
"source": "1109450752/homework2018_10_10",
"score": 4
} |
#### File: 1109450752/homework2018_10_10/work6.py
```python
s="waddawd"
def reserve(s):
s1=''
for i in range (len(s)-1,-1,-1):
s1=s1+str(s[i])
#print(s1)
return s1
print(reserve(s))
``` |
{
"source": "1109450752/homework2018_10_11",
"score": 3
} |
#### File: 1109450752/homework2018_10_11/work12.py
```python
def cesi(s):
for i in range (len(s)):
if s[0]!=s[i]:
return False
return True
s=[1,2,3,4]
s1=[1,1,1,1]
s2=[2,2,2]
print(cesi(s))
print(cesi(s1))
print(cesi(s2))
``` |
{
"source": "1110sillabo/DockerAnalyser",
"score": 3
} |
#### File: scanner/core/dfexception.py
```python
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class ImageNotFound(Error):
"""Exception raised when a Docker image is not found.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
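# Usage sketch (hypothetical call site):
#   raise ImageNotFound("image 'foo/bar:latest' was not found on the registry")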
```
#### File: scanner/deploy/analysis.py
```python
def analysis(image_json, context):
logger = context['logger']
client_images = context['images']
logger.info("Received image from rabbitMQ: {}".format(image_json))
client_images.post_image(image_json)
# image_json = {
# "name": "sameersbn/postgresql:9.4-17",
# "repo_name": "sameersbn/postgresql",
# "tag": "9.4-17",
# "pull_count": 1430790,
# "star_count": 107,
# "repo_owner": None,
# "short_description": "",
# "is_automated": True,
# "is_official": False,
# "full_size": 81978718,
# "images": [{"size": 81978718}], "id": 2384003,
# "repository": 14080, "creator": 3263,
# "last_updater": 3263,
# "last_updated": "2016-03-22T06:46:03.447956Z",
# "image_id": None,
# "v2": True}
# }
return True
```
#### File: examples/deploy-package-dockergraph/analysis.py
```python
import requests
from lxml import html
import re
def analysis(image, context):
logger = context['logger']
client_images = context['images']
repo = image["repo_name"]
logger.info("Received image to be analysed: {} ".format(repo))
if client_images.is_new(repo):
logger.info("{} is not present into local database".format(repo))
# image dictionary with the field to be sent to the images_server
node_image = {'name': repo}
try:
dockerfile = get_dockerfile(repo)
from_repo, from_tag = extract_FROM(dockerfile)
logger.info("{} FROM {} {}"
.format(repo, from_repo,
"" if from_tag is None else from_tag))
node_image['from_repo'] = from_repo
node_image['from_tag'] = from_tag
node_image["is_official"] = image["is_official"]
node_image["is_automated"] = image["is_automated"]
logger.info("POST {} ".format(node_image))
client_images.post_image(node_image)
except ValueError as e:
logger.error(str(e))
return False
return True
else:
logger.info("{} already present into local database ".format(repo))
return True
def extract_FROM(dockerfile):
search = re.search('FROM ([^\s]+)', dockerfile)
if search:
from_image = search.group(1)
if ":" in from_image: # FROM reponame:tag
from_repo, from_tag = from_image.split(":")
else: # FROM reponame
from_repo = from_image
from_tag = None
return from_repo, from_tag
else:
raise ValueError("FROM value not found in DockerFile")
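# Illustration of the FROM parsing above (hypothetical Dockerfile contents):
#   extract_FROM("FROM python:3.6-slim\nRUN pip install flask")
#       -> ('python', '3.6-slim')
#   extract_FROM("FROM ubuntu")
#       -> ('ubuntu', None)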
def get_dockerfile(repo_name):
# https://hub.docker.com/v2/repositories/dido/webofficina/dockerfile/
# https://hub.docker.com/v2/repositories/kaggle/python/dockerfile/
docker_url = "https://hub.docker.com/v2/repositories/{}/dockerfile/"
# logger.info(docker_url.format(repo_name))
try:
response = requests.get(docker_url.format(repo_name))
dockerfile = response.json()['contents']
return dockerfile
except ConnectionError as e:
raise
def get_officials_FROM():
# GITHUB official library
# https://github.com/docker-library/official-images/tree/master/library
# https://raw.githubusercontent.com/docker-library/official-images/master/library/[java]
github_url = "https://github.com/"
page = requests.get(github_url +
"docker-library/official-images/tree/master/library")
tree = html.fromstring(page.content)
# list of names of the official images [''adminer', 'aerospike',..]
name_off = tree.xpath('//td[@class="content"]/span/a/text()')
node = dict()
for name in name_off:
# retrive the url path to the github library official e.g.
# ['/docker-library/official-images/blob/master/library/adminer']
print("Searching image {} ".format(name))
href = tree.xpath('//*[@title="{}"]/@href'.format(name))
node["name"] = name
# https://github.com//docker-library/official-images/blob/master/library/adminer
git_url = github_url + href[0]
node['gitrepo'] = git_url
off_git_page = requests.get(git_url)
tree_github = html.fromstring(off_git_page.content)
# get the GitHub repository url of the official image
githubrepo = tree_github.xpath(
"//tr[td[contains(text(), 'GitRepo')]]/*/text()")
# https://github.com/TimWolla/docker-adminer.git'
if len(githubrepo) > 0:
githubrepo = githubrepo[0]
# print(githubrepo)
# //tr[td[contains(text(), 'Directory')]]/*/text()" // 'Directory: 4.3'
githubrepo_dirs = tree_github.xpath(
"//tr[td[contains(text(), 'Directory')]]/*/text()")
# githubrepo_tags = tree_github.xpath("//tr[td[contains(text(), 'Directory')]]/*/text()")
dirs = [d.split(":")[1].strip()
for d in githubrepo_dirs] # i.strip()
node["Directory"] = dirs
for directory in node["Directory"]:
# githubrepo #
# https://github.com/TimWolla/docker-adminer.git' =>
# TimWolla/docker-adminer
startString = 'https://github.com/'
endString = '.git'
repo = githubrepo[githubrepo.find(
startString) + len(startString):githubrepo.find(endString)]
# print(repo)
response_dockerfile = requests.get(
"https://raw.githubusercontent.com/{}/master/{}/Dockerfile"
.format(repo, directory))
dockerfile = response_dockerfile.text
from_image = parse_dockerfile(dockerfile)
print("Base image {} ".format(from_image))
node['from_image'] = from_image
yield node
else:
print("GitRepo has not been found")
# https://raw.githubusercontent.com/docker-library/[repository]/master/[directory]/Dockerfile
# https://raw.githubusercontent.com/tianon/docker-bash/master/4.3/Dockerfile
# for image in get_officials_FROM():
# print(image)
```
#### File: docker-compose/core/bridgecompose.py
```python
import logging
from os.path import normpath
from compose import __version__ as compose_version
from compose.container import Container
from compose.cli.utils import get_version_info
from compose.cli.command import get_project as compose_get_project, get_config_path_from_options, get_config_from_options
from compose.config.config import get_default_config_files
from compose.config.environment import Environment
from compose.cli.main import TopLevelCommand
from compose.cli.docker_client import docker_client
from compose.const import API_VERSIONS, COMPOSEFILE_V3_0
logging.info(get_version_info('full'))
def ps_(project):
"""
containers status
"""
logging.info('ps ' + project.name)
running_containers = project.containers(stopped=True)
items = [{
'name': container.name,
'name_without_project': container.name_without_project,
'command': container.human_readable_command,
'state': container.human_readable_state,
'labels': container.labels,
'ports': container.ports,
'volumes': get_volumes(get_container_from_id(project.client, container.id)),
'is_running': container.is_running} for container in running_containers]
return items
def run(project,service,command, options):
# Run a service passing the command OPTIONS
# e.g.
# docker-compose run crawler crawl --fp=100 --min-pulls=10 --min-stars=20
# SERVICE = crawler
# COMMAND = crawl
# OPTIONS = --fp=100 --min-pulls=10 --min-stars=20
service = "crawler"
service = self.project.get_service(service) #options['SERVICE'])
# detach = options.get('--detach')
#
# if options['--publish'] and options['--service-ports']:
# raise UserError(
# 'Service port mapping and manual port mapping '
# 'can not be used together'
# )
# if options['COMMAND'] is not None:
# command = [options['COMMAND']] + options['ARGS']
# elif options['--entrypoint'] is not None:
# command = []
# else:
# command = service.options.get('command')
command = "--fp=100 --min-pulls=10 --min-stars=20"
container_options = None # build_container_options(options, detach, command)
run_one_off_container(
container_options, self.project, service, options,
self.toplevel_options, self.project_dir
)
def get_container_from_id(my_docker_client, container_id):
"""
return the docker container from a given id
"""
return Container.from_id(my_docker_client, container_id)
def get_volumes(container):
"""
retrieve container volumes details
"""
mounts = container.get('Mounts')
return [dict(source=mount['Source'], destination=mount['Destination']) for mount in mounts]
def get_yml_path(path):
"""
get path of docker-compose.yml file
"""
return get_default_config_files(path)[0]
def get_project(path, project_name=None):
"""
get docker project given file path
"""
logging.debug('get project ' + path)
environment = Environment.from_env_file(path)
config_path = get_config_path_from_options(path, dict(), environment)
project = compose_get_project(path, config_path, project_name=project_name)
return project
def containers():
"""
active containers
"""
return client().containers()
def info():
"""
docker info
"""
docker_info = client().info()
return dict(compose=compose_version,info=docker_info['ServerVersion'], name=docker_info['Name'])
def client():
"""
docker client
"""
return docker_client(Environment(), API_VERSIONS[COMPOSEFILE_V3_0])
def project_config(path):
"""
docker-compose config
"""
norm_path = normpath(path)
return get_config_from_options(norm_path, dict())
if __name__ == "__main__":
project_dir="/home/dido/code/DockerAnalyserUI/DockerAnalyser/"
project = get_project(project_dir, project_name="docker-finder")
# # docker-compose build --build-arg DEPLOY_PACKAGE_PATH=/data/examples/deploy-package-dockerfinder scanner
# d = project.build(service_names=["scanner"], build_args={"DEPLOY_PACKAGE_PATH": "/data/examples/deploy-package-dockerfinder"})
#
# # service_names=None, no_cache=False, pull=False, force_rm=False, memory=None, build_args=None, gzip=False):
# containers = project.up() # service_names=["scanner"]
# c = [ str(d) for i in containers]
# print(c)
#
# project.stop()
topcommand = TopLevelCommand(project, project_dir=project_dir)
# tart_deps = not options['--no-deps']
# always_recreate_deps = options['--always-recreate-deps']
# exit_value_from = exitval_from_opts(options, self.project)
# cascade_stop = options['--abort-on-container-exit']
# service_names = options['SERVICE']
# timeout = timeout_from_opts(options)
# remove_orphans = options['--remove-orphans']
# detached = options.get('--detach')
# no_start = options.get('--no-start')
# topcommand.build({'SERVICE':['scanner'],
# '--build-arg': {
# "DEPLOY_PACKAGE_PATH": "/data/examples/deploy-package-dockerfinder"
# },
# '--memory':'1GB'
# }
#
# )
# topcommand.up()
# topcommand.run({'SERVICE':'crawler',
# 'COMMAND':'crawl',
# 'ARGS':['--fp=100', '--min-pulls=10','--min-stars=20', '--policy=pulls_first'],
# "--no-deps":True,
# '-e':None,'--label':None,'--rm':True,'--user':None,
# '--name':None,'--workdir':None,'--volume':None,
# '--publish':False,'--detach':True,"--service-ports":False,
# "--use-aliases":None,
# '--entrypoint':None})
# topcommand.stop({'SERVICS':'scanner'})
```
#### File: docker-compose/core/mycompose.py
```python
from compose.cli.main import TopLevelCommand
from compose.project import OneOffFilter
from compose.cli.command import project_from_options
from os import path
import json
import yaml
import io
from itertools import groupby
from operator import attrgetter
def singleton(theClass):
""" decorator for a class to make a singleton out of it """
classInstances = {}
def getInstance(*args, **kwargs):
""" creating or just return the one and only class instance.
The singleton depends on the parameters used in __init__ """
key = (theClass, args, str(kwargs))
if key not in classInstances:
classInstances[key] = theClass(*args, **kwargs)
return classInstances[key]
return getInstance
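# Usage sketch for the decorator above (hypothetical class, not part of this
# module): repeated calls with the same __init__ arguments return the very
# same instance, while different arguments create a new one.
#   @singleton
#   class Cfg:
#       def __init__(self, name): self.name = name
#   Cfg('a') is Cfg('a')  # -> True
#   Cfg('a') is Cfg('b')  # -> False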
@singleton
class MyCompose:
def __init__(self, project_name, project_dir='.', file_compose='docker-compose.yml'):
self._name = project_name
self.file = file_compose
self.project_dir = project_dir
print("Reading file: {}".format(self.get_compose_file()))
# TODO: get project must be called every time in order to load the compose file updated
self._project = self._get_project(project_dir, project_name=self._name)
self.compose = TopLevelCommand(self._project, project_dir=project_dir)
def reload_project(self):
self._project = self._get_project(self.project_dir, project_name=self._name)
self.compose = TopLevelCommand(self._project, project_dir=self.project_dir )
def get_compose_file(self):
return path.join(self.project_dir, self.file)
def get_name(self):
return self._name
def get_service_names(self):
return self._project.service_names
def build(self, services=None):
services = services if services else self.get_service_names()
services_no_scanner = list(filter(lambda x: x != "scanner", services))
self.compose.build({'SERVICE': services_no_scanner, '--memory': '1GB'})
def build_scanner(self, scanner_name='scanner', path_deploypackage='/data/examples/deploy-package-dockerfinder'):
print("Building the scanner....")
self.compose.build({'SERVICE': [scanner_name],
'--build-arg': {
"DEPLOY_PACKAGE_PATH": path_deploypackage
},
'--memory': '1GB'})
print("Finished building")
def up(self, services=None, scale=None):
# scale = ['crawler=2']
# services = list of services
print(scale)
services = services if services else self.get_service_names()
scale = scale if scale else []
options = {
'SERVICE': services,
'start': True,
'--no-deps': False,
'--always-recreate-deps': False,
'--abort-on-container-exit': None,
'--remove-orphans': False,
'--detach': True,
'--no-start': False,
'--no-recreate': False,
'--force-recreate': True,
'--no-build': False,
'--build': False,
'--scale': [scale] if scale else [] # ['crawler=2']
}
self.compose.up(options)
return (services, scale)
def run(self, service_name='crawler', command='crawl', args=['--fp=100', '--min-pulls=10', '--min-stars=20', '--policy=pulls_first']):
options = {'SERVICE': service_name,
'COMMAND': command,
'ARGS': args,
"--no-deps": True,
'-e': None, '--label': None, '--rm': True, '--user': None,
'--name': None, '--workdir': None, '--volume': None,
'--publish': False, '--detach': True, "--service-ports": False,
"--use-aliases": True,
'--entrypoint': None}
return self.compose.run(options)
def stop(self, services):
services = [services] if services else self.get_service_names()
self.compose.stop({'SERVICE': services})
return self.get_service_names()
def ps(self, services=None):
"""
services status
"""
# running_containers = self._project.containers(stopped=False)
services_name = [services] if services else self.get_service_names()
running_containers = self._project.containers(
service_names=services_name, stopped=True)
items = [{
'name': container.name,
'service': container.service,
'name_without_project': container.name_without_project,
'command': container.human_readable_command,
'state': container.human_readable_state,
'health': container.human_readable_health_status,
# 'labels': container.labels,
'ports': container.human_readable_ports,
# 'volumes': get_volumes(get_container_from_id(project.client, container.id)),
'log': container.log_config,
'is_running': container.is_running} for container in running_containers]
service_per_container = list()
for key, group in groupby(items, lambda x: x['service']):
l = list(group)
service_per_container.append({'name': key,
'num': len(l),
'containers': l,
})
return service_per_container
def logs(self, services=None, tail=4):
services_logs = []
containers = self._project.containers(
service_names=services if services else self.get_service_names(), stopped=True)
for container in containers:
logs = dict()
logs['name'] = container.name
logs['service'] = container.service
print(tail)
logs['log'] = container.logs(tail=tail).decode("utf-8")
services_logs.append(logs)
return services_logs
def _get_project(self, path, project_name=None):
"""
get docker project given file path
"""
# environment = Environment.from_env_file(path)
# config_path = get_config_path_from_options(path, dict(), environment)
# project = compose_get_project(path, config_path, project_name=project_name)
options = {
'--file': [self.file],
'--host': None,
'--project-name': project_name,
'--verbose': False,
'--project-directory': None, # override the path of the project
'--compatibility': False
}
project = project_from_options(path, options)
return project
def get_config(self, service=None, key='command'):
with open(self.get_compose_file(), 'r+') as file_compose:
data = yaml.load(file_compose)
if (service):
# list ['command','args=value', 'arg2=value2']
return data['services'][service][key]
else:
return data['services']
def config_command(self, service, command=None, args=None):
try:
with open(self.get_compose_file(), 'r+') as file_compose:
data = yaml.load(file_compose)
# list ['command','args=value', 'arg2=value2']
actual_command_args = data['services'][service]['command']
actual_command = actual_command_args[0]
actual_args = actual_command_args[1:]
data['services'][service]['command'] = [
command if command else actual_command] + (args if args else actual_args)
file_compose.seek(0) # rewind
yaml.dump(data, file_compose, indent=4)
file_compose.truncate()
self.reload_project()
return True
except:
return False
```
#### File: docker-compose/core/utils.py
```python
from compose.cli.command import get_project as compose_get_project, get_config_path_from_options, get_config_from_options
from compose.cli.command import project_from_options
from compose.config.environment import Environment
# def pull_code(local_folder, urlrepo="https://github.com/di-unipi-socc/DockerFinder.git", branch="dfcustom" ):
# """
# Downlaod the source code of the project
# """
# Repo.clone_from(urlrepo, local_folder , branch=branch, depth=1)
def get_project(path, project_name=None):
"""
get docker project given file path
"""
# environment = Environment.from_env_file(path)
# config_path = get_config_path_from_options(path, dict(), environment)
# project = compose_get_project(path, config_path, project_name=project_name)
options = {
'--file': ['docker-compose.json'],
'--host': None,
'--project-name': project_name,
'--verbose': False,
'--project-directory': None, # override the path of the project
'--compatibility': False
}
project = project_from_options(path, options)
return project
def success_msg(msg, detail=None):
return {"err": 0, "msg": msg, "detail": "{}".format(detail)}
```
#### File: DockerAnalyser/docker-compose/views.py
```python
import json
import zipfile
import sys
import traceback
import os.path
import compose
from shutil import make_archive
import shutil
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.http import JsonResponse
from distlib.index import PackageIndex
from wsgiref.util import FileWrapper
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from .core import utils
from .core.mycompose import MyCompose
# PROJECT_DIR = "/home/dido/code/DockerAnalyserUI/DockerAnalyser/"
# PATH_TOSAVE_DEPLOY_PACKAGE = "{}/data/examples".format(PROJECT_DIR)
# DEFAULT_DEPLOY_PACKAGE = "default-deploy-package"
PROJECT_NAME = "docker-analyser"
mycompose = MyCompose(project_name=PROJECT_NAME,project_dir=settings.DOCKER_ANALYSER_DIR)
@csrf_exempt
def upload(request):
key = "deploy-package"
if request.method == 'POST':
# POST /upload:
# deploy-package : DEPLOY-PACKAGE.zip
if key not in request.FILES:
return JsonResponse({"err": 0, "msg": "NO {} key set into body request".format(key), "detail": ""})
uploaded_file = request.FILES[key] # deploy-pacakge: .zip file
files_extracted = handle_uploaded_deploy_package(uploaded_file)
if not files_extracted:
options = "Name:{} \t Bytes:{} \t Content-type:{}".format(
uploaded_file.name, uploaded_file.size, uploaded_file.content_type)
return JsonResponse({"err": 1, "msg": "{} upload file is empty".format(uploaded_file.name), "detail": options})
else:
return JsonResponse(utils.success_msg("{} successfully extracted".format(uploaded_file.name),
files_extracted))
if request.method == 'GET':
"""
Return the latest uploaded zip file.
If the latest is missing, it returns the zip file containing the default
deploy package.
"""
path_to_zip = get_zip_deploy_package()
with open(path_to_zip, 'rb') as file_zip:
print("Open file zip {}".format(file_zip.name))
response = HttpResponse(FileWrapper(
file_zip), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename={}'.format(
os.path.basename(file_zip.name)) # +file_name.replace(" ","_")+'.zip'
response['Access-Control-Expose-Headers'] = 'Content-Disposition'
return response
def build(request):
# GET /build
# Build the scanner with the deploy-package uploaded.
if request.method == 'GET':
try:
mycompose.build() # build the services aprt scanner
deploy_package = get_latest_uploaded_deploy_package()
path = os.path.join(settings.MEDIA_ROOT,deploy_package)
res = mycompose.build_scanner(scanner_name="scanner",path_deploypackage=path)
msg = utils.success_msg("{} DockerAnalyser built successfully. Selected deploy package: {}"
.format(mycompose.get_name(),deploy_package))
return JsonResponse(msg)
except compose.service.BuildError as err:
return JsonResponse({"err": 1,
"msg": "{} DockerAnalyser not built. Error occurs when building with {}".format(mycompose.get_name(), deploy_package),
"detail": str(err)
})
except Exception as e:
return JsonResponse({"err": 1, "msg": traceback.format_exc()})
def up(request):
# GET /up?service=<SERVICE_NAME>&scale=<NUM>
service = request.GET.get('service')
scale = request.GET.get('scale')
try:
(services_up, scale) = mycompose.up(services=([service] if service else None), scale=(
"{}={}".format(service, scale) if scale else None)) # service_names=["scanner"]
return JsonResponse({"err": 0,
"msg": "DockerAnalyser UP succesfully.",
"detail": "services:{} scale {} ".format(services_up, scale)})
except Exception as e:
return JsonResponse({"err": 1, "msg": traceback.format_exc()})
@csrf_exempt
def config(request):
# POST /config
# body:
# {
# "service": "crawler",
# "command": "crawl",
# "args": {
# "force-page":true,
# "si": 0,
# "random": false,
# "fp": 10,
# "ps": 0,
# "policy": "pulls_first",
# "min-stars" : 0,
# "min-pulls" : 0,
# "only-automated": true,
# "only-official": false
# }
# }
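# A client-side sketch of calling this endpoint (hypothetical host and URL
# prefix; the real route depends on the project's urls.py, and `requests`
# is only used here for illustration):
#   requests.post("http://localhost:8000/config", json={
#       "service": "crawler", "command": "crawl",
#       "args": {"force-page": True, "si": 0, "random": False, "fp": 10,
#                "ps": 0, "policy": "pulls_first", "min-stars": 0,
#                "min-pulls": 0, "only-automated": True,
#                "only-official": False}})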
if request.method == 'POST':
if request.body:
body = json.loads(request.body.decode('utf-8'))
service = body['service']
command = body['command']
user_args = body['args']
crawler_default_args = ["--save-url=/data/crawler/lasturl.txt",
"--amqp-url=amqp://guest:guest@rabbitmq:5672",
"--images-url=http://images_server:4000/api/images/",
"--queue=images"]
map_args = {"si": "--si", "force-page": "--force-page", "random": "--random",
"fp": "--fp", "ps": "--ps", "policy": "--policy",
"min-stars": "--min-stars", "min-pulls": "--min-pulls",
"only-automated": "--only-automated",
"only-official": "--only-official"}
crawler_user_args = [
"{}={}".format(map_args[k], str(v)) for k, v in user_args.items() if (k not in [
'only-automated', 'only-official']) ]
if(user_args['only-automated']):
crawler_user_args.append(map_args['only-automated'])
if(user_args['only-official']):
crawler_user_args.append(map_args['only-official'])
args = crawler_default_args + crawler_user_args
try:
mycompose.config_command(service, command, args)
return JsonResponse(utils.success_msg(
"{} configured succesfully".format(
service),
[command]+args))
except Exception as e:
return JsonResponse({"err": 1, "msg": traceback.format_exc()})
if request.method == 'GET':
# GET /config?service=<NAME>
service = request.GET.get('service')
res = mycompose.get_config(service if service else None, key="command")
return JsonResponse(
utils.success_msg(
"{} configuration options".format(
service if service else "All the services"),
res))
def logs(request):
# GET /logs?service=<SERVICE_NAME>
# mycompose = MyCompose(project_name=PROJECT_NAME, project_dir=PROJECT_DIR)
service = request.GET.get('service')
tail = request.GET.get('tail')
res = mycompose.logs(services=[service] if service else None, tail=int(tail) if tail else 10)
return JsonResponse({"err": 0, "msg": "Logs of services", "services": res})
def stop(request):
# GET /stop
service = request.GET.get('service')
try:
services = mycompose.stop(services=service) # service_names=["scanner"]
return JsonResponse({"err": 0, "msg": "stop {} services".format(service)})
except Exception as e:
return JsonResponse({"err": 1, "msg": traceback.format_exc()})
def status(request):
# GET /status
service = request.GET.get('service')
try:
services = mycompose.ps(services=service)
if(service):
return JsonResponse({"err": 0, "msg": "Status of {}".format(service),
"replicas": len(services), "services": services}, safe=False)
else:
return JsonResponse({"err": 0, "msg": "status of the services", "services": services}, safe=False)
except Exception as e:
return JsonResponse({"err": 1, "msg": traceback.format_exc()})
def exist_uploaded_deploy_package():
fs = FileSystemStorage()
directories, files = fs.listdir(settings.MEDIA_ROOT)
print("Directories {} Files: {}".format(directories, files))
return len(directories) > 0 # settings.UPLOADED_DEPLOY_PACKAGE in directories
def _extract_zip_file(file, path_folder):
filenames_extracted = []
print("Extracting {} into {}".format(file.name,path_folder))
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.printdir()
zip_ref.extractall(path_folder)
filenames_extracted = [zip.filename for zip in zip_ref.infolist()]
return filenames_extracted #[0], filenames_extracted[1:]
def get_zip_deploy_package():
print("Getting deploy package")
name_deploy_package = ""
if exist_uploaded_deploy_package():
name_deploy_package = get_latest_uploaded_deploy_package()
else:
name_deploy_package = settings.DEFAULT_DEPLOY_PACKAGE
# copy the deploy package from examples folder in DockerAnalyser to server MEDIA folder
shutil.copytree(os.path.join(settings.DOCKER_ANALYSER_EXAMPLES,name_deploy_package),
os.path.join(settings.MEDIA_ROOT,name_deploy_package))
file_path = os.path.join(settings.MEDIA_ROOT,name_deploy_package)
path_to_zip = make_archive(file_path, "zip", file_path)
return path_to_zip
def get_deploy_packages_name():
fs = FileSystemStorage()
directories, files = fs.listdir(settings.MEDIA_ROOT)
dir_creation_time = [(fs.get_created_time(directory),directory) for directory in directories]
dir_creation_time.sort(key=lambda tup: tup[0], reverse=True)
uploaded_package = [d[1] for d in dir_creation_time]
return uploaded_package
def get_latest_uploaded_deploy_package():
dp = get_deploy_packages_name()
return dp[0]
def handle_uploaded_deploy_package(uploaded_file):
date_upload = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
# path_media = os.path.join(settings.MEDIA_ROOT)#,date_upload+settings.UPLOADED_DEPLOY_PACKAGE)
files_extracted = _extract_zip_file(uploaded_file,path_folder=settings.MEDIA_ROOT)
return files_extracted
``` |
{
"source": "1110sillabo/NeuroKit",
"score": 2
} |
#### File: NeuroKit/neurokit2/__init__.py
```python
import datetime
import platform
from .benchmark import *
from .bio import *
from .complexity import *
from .data import *
from .ecg import *
from .eda import *
from .eeg import *
from .emg import *
from .eog import *
from .epochs import *
from .events import *
from .hrv import *
from .misc import *
from .ppg import *
from .rsp import *
from .signal import *
from .stats import *
# Info
__version__ = "0.0.39"
# Maintainer info
__author__ = "The NeuroKit development team"
__email__ = "<EMAIL>"
# Citation
__bibtex__ = r"""
@misc{neurokit2,
doi = {10.5281/ZENODO.3597887},
url = {https://github.com/neuropsychology/NeuroKit},
author = {<NAME> and <NAME> and <NAME>. and <NAME>. and Lesspinasse,
Fran\c{c}ois and <NAME> and Schölzel, Christopher and <NAME>, Annabel},
title = {NeuroKit2: A Python Toolbox for Neurophysiological Signal Processing},
publisher = {Zenodo},
month={Mar},
year = {2020},
}
"""
__cite__ = (
"""
You can cite NeuroKit2 as follows:
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2020). NeuroKit2: A Python Toolbox for Neurophysiological
Signal Processing. Retrieved """
+ datetime.date.today().strftime("%B %d, %Y")
+ """, from https://github.com/neuropsychology/NeuroKit
Full bibtex reference:
"""
+ __bibtex__
)
# Aliases for citation
__citation__ = __cite__
# =============================================================================
# Helper functions to retrieve info
# =============================================================================
def cite(silent=False):
"""Cite NeuroKit2.
This function will print the bibtex and the APA reference for you to copy and cite.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> nk.cite()
"""
if silent is False:
print(__cite__)
else:
return __bibtex__
def version(silent=False):
"""NeuroKit2's version.
This function is a helper to retrieve the version of the package.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> nk.version()
"""
if silent is False:
print(
"- OS: " + platform.system(),
"(" + platform.architecture()[1] + " " + platform.architecture()[0] + ")",
"\n- Python: " + platform.python_version(),
"\n- NeuroKit2: " + __version__,
)
else:
return __version__
``` |
{
"source": "1110sillabo/ProgrammingDigitalHumanitiesBook",
"score": 4
} |
#### File: Ch5 - Python Crash Course/Code/5.11_comprehensions.py
```python
items = 1, 2, 3, 4
def yourfunction(x):
print(x)
print(x*x)
output = [yourfunction(item) for item in items]
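# The comprehension above is equivalent to the explicit loop:
#   output = []
#   for item in items:
#       output.append(yourfunction(item))
# Since yourfunction only prints and returns nothing, output ends up as
# [None, None, None, None].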
```
#### File: Ch5 - Python Crash Course/Code/5.1_Norvig_spell_correct_pt1.py
```python
import re
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter(words(open('big.txt').read()))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) \
or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
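# Example calls (as in Norvig's original article; they assume big.txt is
# present and that the edits1/edits2 helpers from the remaining part of the
# corrector are also defined):
#   correction('speling')    # -> 'spelling'
#   correction('korrectud')  # -> 'corrected'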
``` |
{
"source": "1111111111122222222223333333333/Django_boxuegu",
"score": 2
} |
#### File: apps/organization/views.py
```python
from django.shortcuts import render
from django.views import View
class OrgList(View):
def get(self, request):
return render(request, 'org-list.html')
# Get the teacher list
class TeacherList(View):
def get(self, request):
return render(request, 'teachers-list.html')
class TeacherDetail(View):
def get(self, request):
return render(request, 'teacher-detail.html')
class AddAsk(View):
def get(self, request):
return render(request, 'org-detail-desc.html')
class OrgHome(View):
def get(self, request):
return render(request, 'org-detail-homepage.html')
class OrgDesc(View):
def get(self, request):
return render(request, 'org-detail-desc.html')
class OrgCourse(View):
def get(self, request):
return render(request, 'org-detail-course.html')
``` |
{
"source": "1-1-1-1-1-1-1-1/LocalKey",
"score": 3
} |
#### File: 1-1-1-1-1-1-1-1/LocalKey/action.py
```python
__all__ = ['table', 'action', 'main']
from file import get_info
import os
from libs.hasht import save_file, get_hash
from names import HASHING_TYPE, LOCAL_ANSWERS, STOP_WORD
from get_answers import folder
try:
os.makedirs(LOCAL_ANSWERS)
except:
None
def table_v0(): # It was earlier.
table0 = """\
—————————————
| 1 | 2 | 3 |
|———————————|
| 4 | 5 | 6 |
|———————————|
| 7 | 8 | 9 |
—————————————"""
for i in table0.split('\n'):
assert len(i) == len(table0.split('\n')[0])
return table0
def table(a):
"""Create a local table.
Usage example:
>>> print(table(3).format(*list(range(9))))
╔═══╤═══╤═══╗
║ 0 │ 1 │ 2 ║
╟───┼───┼───╢
║ 3 │ 4 │ 5 ║
╟───┼───┼───╢
║ 6 │ 7 │ 8 ║
╚═══╧═══╧═══╝
Note: a ``table(a, b)`` could be done in the same way; even for
any input (i.e. with possibly length-different symbols in `format`).
"""
def _align(line, n_times, *, by_symbol=" "):
# If trying `"{:=...}".format(str(...))`
# (with exact values instead of '...'):
# ``ValueError: '=' alignment not allowed in string format specifier``
return by_symbol * ((n_times - 1) // 2) + line \
+ by_symbol * (n_times // 2)
columns = 3 # Number of columns.
line = "╟" + "┼".join([
"─"*a for i in range(columns)
]) + "╢"
input_line = "║" + "│".join([
_align("{}", a) for i in range(columns)
]) +"║"
# The difference between these `return`s is readability, not the result.
"""return f'╔{"═"*a}╤{"═"*a}╤{"═"*a}╗\n' \
+ (line + '\n').join(input_line + '\n' for i in range(3))
+ f'╚{"═"*a}╧{"═"*a}╧{"═"*a}╝'"""
return f'╔{"═"*a}╤{"═"*a}╤{"═"*a}╗\n' \
+ input_line + '\n' \
+ line + '\n' \
+ input_line + '\n' \
+ line + '\n' \
+ input_line + '\n' \
+ f'╚{"═"*a}╧{"═"*a}╧{"═"*a}╝'
def table_of_answers(_answers):
return table(3).format(*["•" if n in _answers else " " \
for n in range(1, 10)])
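# For example, table_of_answers([1, 5, 9]) fills cells 1, 5 and 9 of the
# 3x3 grid produced by table(3) with '•' and leaves the other cells blank.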
def action(text):
# Removing the previous results:
for filename in os.listdir(LOCAL_ANSWERS):
os.remove(LOCAL_ANSWERS + filename)
topic, links = get_info(text); del text
source = folder + topic + '\\'
possible_answers = os.listdir(source)
# Making a list of requested hashes (i.e. all questions):
hashes = dict()
for link in links:
_hash_ = get_hash(link, hashing_type=HASHING_TYPE)
hashes.update({_hash_: link})
answers_h = tuple(filter(lambda hash_: hash_ + '.png' \
in list(map(lambda obj: obj.split('_')[-1], possible_answers)),
hashes.keys()))
for h in answers_h:
save_file(hashes[h], LOCAL_ANSWERS + h + '.png')
""" # Or so:
with open('data/pictures/' + i + '.png', 'rb') as f, \
open(FOLDER + i + '.png', 'wb') as g:
g.write(f.read())"""
# Which is faster: doing ``requests.get(link).content``
# or doing the ``g.write(f.read())`` (with open(...) ... )?
L = [h for h in hashes if h in answers_h]
keys = list(hashes.keys())
answers = [keys.index(h) + 1 for h in L]
with open(LOCAL_ANSWERS + "README.txt", 'w', encoding='utf-8') as f:
# V0:
# f.write('1. These are numbers of pictures, which are suggested to be answers: '
# + ', '.join(map(str, answers))
# + '.\n2. The table, according to which they are numerated:\n' + TABLE
# + '\n\n3. All pictures of those were placed to this folder.') #?
# V1:
f.write('The table of suggested answers:\n'
+ table_of_answers(answers)
+ '\n\nAll those pictures (and only those) were placed to this folder.')
return answers
def _get_text(*, with_stop=True):
if with_stop:
def lines():
while True:
i = input()
if i != STOP_WORD:
yield i
else:
break
return '\n'.join(lines())
else:
# Alternatively, it can be done by adding a hotkey,
# which interrupts the input.
pass
raise NotImplementedError
'''# Alternatively, it can be done like this:
all_text = ''
def one_line():
i = input()
if i != STOP_WORD:
return i
while True:
line = one_line()
if line is not None:
all_text += '\n' + line
else:
break
return all_text
'''
def _main():
# In case of adding an option "act without stop-word", to change:
print(f'Enter the text here:\n(Print "{STOP_WORD}" to stop)\n')
text = _get_text()
print("\nTrying to get the result...", end="")
answers = action(text)
print("""
Here are suggested answers:
{}
This and some more were written to the folder "{}".""".format(
table_of_answers(answers), LOCAL_ANSWERS))
def main():
try:
_main()
except Exception as e:
print(e)
input() # Is `finally` needed here?
if __name__ == '__main__':
main()
```
#### File: '/About solvings/check_solvings.py
```python
import os
from time import sleep
from sys import exit as sys_exit
RUN_MODE = 2
SLEEP_FOR = 60 # Seconds
def main():
with open('Unsolved.txt') as f:
filenames = f.read().split('\n')
def info(j):
with open('pictures_savings/' + j, 'rb') as f:
return f.read().count(b'\n') # As I saw, this may not work reliably:
                             # does it really count the number of
                             # lines in the file?
for j in filenames:
print(j, info(j), sep=': ')
#t: print(info('00f0446c4ff0a9d72008d744aee94f0b.png'))
return
if RUN_MODE == 1:
main()
if RUN_MODE == 2:
counter = 1
try:
while True:
print("\
=== Attempt number {:=<4}==================".format(str(counter) + ' '))
main()
print()
sleep(SLEEP_FOR)
counter += 1
except KeyboardInterrupt:
sys_exit(input('Stopped. '))
input('Ready with it.')
``` |
{
"source": "1-1-1-1-1-1-1-1/Old-Slavonic-bot",
"score": 3
} |
#### File: 1-1-1-1-1-1-1-1/Old-Slavonic-bot/_disconnector.py
```python
import os
from typing import NoReturn
from telethon.sync import TelegramClient
import globalconfig # Run `load_dotenv`
del globalconfig
_get = os.environ.get
default_session_name = 'null'
api_id = _get('API_ID')
api_hash = _get('API_HASH')
def disconnect(api_id=api_id, api_hash=api_hash, client=None,
session_name=default_session_name) -> NoReturn:
"""Ensure previous client session is closed."""
print("Trying to end the previous client session...")
try:
if client is None:
client = TelegramClient(session_name, api_id, api_hash)
client.log_out()
print('Ended previous client session')
except ConnectionError:
print('Connection error raised: already disconnected from Telegram')
except:
raise
print("Disconnected from Telegram")
if __name__ == '__main__':
disconnect()
```
#### File: utils/escape_markdown/escape_markdown.py
```python
__all__ = ['escape']
def escape(s: str) -> str:
pass
```
#### File: utils/escape_markdown/escape_markdown_v2.py
```python
_special_symbols = {
'_', '*', '[', ']', '(', ')', '~', '`', '>', '#', '+', '-', '=', '|', '{',
'}', '.', '!'
}
def escape(string: str) -> str:
for symbol in _special_symbols:
# Escape each special symbol
string = string.replace(symbol, '\\' + symbol)
# Mind that such sequences, being replaced, do not intersect.
# Replacement order is not important.
return string
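# Usage sketch:
#   escape('a_b*c (d)')  ->  'a\\_b\\*c \\(d\\)'
# i.e. each MarkdownV2 special character gets a single backslash in front.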
```
#### File: telebot (pyTelegramBotAPI)/hist/worker-telebot.py
```python
import telebot
from telebot import types
import random
import re
from os.path import join
import requests
from config import (
TOKEN, CACHE_TIME, NAMES_REPLACE, UIDS_BASE,
# pics:
A_CYRYLLIC, A_GLAGOLIC, A_LATER_GLAGOLIC,
# technical
ANY_LETTER,
ADMINS, LOGGING_CHAT, HELP_URL,
PROD, PASSWORD_ENABLED, ON_HEROKU, CHAT_LOGS_MODE_ALL,
# configparser:
configparser, NoSectionError,
# chats:
CHANNEL, TEST_CHAT, SPEAK_CHAT, HEAD_CHAT,
# other:
bot, BOT_ID, BOT_USERNAME,
WORDS_GAME_PATTERN,
INLINE_EXAMPLES,
GAME_WORDS_DATA,
TOKEN_INIT
)
from functions import translation, glagolic_transliterate
edit_date = '@day-08.03.2021->12.05.2021;'
# dummy, to check for updates while running.
def bot_inform(msg, chat_id=LOGGING_CHAT, type_=None, **kwargs):
if type_ is not None and type_ not in CHAT_LOGS_MODE_ALL:
return
bot.send_message(chat_id, msg, **kwargs)
bot_inform(f"""
Launched the bot.
Is <u>{"" if PROD else 'not '}the production</u> version.
Is on Heroku: <u>{str(ON_HEROKU).lower()}</u>.
""", type_="launch", parse_mode='HTML')
del ON_HEROKU
def _cmd_pattern(cmd, *, flags='i'): # Internal
if flags:
_flags_add = r'(?' + flags + r')'
else:
_flags_add = ""
cmd_pattern = _flags_add + r'/' + cmd + r'(?:@' + BOT_USERNAME + r')?'
return cmd_pattern
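# Quick illustration of the helper above (assuming BOT_USERNAME == 'my_bot'):
#   _cmd_pattern('start')  ->  r'(?i)/start(?:@my_bot)?'
# so it matches both '/start' and '/start@my_bot', case-insensitively.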
def feature_exists(fid): # Internal
d = {
'teach_word': False
}
if fid in d:
return d[fid]
return False
def _chatMember(user_id, of):
# Is it needed NOT for users, new in the chat?
try:
return bot.get_chat_member(of, user_id)
except:
return types.ChatMember(*([None]*4 + [False]*12))
def is_participant(user_id, of=HEAD_CHAT):
"""User is participant of chat `of`, returns bool."""
return _chatMember(user_id, of).status in [
"creator",
"administrator",
"member",
"restricted"
]
def _add_user(user_id):
# Add ID to the base. Comments are allowed.
filename = UIDS_BASE
with open(filename, 'a') as f:
pass
with open(filename, 'r', encoding='utf8') as f:
data = f.read() # to read
data_ = "" # to write
if data and data[-1] != '\n':
data_ += '\n'
if (s := str(user_id)) not in data:
bot_inform('New user: \
<a href="tg://user?id={0}">{1}</a>\nuser_id: {0}'.format(
user_id, "User"), type_='new user', parse_mode='HTML')
data_ += s + '\n'
with open(filename, 'a', encoding='utf8') as f:
f.write(data_)
def full_name(user):
return f'{user.first_name}{" " + u if (u := user.last_name) else ""}'
def user_text_mention(user, fill_as=None, fill=None):
# fill_as: text to insert at mention
if fill_as is not None:
import warnings
warnings.warn('fill_as param is deprecated')
if fill is not None:
fill_as = fill # compat
if fill_as is None:
filling = full_name(user)
else:
filling = fill_as
return f'<a href="tg://user?id={user.id}">{filling}</a>'
def load_users():
with open(UIDS_BASE, encoding='utf-8') as f:
users = map(eval, f.read().strip().split('\n'))
return users
def make_move(chat_id, message, letter, mentioned):
bot.send_chat_action(chat_id, 'typing')
try:
from functions import d as _d
try:
d = _d[1]
vars_ = {k: d[k] for k in d
if k.lower().startswith(letter) and
k.lower() not in (item.lower() for item in mentioned) and
re.fullmatch(r'[-\w]+', k)}
assert vars_
def _answer(i):
p = possible[i]
return p[0]
def _description(i):
p = possible[i]
return p[1]
except AssertionError:
d = _d['3']
vars_ = {k: d[k] for k in d
if k.lower().startswith(letter) and
k.lower() not in (item.lower() for item in mentioned) and
re.fullmatch(r'[-\w]+', k)}
assert vars_
def _answer(i):
p = possible[i]
return p[0]
def _description(i):
p = possible[i]
return p[1]
finally:
del _d, d
possible = list(vars_.items()) # Should be here?
i = random.choice(range(len(possible)))
answer = _answer(i).capitalize()
description = _description(i)
msg = answer + ' (' + description + ')'
return answer, msg
except AssertionError:
maxn = 4 # perfect?
possible = list(range(1, maxn))
res_word = None
while not res_word and maxn <= 20:
possible.append(maxn)
n = possible.pop(random.choice(range(len(possible))))
url = "https://loopy.ru/?word={}{}&def=".format(letter, '*'*(n-1))
r = requests.get(url)
text = r.text
base = re.finditer(r'<div class="wd">(?:.|\n)+?</div>', text)
base = list(base)
def word(item):
return re.search(
r'<h3>.+?значение слова ([-\w]+).+?</h3>', item).group(1)
def meanings(item):
return re.findall(r'<p>(.+?)</p>', item)
while base:
item = base.pop(random.choice(range(len(base)))).group()
_word = word(item)
if _word not in mentioned:
res_word = _word
meaning = random.choice(meanings(item))
mentioned.append(_word)
break
maxn += 1
if res_word is None:
msg = \
"Wow! How can it happen? I had found any words for it."
"Game is stopped. Realise move's skip to continue" #!
bot.send_message(chat_id, msg, # continuing game — ?
reply_to_message_id=message.message_id)
return
msg = res_word + ' (' + meaning + ')'
return res_word, msg # test st.
def play_words(chat_id, message):
# """Да, тут считают, что е и ё — одна буква."""
c = configparser.ConfigParser()
chat_id = str(chat_id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
msg = "Ошибка: не найдено игру."
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
raise NoSectionError
section = c[chat_id]
order = eval(section["order"])
current = int(section["current"])
cur_letter = section["letter"]
mentioned = eval(section["mentioned"])
dot = '.' * (random.choice([0, 1, 2]) < 1)
if (match := re.fullmatch(WORDS_GAME_PATTERN, message.text)) \
and (n := message.from_user.id) in order:
if message.chat.type != 'private' and not (
# comment vvvvvvvvv
re.fullmatch(r"(?s)\!\s?(?:[-\w]+)(?:\s*\(.+\))?", message.text) or
# ^^^^^^ word
(message.reply_to_message and
message.reply_to_message.from_user.id == order[current - 1])):
return
if n != order[current]:
answer_msg = f"Не твой сейчас ход{dot}" # " Ход игрока "
# user_text_mention(user)!
bot.send_message(chat_id, answer_msg,
reply_to_message_id=message.message_id)
return #~
word = match.group(1)
print(word) # test
if cur_letter != "." and word[0].lower().replace('ё', 'е') != cur_letter:
answer_msg = f"На букву {cur_letter!r}{dot}"
bot.send_message(chat_id, answer_msg,
reply_to_message_id=message.message_id)
return
if requests.get(f"https://loopy.ru/?word={word}&def=").status_code \
== 404:
answer_msg = f"Вау. Кажется, я не знаю этого слова. Хотя, \
возможно, оно лежит где-то на полке в папке data, но я не смотрел." \
+ f" Переходи, пожалуйста{dot}" + " Что \
это слово значит? (Ход не засчитан. Потом либо напиши ответом на это \
сообщение толкование слова, я его в словарь запишу, либо назови другое \
слово. И вообще, это not implemented ещё{dot})"*feature_exists('teach_word')
bot.send_message(chat_id, answer_msg,
reply_to_message_id=message.message_id)
return
if word.casefold() in map(lambda s: s.casefold(), mentioned):
answer_msg = f"Слово {word!r} уже было в этой игре{dot}"
bot.send_message(chat_id, answer_msg,
reply_to_message_id=message.message_id)
return
mentioned.append(word)
res = word.lower().rstrip('ь')
assert re.fullmatch("(?i)[-а-яё]+", res) # really required?
letter = res[-1]
section["letter"] = letter
current = (current + 1) % len(order)
if str(order[current]) == BOT_ID:
print("Bot's move at game `words`") # test-log
try:
answer, msg = make_move(chat_id, message, letter, mentioned)
except Exception as e:
print(e)
return
current = (current + 1) % len(order)
mentioned.append(answer)
next_let = answer.lower().rstrip('ьъ')[-1]
if next_let == 'ё': next_let = 'е'
section["letter"] = next_let
bot.send_message(chat_id, msg, reply_to_message_id=message.message_id)
section["current"] = str(current)
section["mentioned"] = str(mentioned)
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
elif match:
# msg = ...
# await event.reply(msg)
pass
r'''
@bot.message_handler(commands=['test_start'])
def test_start_message(message):
if message.from_user.id not in ADMINS:
return
msg = "some msg.\n\n\
Ввод в режиме inline (перевод):"
choices = INLINE_EXAMPLES
HELP_URL = "https://telegra.ph/Test-02-20-154" # test
example = random.choice(choices)
switch_cur_chat_button = types.InlineKeyboardButton(
text="Да", switch_inline_query_current_chat=example)
go_to_help_message = types.InlineKeyboardButton(
text="Open help", url=HELP_URL)
keyboard = types.InlineKeyboardMarkup([
[switch_cur_chat_button],
[go_to_help_message]
])
bot.send_message(message.chat.id, msg, reply_markup=keyboard)
pass
'''
# examples for it:
# /do -password=pw -action=eval code
# /do -password=pw -time=mm:ss -action=eval code
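# e.g. with the pattern below, the message '/do -password=pw -action=eval 2 + 2'
# yields pw == 'pw', time == None, action == 'eval', code == '2 + 2'; if the
# password check passes (or PASSWORD_ENABLED is false), the handler eval()s the
# code and the bot replies with '4'.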
@bot.message_handler(commands=['do'])
def do_action(message):
if message.from_user.id not in ADMINS:
return
filename = join("locals", "do_logs.log")
mid = "{},{}".format(message.chat.id, message.message_id)
import os
if not 'data' in os.listdir():
os.mkdir('data')
del os
try: # to let the polls tracker go further
with open(filename, 'rt', encoding='utf-8') as f:
_data = f.read().split('\n')
if mid in _data:
return
except FileNotFoundError:
pass
except Exception as e:
print(e)
finally:
with open(filename, 'at', encoding='utf-8') as f:
f.write(mid)
f.write('\n')
pattern = (_cmd_pattern('do', flags='is')
+ r"(?: {1,2}-password=(" + r'\S+' + r"))?"
+ r"(?:\s+-time=(\d{,2}:\d{,2}))?"
+ r"(?:\s+-action=(exec|eval))?"
+ r"\s+(.+)")
string = message.text
if not (match := re.fullmatch(pattern, string)):
print(pattern, string)
return
pw, time, *other = match.groups()
uid = message.from_user.id
if PASSWORD_ENABLED:
from config import password as _password
if time is not None:
time = time.split(':')
password = _password(<PASSWORD>, time=time)
print('pw:', password)
if pw != password:
return
action, code = other
if action is None:
action = 'eval'
action0 = action
action = eval(action)
try:
if action is eval:
res = eval(code)
bot.send_message(message.chat.id, str(res),
reply_to_message_id=message.message_id)
elif action is exec:
exec(code)
except Exception as e:
msg = f"Ошибка. {e}"
bot.send_message(message.chat.id, msg,
reply_to_message_id=message.message_id)
@bot.message_handler(commands=['start'])
def start_message(message):
user_id = message.from_user.id
_add_user(user_id)
p = _cmd_pattern('start') + r'\s+[-=]?test'
if 'test_start_message' in globals() and re.fullmatch(p, message.text):
    result = test_start_message(message)
    if result:  # If result appears `None`, consider message
                # as unanswered and go to answer as to normal
                # message `/start`
        return
test = False
chat_id = message.chat.id
header = "\[test]"*test
msg = f"""\
{header}
Основные команды:
/start — это сообщение
/words — игра в слова. Глянь /words help.
/help — помощь по боту
/meaning — значение слова. Глянь /meaning для справки.
`Перевод текста`
См. /help.
"""
allow_to_all = False
if allow_to_all or is_participant(user_id):
def invite_link(chat_id_):
# No `export_chat_invite_link`.
try:
link = bot.get_chat(chat_id_).invite_link
except:
link = "not found"
return link
channel = invite_link(CHANNEL)
chat = invite_link(HEAD_CHAT)
msg += f"""
Ссылки на материалы по курсу:
🔸 канал: {channel}
🔸 чат курса: {chat}
"""
msg += """
Тест перевода:
"""
choices = INLINE_EXAMPLES
example = random.choice(choices)
switch_cur_chat_button = types.InlineKeyboardButton(
text="Да", switch_inline_query_current_chat=example)
keyboard = types.InlineKeyboardMarkup(
[[switch_cur_chat_button]]
)
bot.send_message(chat_id, msg, parse_mode='Markdown',
reply_markup=keyboard,
disable_web_page_preview=True)
@bot.message_handler(commands=["add_user", "add_users"])
def add_user_via_message(message):
uids = [message.from_user.id]
if (m := message.reply_to_message):
uid = m.from_user.id
uids.append(uid)
for uid in uids:
_add_user(uid)
bot.send_message(message.chat.id, "Пользователь добавлен.")
@bot.message_handler(commands=["help"])
def send_help_msg(message):
_add_user(message.from_user.id)
msg = """\
Переводчик на старославянский язык. Правило перевода: ввести в чате\
слово "@""" + BOT_USERNAME.replace('_', r'\_') + f"""\
\" и, после пробела, — текст для перевода.
Для отправки текста из списка нажать на тот текст.
Очень много символов за раз бот не может отправить, только около 220.
`*` Возможные ошибки: недописывание символов.
Ещё: игра в слова (см. `/words help`). Значение слова: \
см. /meaning help.
{short_cright if is_not_full else full_cright}
"""
help_message = types.InlineKeyboardButton(
text="Руководство",
url=HELP_URL)
keyboard = types.InlineKeyboardMarkup(
[[help_message]]
)
bot.send_message(message.chat.id, msg, parse_mode='Markdown',
reply_markup=keyboard)
def words_skip_move(message):
c = configparser.ConfigParser()
chat_id = str(message.chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
msg = "Игра не найдена"
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
section = c[chat_id]
order = eval(section["order"])
current = int(section["current"])
current += 1
current %= len(order)
section["current"] = str(current)
if str(order[current]) == BOT_ID:
mentioned = eval(section["mentioned"])
cur_letter = section["letter"]
letter = cur_letter
print("Bot's move") # test
answer, msg = make_move(chat_id, message, letter, mentioned)
current = (current + 1) % len(order)
section["current"] = str(current)
next_let = answer.lower().rstrip('ьъ')[-1]
mentioned.append(answer)
if next_let == 'ё': next_let = 'е'
section["letter"] = next_let
section["mentioned"] = str(mentioned)
bot.send_message(chat_id, msg, reply_to_message_id=message.message_id)
else:
msg = "Ход пропущен."
bot.send_message(chat_id, msg, reply_to_message_id=message.message_id)
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
print('Performed skip of move.') # test
@bot.message_handler(commands=['meaning'])
def send_meaning(message):
"""Priority to search for a word:
dict. 1 -> dict. '3' -> in the exact place at the I-net.
"""
chat_id = message.chat.id
bot.send_chat_action(chat_id, 'typing')
try:
cmd_pattern = _cmd_pattern('meaning')
text = message.text
word = re.fullmatch(cmd_pattern + r'\s*([-а-яё]+)', text).group(1)
except:
try:
text = message.reply_to_message.text
word = re.fullmatch(WORDS_GAME_PATTERN, text).group(1)
except:
msg = """Не распознано слово. Напиши либо в ответ на сообщение, \
где искомое слово, либо `/meaning слово`."""
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id,
parse_mode='Markdown')
return
from functions import d as d0
order = [1, '3']
def by_rule(kid):
if kid == 1:
for a, q in d.items():
a = a.replace(')', '')
a = a.replace('(', ',')
a = a.lower().split(',')
a = map(lambda ph: ph.strip(), a)
if word.lower() in a:
bot.send_message(chat_id, q,
reply_to_message_id=message.message_id)
return 0
elif kid == '3':
for k in d:
if k.lower() == word.lower():
meaning = d[k]
bot.send_message(chat_id, meaning,
reply_to_message_id=message.message_id)
return 0
for k in order:
try:
d = d0[k]
if by_rule(k) != 0:
continue
return
except:
continue
del d
del d0, order, by_rule
url = f"https://loopy.ru/?word={word}&def="
if (sc := (r := requests.get(url)).status_code) == 404:
msg = f"Слово {word!r} не найдено в словаре."
bot.send_message(chat_id, msg)
elif sc != 200:
msg = f"Непонятная ошибка. Код ошибки: {sc}."
bot.send_message(chat_id, msg)
else:
rtext = r.text
_rtext_part = rtext[rtext.find('Значения'):]
try: # great?
rtext_part = _rtext_part
rtext_part = rtext_part[:rtext_part.index('</div>')]
finds = re.findall(r'<p>(.*?)</p>', rtext_part)[1:]
# 1-st item — a header?
assert finds
except AssertionError:
rtext_part = _rtext_part
rtext_part = rtext_part[:rtext_part.index('</ul>')]
finds = re.findall(r'<li>(.*?)</li>', rtext_part)
if not finds:
bot_inform(
"A <b>great</b> error occurred: haven't found a meaning "
f"for {word!r}.", parse_mode='HTML')
return
res = random.choice(finds)
bot.send_message(chat_id, res,
reply_to_message_id=message.message_id)
@bot.message_handler(commands=['words'])
def react_game_words(message):
chat = message.chat
# Processing further actions may take a great amount of time.
bot.send_chat_action(chat.id, 'typing')
_add_user(message.from_user.id)
cmd_pattern = _cmd_pattern('words')
help_pattern = cmd_pattern \
+ r'\s(?:[-—\s:]*)(?:правила|инструкция|команды|help)\s*\??'
if re.fullmatch(cmd_pattern + r'.*?\s+[-!]?skip', message.text):
words_skip_move(message)
return
if re.fullmatch(cmd_pattern + r'\s+(?:приостановить|pause)', message.text):
bot.send_chat_action(chat.id, 'typing')
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
msg = "Игра не найдена."
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
c[chat_id]['status'] = 'paused'
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
dot = '.' * (random.random() > 1/2)
msg = f"Игра приостановлена. Продолжение: /words continue{dot}"
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
if re.fullmatch(cmd_pattern + r'\s+(?:хватит|удалить игру|stop)', message.text):
bot.send_chat_action(chat.id, 'typing')
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if c.has_section(chat_id):
c.remove_section(chat_id)
else:
msg = "Игра не найдена."
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
dot = '.' * (random.random() > 1/2)
msg = f"Текущая игра убрана{dot}"
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
if re.fullmatch(cmd_pattern + r'\s+(?:очередь|порядок|order)', message.text):
bot.send_chat_action(chat.id, 'typing')
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
bot.send_message(chat.id,
"Игра в этом чате не найдена."
)
return
section = c[chat_id]
order = eval(section["order"])
current = int(section["current"])
uid = order[current]
cuser = bot.get_chat_member(chat_id, uid).user
order_ = ', '.join(map(full_name,
(bot.get_chat_member(chat_id, _uid).user for _uid in order)))
text_mention = user_text_mention(cuser, fill_as=None)
msg = f"""Последовательность: {order_}
Сейчас ходит: {text_mention}"""
bot.send_message(chat_id, msg, parse_mode='HTML')
return
if re.fullmatch(help_pattern, message.text):
bot.send_chat_action(chat.id, 'typing')
msg = """\
Начало игры
`-----------`
В личной переписке: /words `[начать|start]` `[single]` (`single` — игра самому)
В группе: `/words пользователь\_1 ...`
◽️Имена пользователей — упоминанием;
◽️Если своё имя не указывать, оно первое в очереди
Хода
`----`
В личной переписке: `!слово` либо `слово`
В группе: либо `!слово`, либо `слово` в ответ на сообщение того, кто ходил прошлым.
Другие:
`-------`
`/words pause``|``приостановить` — остановка игры
`/words stop``|``хватит|удалить игру` — прекратить игру и удалить
`/words skip` — пропуск хода
`/words order``|``очередь|порядок` — порядок ходов, текущий игрок
`/words help``|``правила|инструкция|команды` — это сообщение
`/words continue``|``продолжить` — продолжение (после `pause`)
"""
bot.send_message(chat.id, msg, parse_mode='Markdown')
return
if re.fullmatch(cmd_pattern + r'\s+(?:продолжить|continue)', message.text):
bot.send_chat_action(chat.id, 'typing')
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if (c.has_option(chat_id, 'status') and
c[chat_id]['status'] == 'paused'):
c.set(chat_id, 'status', 'active')
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
dot = '.' * (random.random() > 1/2)
msg = f"Игра продолжена{dot}"
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return
if chat.type == 'private':
if 'single' in message.text:
order = [message.from_user.id]
else:
order = [message.from_user.id, eval(BOT_ID)]
current = 0
mentioned = []
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if c.has_section(chat_id):
if not re.search('(?:начать|start)', message.text):
msg = "Игра уже есть. Новая игра: /words начать|start. " \
"Также см.: /words help."
bot.send_message(chat_id, msg,
reply_to_message_id=message.message_id)
return # Keep the already registered game; don't start a new one.
if not c.has_section(chat_id):
c.add_section(chat_id)
section = c[chat_id]
section["order"] = str(order)
section["current"] = str(current)
section["letter"] = ANY_LETTER
section["mentioned"] = str(mentioned)
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
print("Registered. chat_id: " + chat_id)
bot.send_message(chat_id, "Done. Registered.")
elif 'group' in chat.type:
def user_id_(e):
if e.type == 'text_mention':
return e.user.id
elif e.type == 'mention':
users = load_users()
# +1: Skips `@`
uname = message.text[e.offset + 1 : e.offset + e.length]
for user_id in users:
if (_user := _chatMember(user_id, of=chat.id).user) \
and _user.username == uname:
return user_id
msg = f"Unknown user: @{uname}"
bot.send_message(chat.id, msg,
reply_to_message_id=message.message_id)
order = [user_id_(e) for e in message.entities[1:]]
if None in order:
return
if (n := message.from_user.id) not in order:
order = [n] + order
current = 0
mentioned = []
c = configparser.ConfigParser()
chat_id = str(chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
c.add_section(chat_id)
section = c[chat_id]
section["order"] = str(order)
section["current"] = str(current)
section["letter"] = ANY_LETTER
section["mentioned"] = str(mentioned)
with open(filename, 'w', encoding='utf-8') as f:
c.write(f)
print("Registered. chat_id: " + chat_id)
bot.send_message(chat_id, "Done. Registered.")
@bot.message_handler(content_types=['new_chat_members'])
def greet_new_chat_member(message):
user = message.new_chat_members[0]
chat = message.chat
_add_user(message.from_user.id)
_add_user(user.id)
if chat.id == SPEAK_CHAT \
and TOKEN == TOKEN_INIT: # !
if not is_participant(user.id):
until_date = 0
bot.send_message(chat.id, "Сюда можно онли участникам чата курса. Сóри.",
reply_to_message_id=message.message_id)
bot.kick_chat_member(chat.id, user.id, until_date=until_date)
return
dash = "—" # m-dash # <- ?
is_test_msg = False
till = (
"28.02.2021, 06.03.2021, 07.03.2021, 12.05.2021, 13.05.2021"
)
msg = f"""{"[Это тест.]"*is_test_msg}
Привет, {user_text_mention(user)}!
Этот чат — флудилка, не основной чат. Ссылка на основной чат находится в \
закрепе. Бот может быть полезен попыткой перевода текста на старославянский \
язык. Инструкция: см. /help@{BOT_USERNAME}.
© Anonym, 27.01.2021{dash}{till}
"""
bot.send_message(message.chat.id, msg, parse_mode='HTML')
elif chat.id == HEAD_CHAT:
msg = f"""\
Здравствуй, <a href="tg://user?id={user.id}">\
{user.first_name}{" " + u if (u := user.last_name) else ""}</a>!
Это основной чат курса, ссылка на флудилку есть в закрепе.
Бот может быть полезен попыткой перевода текста на старославянский \
язык, транслитерацией в старославянские алфавиты. Инструкция: \
см. /help@{BOT_USERNAME}.
"""
bot.send_message(message.chat.id, msg, parse_mode='HTML')
elif message.chat.id == TEST_CHAT:
msg = f"""\
{user_text_mention(user)}
Этот чат — тестовый чат.
/start
/help@{BOT_USERNAME}.
"""
bot.send_message(message.chat.id, msg, parse_mode='HTML')
@bot.inline_handler(func=lambda query: 0 < len(query.query) <= 255) # Or 256?
def answer_query(query):
print('user_id:', query.from_user.id) # test
try:
answers = []
text = query.query
print('query:', text) # test
if any(text.startswith(k) for k in NAMES_REPLACE):
show_text = text
pairs = {
'<': "&lt;",
'>': "&gt;",
'&': "&amp;"
}
def _repl(s_part):
return pairs[s_part]
pattern = '|'.join(pairs.keys())
repl = lambda match: _repl(match.group())
def html_readable(s):
return re.sub(pattern, repl, s)
text = html_readable(text)
for k in NAMES_REPLACE:
i1, i2 = NAMES_REPLACE[k]
show_text = show_text.replace(k, i1)
i1 = html_readable(i1)
text = text.replace(k,
'<a href="tg://user?id={1}">{0}</a>'.format(i1, i2))
a_1 = types.InlineQueryResultArticle(
id='0',
title="Смена слов",
description=show_text,
input_message_content=types.InputTextMessageContent(
parse_mode='HTML',
message_text=text)
)
answers.append(a_1)
text = query.query
# Parse mode here — ?
# And sending the text in HTML/Markdown.
text_cyr = translation(text, dest="cyryllic")
cyr = types.InlineQueryResultArticle(
id='1',
title="Перевод на кириллицу",
description=text_cyr,
input_message_content=types.InputTextMessageContent(
message_text=text_cyr),
thumb_url=A_CYRYLLIC, thumb_width=48, thumb_height=48,
)
text_gla = translation(text, dest="glagolic")
gla = types.InlineQueryResultArticle(
id='2',
title="Перевод на глаголицу",
description=text_gla,
input_message_content=types.InputTextMessageContent(
message_text=text_gla),
thumb_url=A_GLAGOLIC, thumb_width=48, thumb_height=48,
)
text_transliterated_gla = glagolic_transliterate(text)
transliterated_gla = types.InlineQueryResultArticle(
id='3',
title="Транслитерация на глаголицу",
description=text_transliterated_gla,
input_message_content=types.InputTextMessageContent(
message_text=text_transliterated_gla),
thumb_url=A_LATER_GLAGOLIC, thumb_width=48, thumb_height=48,
)
answers = [cyr, gla, transliterated_gla] + answers
bot.answer_inline_query(query.id, answers, cache_time=CACHE_TIME)
except Exception as e:
print(type(e), ': ', e, sep='')
@bot.inline_handler(func=lambda query: not query.query)
def answer_empty_query(query):
_add_user(query.from_user.id)
try:
thumb_url = A_LATER_GLAGOLIC
r = types.InlineQueryResultArticle(
id='1',
title="Перевод на славянские языки: кириллица, глаголица.",
input_message_content=types.InputTextMessageContent(
message_text="..."
),
thumb_url=thumb_url, thumb_width=48, thumb_height=48,
description="Введи текст для перевода, жми на нужный для отправки"
)
bot.answer_inline_query(query.id, [r], cache_time=CACHE_TIME)
except Exception as e:
print(e)
@bot.message_handler(content_types=['text'])
def answer_message(message):
_add_user(message.from_user.id)
c = configparser.ConfigParser()
chat_id = str(message.chat.id)
filename = GAME_WORDS_DATA
c.read(filename, encoding='utf-8')
if not c.has_section(chat_id):
# Answer can be performed only if the section exists.
return
section = c[chat_id]
order = eval(section["order"])
current = int(section["current"])
if (c.has_option(chat_id, 'status') and
c.get(chat_id, 'status') == 'paused'):
return
if message.chat.type == 'private':
pattern = '(?i)(!)?[-а-яё]+'
if (s := re.fullmatch(pattern, message.text)):
play_words(chat_id, message)
else:
pattern = WORDS_GAME_PATTERN
if re.fullmatch(pattern, message.text):
play_words(chat_id, message)
bot.infinity_polling()
```
#### File: 1-1-1-1-1-1-1-1/Old-Slavonic-bot/_write_files.py
```python
allow_run_as_script = True
# *Note*: To run as a script, do not choose versions at `version`.
import os
import re
import warnings
import typing
from typing import Optional, Union, NoReturn, Literal
from pathlib import Path
import configparser # See the tracker of changes made to files.
# That tracker lives in _changed_tracker.py.
import time
from copy import copy
import argparse
from globalconfig import INITIAL_FILE
from meta.utils.parse_comment import load_comments
from meta.utils.versions_about import parse_version
_VERSION_NAME_TYPE = Optional[Union[re.Pattern, str]]
DEFAULT_ALL_FILES = [
INITIAL_FILE,
"config.py",
"requirements.txt"
]
current_version_source = Path("meta") / "versions-controll" / "cversion.txt"
try:
with open(current_version_source,
encoding='utf-8') as f:
_version_to_choose_pattern = f.read().strip()
version_to_choose_pattern = eval(_version_to_choose_pattern)
except:
version_to_choose_pattern = None
filename = os.path.split(__file__)[-1]
version_to_choose: _VERSION_NAME_TYPE = version_to_choose_pattern
# Whether warning about versions' difference is enabled.
warn_about_v_difference_enabled = True
version = [
"aiogram",
"->telethon",
"telebot"
]
def choose_version(v_name: _VERSION_NAME_TYPE = None) \
-> None:
"""A utility to choose a version.
Usage
-----
- Pass as v_name a pattern that matches the start of the desired
version's name. Exactly one version should match that pattern.
- Pass None to choose nothing.
"""
global version
if not v_name:
return
regexp = v_name
_index = [item for item in version if re.match(regexp, item.lstrip('->'))]
if not _index:
msg = (
"Index to change chosen version not found. "
"Doing nothing."
)
warnings.warn(msg)
return
elif len(_index) > 1:
warnings.warn(
"Multiple versions to change the choice found. "
"Doing nothing."
)
return
indexed: str = _index[0].lstrip('->')
for i, name in enumerate(version):
name = name.lstrip("->")
if name == indexed:
name = '->' + name
version[i] = name
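# Example (assumed usage): with the default `version` list above,
# choose_version("teleb") marks the "telebot" entry as "->telebot"
# (stripping any previous "->" marker), while choose_version("aio")
# would select "aiogram". An ambiguous pattern such as "tele" matches
# two entries, so the function warns and changes nothing.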
choose_version(version_to_choose)
# Syntax to choose an item: change "item" to "->item".
candidates = [item for item in version if item.startswith('->')]
if len(candidates) > 1:
raise SystemExit("Can't choose 2 versions simultaneously.")
elif not candidates:
version = None
else:
version = candidates[0].lstrip('->').strip()
# === configparser meta-utils =================================================
c = configparser.ConfigParser()
tracker_config_filename = os.path.join("locals", 'tracker.ini')
c.read(tracker_config_filename, encoding='utf-8')
def _disable_tracker():
"""Disable the tracker. See: _changed_tracker.py."""
if not c.has_section('main'):
return
c['main']['requires_set_defaults'] = 'true'
c['main']['can_reload'] = 'false'
with open(tracker_config_filename, 'w') as f:
c.write(f)
def _allow_reload_tracker_defaults():
"""Allow the tracker being reloaded. See: _changed_tracker.py."""
if not c.has_section('main'):
return
c['main']['requires_set_defaults'] = 'true'
with open(tracker_config_filename, 'w') as f:
c.write(f)
# === Main function ===========================================================
def main(v_name=None, files=[], *,
encoding='utf-8'):
"""Write files `files` to the root, doing additional stuff.
Additional stuff
----------------
Update content of current-version.txt and ensure that versions
at `files` do correspond to versions at `INITIAL_FILE`; if not,
make a corresponding mark / note.
On success print corresponding messages to the buffer/stream.
While doing all, disable tracker; enabled at the end.
"""
#
# Do some preliminary stuff.
#
assert files
if encoding in {'basic', 'utf-8'}:
encoding = 'utf-8'
else:
exc_message = "encoding undefined"
raise TypeError(exc_message)
# If v_name is not given, get it via the CLI.
if not v_name:
help_message = f"""Usage:
{filename} library
Writes files {', '.join(files)} to the root folder.
"""
parser = argparse.ArgumentParser(description=help_message)
parser.add_argument('v_name', type=str, help='Name of library')
args = parser.parse_args()
v_name = args.v_name
print(
"🛑Disabling tracker of version files, written to the root..."
)
_disable_tracker()
time.sleep(0.5) # Prevent some strange behaviour.
print(f"Writing with version: {v_name}")
root = os.path.abspath(os.curdir)
folder = Path(root) / 'meta' / 'versions-controll'
exact_name = None
for name in os.listdir(folder):
if v_name.lower() in name.lower():
exact_name = name
break
if exact_name:
os.chdir(folder)
os.chdir(exact_name)
path = os.path.abspath(os.curdir)
else:
er_msg = f"undefined folder's name: of version {v_name}. Sure?"
raise TypeError(
er_msg
)
os.chdir(root); del root
#
# Writing files to the root
#
comments = load_comments()
for fname in files:
name, ext = os.path.splitext(fname)
mod_fname = name + '-' + v_name + ext
with open(os.path.join(path, mod_fname), encoding=encoding) as f:
data = f.readlines()
for index, line in enumerate(data): # Set comments.
match = re.fullmatch(r'#+\s+<comment:(\w+):(\w+)>' + r'\n',
line,
re.IGNORECASE)
if not match:
continue
c_type, lib_name_local = match.groups()
comment = comments[c_type]
data[index] = comment
data = ''.join(data)
with open(fname, 'w', encoding=encoding) as f:
f.write(data)
print(f"\t🖊️Wrote file {fname} to root")
# Detect the main file.
if INITIAL_FILE in files:
main_file = INITIAL_FILE
else:
main_file = files[0]
# Open just written file, find version's full name.
version_is_undefined = False
with open(main_file, encoding=encoding) as f:
index = 0
while index < 3:
line = f.readline()
part = '# version:'
if line.startswith(part):
full_name = line.split(part)[1].strip()
break
index += 1
else:
version_is_undefined = True
full_name = ':'.join(
(
v_name,
"undefined"
)
)
print(
"📚Read version full name from {0}. ".format(main_file)
+ "Name of version: {0}".format(full_name)
)
library = v_name
# Get version info as a tuple.
v_info: tuple[str, Optional[str]] = parse_version(full_name)[1:]
name = '-'.join(filter(lambda i: i,
v_info))
version_number: str = v_info[0]
pattern = r'(.+)\+'
def assert_form(s: str) -> bool:
_result = re.fullmatch(pattern, s)
return bool(_result)
if assert_form(version_number):
msg = (
"version number at {0} is not strict, ".format(main_file)
+ "be careful"
)
warnings.warn(msg)
# Warn about the version difference, if required.
if not version_is_undefined and warn_about_v_difference_enabled:
def another_version_marked_exists() -> Optional[Literal[0]]:
# Whether file, marked with another version, exists.
# Return 0 if the answer is yes, None otherwise.
nonlocal pattern
for fname in set(files) - {main_file}:
with open(fname, encoding=encoding) as f:
lines_: list[str] = f.readlines()
part = '# version:'
# Find the version if given, do stuff if another version
# found.
for line in lines_:
if line.startswith(part):
full_name_local = line.split(part)[1].strip()
v_number_local = parse_version(full_name_local)[1]
match = re.fullmatch(pattern, v_number_local)
if match:
# Ignore the match failure when v_number_local
# ends with '+' and it's a "<v_local_>+",
# v_local_ >= version_number.
v_number_local_ = match.group(1)
# Check whether version_number and same local
# do match '[0-9]+\.[0-9]+\.(?:[0-9]+)?'.
pattern = r'[0-9]+\.[0-9]+\.(?:[0-9]+)?'
if assert_form(version_number) \
and assert_form(v_number_local_):
tuple_initial = version_number.split('.')
tuple_initial = tuple(map(int,
tuple_initial))
tuple_local = v_number_local_.split('.')
tuple_local = tuple(map(int,
tuple_local))
if tuple_initial >= tuple_local:
continue
else:
return 0
else:
# At least one of that strings
# does not match pattern.
pass
# Now v_number_local doesn't end with '+'.
if full_name_local != full_name:
return 0
_do_warn_about_version_difference = another_version_marked_exists()
if _do_warn_about_version_difference == 0:
msg = (
"❗ Versions at files may be inappropriate "
"with each other, be careful"
)
warnings.warn(msg)
else:
print("✓ No imcompatible versions at version files found.")
current_version_fname = 'current-version.txt'
#
# Do some stuff with this file.
#
# Get `data` and `lines`.
with open(current_version_fname, encoding=encoding) as f:
data: list[str] = f.readlines()
data_copy: list[str] = copy(data)
started: bool = False
lines: list[int] = []
# `lines` is a list of possible lines' indexes to change.
i = 0
while data_copy:
line = data_copy.pop(0)
if line.startswith(".. version"):
# Mark that the search started.
started = True
elif line.lstrip('\t\f ') != line or not line.rstrip('\n'):
lines.append(i)
elif started:
break
i += 1
# Change some "lines", if required.
for i in lines:
if (str_ := ':type:') in data[i]:
j = data[i].index(str_) + len(str_)
data[i] = data[i][:j] + ' at ' + library
data[i] += '\n'
elif (str_ := ':name:') in data[i]:
j = data[i].index(str_) + len(str_)
data[i] = data[i][:j] + ' ' + name
data[i] += '\n'
# Update content at file current_version_fname.
with open(current_version_fname, 'w', encoding=encoding) as f:
f.write(''.join(data))
print(f"📔Updated the content of file {current_version_fname}.")
#
# Allow tracker being reloaded. After doing it all is considered done.
#
_allow_reload_tracker_defaults()
print("✅Allowed the tracked being worked further, "
"reloading variables. All is done.")
if __name__ == '__main__':
files = DEFAULT_ALL_FILES
main(version, files)
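# Example (assumed CLI usage) when no version is pre-selected above:
#     python _write_files.py telebot
# would copy INITIAL_FILE, config.py and requirements.txt from the matching
# folder under meta/versions-controll/ into the repository root.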
``` |
{
"source": "111114342/skrproject",
"score": 2
} |
#### File: skrproject/adminapp/views.py
```python
from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, DeleteView, DetailView, ListView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.urls import reverse
from app1.models import Contact, ServiceEnq, CourseEnq
# Create your views here.
def AdminHome(request):
return render(request, 'adminapp/adminhome.html')
# =========================contact us ============================
# Contact Us list view
@method_decorator(login_required, name='dispatch')
class ContactUsList(ListView):
model = Contact
template_name = 'adminapp/contactuslist.html'
context_object_name = 'contactenq'
@login_required
def DeleteContactEnq(request, id):
mydict = {'msg':'Enquiry Deleted'}
del_enq = Contact.objects.get(id=id)
del_enq.delete()
del_enq = Contact.objects.all()
return HttpResponseRedirect("/contactenqlist/")
#
# class DeleteContactEnq(DeleteView):
# model = Contact
# def get_success_url(self):
# return reverse('contactus_list')
# =========================Service Enq ============================
# Service Enq list view
@method_decorator(login_required, name='dispatch')
class ServiceEnqList(ListView):
model = ServiceEnq
template_name = 'adminapp/serviceenqlist.html'
context_object_name = 'serviceenq'
@login_required
def DeleteServiceEnq(request, id):
mydict = {'msg':'Service Enq Deleted'}
del_serv_enq = ServiceEnq.objects.get(id=id)
del_serv_enq.delete()
del_serv_enq = ServiceEnq.objects.all()
return HttpResponseRedirect("/serviceenqlist/")
# =========================Course Enq ============================
#Course Enq list view
@method_decorator(login_required, name='dispatch')
class CourseEnqList(ListView):
model = CourseEnq
template_name = 'adminapp/courseenqlist.html'
context_object_name = 'courseenq'
@login_required
def DeleteCourseEnq(request, id):
mydict = {'msg':'Course Enq Deleted'}
del_course_enq = CourseEnq.objects.get(id=id)
del_course_enq.delete()
del_course_enq = CourseEnq.objects.all()
return HttpResponseRedirect("/courseenqlist/")
``` |
{
"source": "1111goliathus/discordpy-startup",
"score": 2
} |
#### File: 1111goliathus/discordpy-startup/mochikoshi.py
```python
import discord
import re
import re_YuniBot
# Channel information
CHANNEL_ID = re_YuniBot.get_channel_id("開発用")
# For debugging:
# post to a personal server instead
#CHANNEL_ID = re_YuniBot.get_channel_id("tomobot")
TOKEN = re_YuniBot.get_token("<PASSWORD>")
client = discord.Client()
mochikoshi_list = {}
'''
Carry-over time detection:
strings matching the following patterns are treated as time information
\d:\d\d (ex: 0:45)
\d\d\d (ex: 113) calc only
\d\ds (ex: 53s) apply only
\d\d (ex: 53) calc only
Treating 3-digit numbers as times also replaces damage figures inside the TL,
so a better approach is still under consideration
'''
'''
Known bugs:
- When several of the final UBs share the same time, all but the first are removed
'''
def calc_sec(text):
# Slice out only the digit characters
res = str(text).replace(":", "").replace('s', "")
# Normalize the string to exactly three characters
if len(res) == 2:
res = "0" + res
# Compute the time in seconds
t = int(res[0]) * 60 + int(res[1:])
return t
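# Worked examples (assumed usage of calc_sec):
# calc_sec("1:23") -> "123" -> 1*60 + 23 = 83 seconds
# calc_sec("53s") -> "53" -> padded to "053" -> 0*60 + 53 = 53 seconds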
# Function that computes the carry-over time
## text: the posted message
def calc_mochikoshi(text):
# Check for a match using a regular expression
patten = re.compile(r'(\d:\d\d)|(\d\d\d)|(\d\d)')
result = re.match(patten, text.content)
# If a matching pattern was found
if result:
t = calc_sec(result.group(0))
if int(t) <= 0 or int(t) > 90:
raise Exception
# Record the result
mochikoshi_list[text.author] = int(t)
return t
def apply_mochikoshi(message):
msg = str(message.content)
# Check for a match using a regular expression
patten = re.compile(r'(\d:\d\d)|(\d\ds)')
result = re.match(patten, msg)
# Collect the list of substrings that match the pattern
l = re.findall(patten, msg)
# For each element, build the (before, after) replacement pair
li = {}
for i in l:
for j in i:
if j != '':
t = mochikoshi_list[message.author] - (90 - calc_sec(j))
# Add it to the list only if at least one second remains
if (t > 0):
rep = ""
if (t >= 60):
rep += "1:"
t -= 60
else:
rep += '0:'
if len(str(t)) == 1:
rep += '0'
rep += str(t)
li[j] = rep
# Replacing the larger values first could replace the same substring more
# than once, so sort in ascending order instead
li_sorted = sorted(li.items(), key=lambda x: x[0])
print(li_sorted)
# Perform the replacements
for before, after in li_sorted:
msg = msg.replace(before, after)
# Remove everything after the first list element (the last UB)
msg = msg.split(li_sorted[0][1])[0] + str(li_sorted[0]
[1]) + msg.split(li_sorted[0][1])[1].splitlines()[0]
return msg
@client.event
async def on_message(message):
channel = client.get_channel(CHANNEL_ID)
# print(message.author)
# Message posted to a different channel
if (message.channel.id != CHANNEL_ID):
return
# The message sender is the bot itself
if (message.author.bot):
return
# No carry-over seconds stored for this user yet
if (message.author not in mochikoshi_list):
try:
# Get the carry-over time from the input
mochikoshi_time = calc_mochikoshi(message)
# Send the message
await channel.send(f'{message.author.mention}' +
" 持ち越し時間:" +
str(mochikoshi_list[message.author]) +
"秒")
except:
await channel.send(f'{message.author.mention}' + "持ち越し時間の取得に失敗しました")
if message.author in mochikoshi_list:
del mochikoshi_list[message.author]
else:
try:
# Get the converted TL
tl = apply_mochikoshi(message)
# Send the message
await channel.send(tl)
del mochikoshi_list[message.author]
except:
await channel.send(f'{message.author.mention}' + "TL変換に失敗しました")
if message.author in mochikoshi_list:
del mochikoshi_list[message.author]
client.run(TOKEN)
``` |
{
"source": "1116574/vulcan-api",
"score": 2
} |
#### File: vulcan-api/tests/conftest.py
```python
import pytest
from dotenv import load_dotenv, find_dotenv
from utils import *
from vulcan import Vulcan
load_dotenv(find_dotenv())
@pytest.mark.private
@pytest.mark.online
@pytest.fixture
def klient():
cert = {
k: load_variable(k)
for k in [
"CertyfikatPfx",
"CertyfikatKluczSformatowanyTekst",
"CertyfikatKlucz",
"AdresBazowyRestApi",
]
}
yield Vulcan(cert)
```
#### File: vulcan-api/tests/utils.py
```python
from datetime import date
from os import environ
PARAMS_LESSON_PLAN = [
(
date(2018, 9, 4),
[
{"IdPrzedmiot": 173, "IdPracownik": 99},
{"IdPrzedmiot": 123, "IdPracownik": 101},
{"IdPrzedmiot": 172, "IdPracownik": 92},
{"IdPrzedmiot": 189, "IdPracownik": 91},
{"IdPrzedmiot": 119, "IdPracownik": 100},
{"IdPrzedmiot": 175, "IdPracownik": 97},
{"IdPrzedmiot": 118, "IdPracownik": 89},
],
)
]
PARAMS_TESTS = [
(date(2018, 10, 5), [{"Id": 661, "IdPrzedmiot": 177, "IdPracownik": 87}]),
(
date(2018, 10, 23),
[
{"Id": 798, "IdPrzedmiot": 173, "IdPracownik": 99},
{"Id": 838, "IdPrzedmiot": 172, "IdPracownik": 92},
],
),
]
PARAMS_HOMEWORKS = [
(
date(2018, 10, 23),
[
{"Id": 305, "IdPracownik": 100, "IdPrzedmiot": 119},
{"Id": 306, "IdPracownik": 100, "IdPrzedmiot": 119},
],
)
]
def load_variable(name):
return environ.get(name)
```
#### File: vulcan-api/vulcan/_pupil.py
```python
from aenum import Enum, unique
from ._class import Klasa
from ._period import Okres
from ._school import Szkola
@unique
class Plec(Enum):
"""Płeć"""
KOBIETA = 0
MEZCZYZNA = 1
class Uczen:
"""
Uczeń
Attributes:
id (:class:`int`): ID ucznia
nazwa (:class:`str`): Nazwisko, imię oraz drugie imię ucznia
imie (:class:`str`): Pierwsze imię ucznia
drugie_imie (:class:`str` or :class:`None`): Drugie imię ucznia
nazwisko (:class:`str`): Nazwisko ucznia
pseudonim (:class:`str` or :class:`None`): Pseudonim ucznia
plec (:class:`vulcan.models.Plec`): Płeć ucznia
okres (:class:`vulcan.models.Okres`): Aktualny okres klasyfikacyjny ucznia
klasa (:class:`vulcan.models.Klasa`): Klasa ucznia
szkola (:class:`vulcan.models.Szkola`): Szkoła ucznia
"""
def __init__(
self,
id=None,
login_id=None,
nazwa=None,
imie=None,
drugie_imie=None,
nazwisko=None,
pseudonim=None,
plec=None,
okres=None,
klasa=None,
szkola=None,
):
self.id = id
self.login_id = login_id
self.nazwa = nazwa
self.imie = imie
self.drugie_imie = drugie_imie
self.nazwisko = nazwisko
self.pseudonim = pseudonim
self.plec = plec
self.okres = okres
self.klasa = klasa
self.szkola = szkola
def __repr__(self):
return "<Uczen {!r}>".format(self.nazwa)
@classmethod
def from_json(cls, j):
id = j.get("Id")
login_id = j.get("UzytkownikLoginId")
nazwa = j.get("UzytkownikNazwa")
imie = j.get("Imie")
drugie_imie = j.get("Imie2") or None
nazwisko = j.get("Nazwisko")
pseudonim = j.get("Pseudonim")
plec = Plec(j.get("UczenPlec"))
okres = Okres.from_json(j)
klasa = Klasa.from_json(j)
szkola = Szkola.from_json(j)
return cls(
id=id,
login_id=login_id,
nazwa=nazwa,
imie=imie,
drugie_imie=drugie_imie,
nazwisko=nazwisko,
pseudonim=pseudonim,
plec=plec,
okres=okres,
klasa=klasa,
szkola=szkola,
)
``` |
{
"source": "1116574/WarsawGTFS",
"score": 2
} |
#### File: WarsawGTFS/src/static.py
```python
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from warnings import warn
from datetime import date, datetime, timedelta
from ftplib import FTP
import libarchive.public
import requests
import json
import csv
import re
import os
import zipfile
from .utils_static import *
from .parser import Parser
from .utils import *
# List of rail stops used by S× lines. Other rail stops are ignored.
ACTIVE_RAIL_STATIONS = {
"4900", "4901", "7900", "7901", "7902", "2901", "2900", "2918", "2917", "2916", "2915",
"2909", "2908", "2907", "2906", "2905", "2904", "2903", "2902", "4902", "4903", "4923",
"4904", "4905", "2914", "2913", "2912", "2911", "2910", "4919", "3901", "4918", "4917",
"4913", "1910", "1909", "1908", "1907", "1906", "1905", "1904", "1903", "1902", "1901",
"7903", "5908", "5907", "5904", "5903", "5902"
}
PROPER_STOP_NAMES = {
"4040": "<NAME>", "1484": "<NAME>",
"2005": "Praga-Płd. - Ratusz", "1541": "<NAME>",
"5001": "Połczyńska - Parking P+R", "2296": "<NAME>",
"6201": "<NAME>", "1226": "Mańki-Wojody",
}
class Converter:
def __init__(self, version="", shapes=False, clear_shape_errors=True):
clear_directory("gtfs")
if clear_shape_errors: clear_directory("shape-errors")
# Stop info
self.missing_stops = requests.get("https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/missing_stops.json").json()
self.rail_platforms = requests.get("https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/rail_platforms.json").json()
self.incorrect_stops = []
self.unused_stops = list(self.missing_stops.keys())
self.stops_map = {}
self.stop_names = PROPER_STOP_NAMES.copy()
# File handler
self.version = None
self.reader = None
self.parser = None
# Get shape generator instance
if isinstance(shapes, Shaper):
self.shapes = shapes
self.shapes.open()
elif shapes:
self.shapes = Shaper()
self.shapes.open()
else:
self.shapes = None
self.get_file(version)
def get_file(self, version):
"Download and decompress schedules for current data. Returns tuple (TemporaryFile, version) - and that TemporaryFile is decompressed .TXT file"
# Login to ZTM server and get the list of files
server = FTP("rozklady.ztm.waw.pl")
server.login()
files = [f for f in server.nlst() if re.fullmatch(r"RA\d{6}\.7z", f)]
# If user has requested an exact version, check if it's on the server
if version:
fname = "{}.7z".format(version)
if fname not in files:
raise KeyError("Requested file version ({}) not found on ZTM server".format(version))
# If not, find one valid today
else:
fdate = date.today()
while True:
fname = fdate.strftime("RA%y%m%d.7z")
if fname in files: break
else: fdate -= timedelta(days=1)
# Create temporary files for storing the 7z archive and the decompressed TXT file
temp_arch = NamedTemporaryFile(mode="w+b", delete=False)
self.reader = NamedTemporaryFile(mode="w+t", delete=True)
try:
# Download the file
server.retrbinary("RETR " + fname, temp_arch.write)
server.quit()
temp_arch.close()
# Open the temporary archive inside
with libarchive.public.file_reader(temp_arch.name) as arch:
# Iterate over each file inside the archive
for arch_file in arch:
# Assert the file inside the archive is the TXT file we're looking for
name_match = re.fullmatch(r"(RA\d{6})\.TXT", arch_file.pathname, flags=re.IGNORECASE)
if not name_match:
continue
# Save the feed version
self.version = name_match[1].upper()
# Decompress the TXT file block by block and save it to the reader
for block in arch_file.get_blocks():
self.reader.write(str(block, "cp1250"))
self.reader.seek(0)
# only one TXT file should be inside the archive
break
else:
raise FileNotFoundError("no schedule file found inside archive {}".format(fname))
# Remove the temp arch file at the end
finally:
os.remove(temp_arch.name)
self.parser = Parser(self.reader)
def calendar(self):
file = open("gtfs/calendar_dates.txt", mode="w", encoding="utf8", newline="")
writer = csv.writer(file)
writer.writerow(["service_id", "date", "exception_type"])
print("\033[1A\033[K" + "Parsing calendars (KA)")
for day in self.parser.parse_ka():
for service_id in day["services"]:
writer.writerow([service_id, day["date"], "1"])
file.close()
def _stopgroup_railway(self, writer, group_id, group_name):
# Load ZTM stakes from PR section
# self.parser.parse_pr() has to be called to skip to the next entry in ZP
stakes = list(self.parser.parse_pr())
# If group is not in ACTIVE_RAIL_STATIONS, ignore it
if group_id not in ACTIVE_RAIL_STATIONS:
for s in stakes: self.stops_map[s["id"]] = None
return
# Basic info about the station
station_info = self.rail_platforms.get(group_id, {})
# If this station is not in rail_platforms, average all stake positions
# In order to calculate an approx position of the station
if not station_info:
stake_positions = [(i["lat"], i["lon"]) for i in stakes]
stake_positions = [i for i in stake_positions if i[0] and i[1]]
if stake_positions:
station_lat, station_lon = avg_position(stake_positions)
# No position for the station
else:
for s in stakes: self.stops_map[s["id"]] = None
self.incorrect_stops.append(group_id)
return
# Otherwise get the position from rail_platforms data
else:
station_lat, station_lon = map(float, station_info["pos"].split(","))
group_name = station_info["name"]
# One Platform or No Platform data
if (not station_info) or station_info["oneplatform"]:
# Save position for shapes
if self.shapes:
self.shapes.stops[group_id] = station_lat, station_lon
# Add info for stops_map
for stake in stakes:
self.stops_map[stake["id"]] = group_id
# Output info to GTFS
writer.writerow([
group_id, group_name, station_lat, station_lon,
"", "", station_info.get("ibnr_code", ""),
"", station_info.get("wheelchair", 0),
])
# Multi-Platform station
else:
# Hub entry
writer.writerow([
group_id, group_name, station_lat, station_lon,
"1", "", station_info["ibnr_code"],
"", station_info.get("wheelchair", 0),
])
# Platforms
for platform_id, platform_pos in station_info["platforms"].items():
platform_lat, platform_lon = map(float, platform_pos.split(","))
platform_code = platform_id.split("p")[1]
platform_name = f"{group_name} peron {platform_code}"
# Save position for shapes
if self.shapes:
self.shapes.stops[platform_id] = platform_lat, platform_lon
# Output to GTFS
writer.writerow([
platform_id, platform_name, platform_lat, platform_lon,
"0", group_id, station_info["ibnr_code"],
platform_code, station_info.get("wheelchair", 0),
])
# Stops → Platforms
for stake in stakes:
# Defined stake in rail_platforms
if stake["id"] in station_info["stops"]:
self.stops_map[stake["id"]] = station_info["stops"][stake["id"]]
# Unknown stake
elif stake["id"] not in {"491303", "491304"}:
warn(f'No platform defined for railway PR entry {group_name} {stake["id"]}')
def _stopgroup_normal(self, writer, group_id, group_name):
# Load ZTM stakes from PR section
# self.parser.parse_pr() has to be called to skip to the next entry in ZP
stakes = list(self.parser.parse_pr())
# Split virtual stakes from normal stakes
virtual_stakes = [i for i in stakes if i["code"][0] == "8"]
normal_stakes = [i for i in stakes if i["code"][0] != "8"]
# Load positions from missing_stops to normal_stakes
for idx, stake in enumerate(normal_stakes):
if (stake["lat"] == None or stake["lon"] == None) and \
stake["id"] in self.missing_stops:
self.unused_stops.remove(stake["id"])
stake["lat"], stake["lon"] = self.missing_stops[stake["id"]]
normal_stakes[idx] = stake
position_stakes = [i for i in normal_stakes if i["lat"] and i["lon"]]
# Convert normal stakes
for stake in normal_stakes:
# Position defined
if stake["lat"] and stake["lon"]:
# Save position for shapes
if self.shapes:
self.shapes.stops[stake["id"]] = stake["lat"], stake["lon"]
# Output info to GTFS
writer.writerow([
stake["id"], f'{group_name} {stake["code"]}',
stake["lat"], stake["lon"],
"", "", "", "", stake["wheelchair"],
])
# Position undefined
else:
self.stops_map[stake["id"]] = None
self.incorrect_stops.append(stake["id"])
# Convert virtual stops
for stake in virtual_stakes:
stakes_with_same_pos = [i["id"] for i in position_stakes if \
(i["lat"], i["lon"]) == (stake["lat"], stake["lon"])]
stakes_with_same_code = [i["id"] for i in position_stakes if \
i["code"][1] == stake["code"][1]]
# Metro Młociny 88 → Metro Młociny 28
if stake["id"] == "605988":
counterpart_available = [i for i in position_stakes if \
i["id"] == "605928"]
# If 605928 is present, map 605988 to it.
# Otherwise fall back on the default matching options
if counterpart_available:
self.stops_map["605988"] = "605928"
continue
# Map to a stake with same position
if stakes_with_same_pos:
self.stops_map[stake["id"]] = stakes_with_same_pos[0]
# Map to a stake with same digit
elif stakes_with_same_code:
self.stops_map[stake["id"]] = stakes_with_same_code[0]
# Unable to find a matching stake
else:
self.stops_map[stake["id"]] = None
self.incorrect_stops.append(stake["id"])
def stops(self):
file = open("gtfs/stops.txt", mode="w", encoding="utf8", newline="")
writer = csv.writer(file)
writer.writerow(["stop_id", "stop_name", "stop_lat", "stop_lon", "location_type", "parent_station", "stop_IBNR", "platform_code", "wheelchair_boarding"])
print("\033[1A\033[K" + "Parsing stops (ZP)")
for group in self.parser.parse_zp():
# Fix town name for Kampinoski PN
if group["town"] == "Kampinoski Pn":
group["town"] = "Kampinoski PN"
# Add name to self.stop_names if it's missing
if group["id"] not in self.stop_names:
group["name"] = normal_stop_name(group["name"])
self.stop_names[group["id"]] = group["name"]
else:
group["name"] = self.stop_names[group["id"]]
# Add town name to stop name
if should_town_be_added_to_name(group):
group["name"] = f'{group["town"]} {group["name"]}'
self.stop_names[group["id"]] = group["name"]
# Parse stakes
if group["id"][1:3] in {"90", "91", "92"}:
self._stopgroup_railway(writer, group["id"], group["name"])
else:
self._stopgroup_normal(writer, group["id"], group["name"])
file.close()
def routes_schedules(self):
file_routes = open("gtfs/routes.txt", mode="w", encoding="utf8", newline="")
writer_routes = csv.writer(file_routes)
writer_routes.writerow(["agency_id", "route_id", "route_short_name", "route_long_name", "route_type", "route_color", "route_text_color", "route_sort_order"])
file_trips = open("gtfs/trips.txt", mode="w", encoding="utf8", newline="")
writer_trips = csv.writer(file_trips)
writer_trips.writerow(["route_id", "service_id", "trip_id", "trip_headsign", "direction_id", "shape_id", "exceptional", "wheelchair_accessible", "bikes_allowed"])
file_times = open("gtfs/stop_times.txt", mode="w", encoding="utf8", newline="")
writer_times = csv.writer(file_times)
writer_times.writerow(["trip_id", "arrival_time", "departure_time", "stop_id", "stop_sequence", "pickup_type", "drop_off_type", "shape_dist_traveled"])
route_sort_order = 1 # Leave first 2 blank for M1 and M2 routes
route_id = None
print("\033[1A\033[K" + "Parsing routes & schedules (LL)")
for route in self.parser.parse_ll():
route_id, route_desc = route["id"], route["desc"]
# Ignore Koleje Mazowieckie & Warszawska Kolej Dojazdowa routes
if route_id.startswith("R") or route_id.startswith("WKD"):
self.parser.skip_to_section("WK", end=True)
continue
print("\033[1A\033[K" + f"Parsing routes & schedules (LL) - {route_id}")
route_sort_order += 1
route_type, route_color, route_text_color = route_color_type(route_id, route_desc)
# Data loaded from TR section
route_name = ""
direction_stops = {"0": set(), "1": set()}
on_demand_stops = set()
inaccesible_trips = set()
variant_directions = {}
# Variants
print("\033[1A\033[K" + f"Parsing routes & schedules (TR) - {route_id}")
for variant in self.parser.parse_tr():
print("\033[1A\033[K" + f"Parsing routes & schedules (LW) - {route_id}")
stops = list(self.parser.parse_lw())
# variant direction
variant_directions[variant["id"]] = variant["direction"]
# route_name should be the name of first and last stop of 1st variant
if not route_name:
route_name = " — ".join([
self.stop_names[stops[0]["id"][:4]],
self.stop_names[stops[-1]["id"][:4]]
])
# add on_demand_stops from this variant
on_demand_stops |= {i["id"] for i in stops if i["on_demand"]}
# add stopids to proper direction in direction_stops
direction_stops[variant["direction"]] |= {i["id"] for i in stops}
# now parse ODWG sections - for inaccesible trips (only tram)
if route_type == "0":
print("\033[1A\033[K" + f"Parsing routes & schedules (TD) - {route_id}")
for trip in self.parser.parse_wgod(route_type, route_id):
if not trip["accessible"]:
inaccesible_trips.add(trip["id"])
else:
self.parser.skip_to_section("RP", end=True)
# Schedules
print("\033[1A\033[K" + f"Parsing routes & schedules (WK) - {route_id}")
for trip in self.parser.parse_wk(route_id):
# Change stop_ids based on stops_map
for stopt in trip["stops"]:
stopt["orig_stop"] = stopt.pop("stop")
stopt["stop"] = self.stops_map.get(
stopt["orig_stop"], stopt["orig_stop"]
)
# Filter out "None" stops
trip["stops"] = [i for i in trip["stops"] if i["stop"]]
# Ignore trips with fewer than 2 stop times
if len(trip["stops"]) < 2: continue
# Unpack info from trip_id
trip_id = trip["id"]
trip_id_split = trip_id.split("/")
variant_id = trip_id_split[1]
service_id = trip_id_split[2]
del trip_id_split
# "Exceptional" trip - a deutor/depot run
if variant_id.startswith("TP-") or variant_id.startswith("TO-"):
exceptional = "0"
else:
exceptional = "1"
# Shapes
if self.shapes:
shape_id, shape_distances = self.shapes.get(
route_type, trip_id, [i["stop"] for i in trip["stops"]])
else:
shape_id, shape_distances = "", {}
# Wheelchair Accessibility
if trip_id in inaccesible_trips:
wheelchair = "2"
else:
wheelchair = "1"
# Direction
if variant_id in variant_directions:
direction = variant_directions[variant_id]
else:
direction = trip_direction(
{i["orig_stop"] for i in trip["stops"]},
direction_stops)
variant_directions[variant_id] = direction
# Headsign
headsign = proper_headsign(
trip["stops"][-1]["stop"],
self.stop_names.get(trip["stops"][-1]["stop"][:4], ""))
if not headsign:
warn(f"No headsign for trip {trip_id}")
# Write to trips.txt
writer_trips.writerow([
route_id, service_id, trip_id, headsign, direction,
shape_id, exceptional, wheelchair, "1",
])
max_seq = len(trip["stops"]) - 1
# StopTimes
for seq, stopt in enumerate(trip["stops"]):
# Pickup Type
if seq == max_seq: pickup = "1"
elif "P" in stopt["flags"]: pickup = "1"
elif stopt["orig_stop"] in on_demand_stops: pickup = "3"
else: pickup = "0"
# Drop-Off Type
if seq == 0: dropoff = "1"
elif stopt["orig_stop"] in on_demand_stops: dropoff = "3"
else: dropoff = "0"
# Shape Distance
stop_dist = shape_distances.get(seq, "")
if stop_dist: stop_dist = round(stop_dist, 4)
# Output to stop_times.txt
writer_times.writerow([
trip_id, stopt["time"], stopt["time"], stopt["stop"],
seq, pickup, dropoff, stop_dist
])
# Output to routes.txt
writer_routes.writerow([
"0", route_id, route_id, route_name, route_type,
route_color, route_text_color, route_sort_order
])
file_routes.close()
file_trips.close()
file_times.close()
def parse(self):
self.calendar()
self.stops()
self.routes_schedules()
def dump_missing_stops(self):
with open("missing_stops.json", "w") as f:
json.dump(
{
"missing": [int(i) for i in self.incorrect_stops],
"unused": [int(i) for i in self.unused_stops],
},
f,
indent=0
)
@staticmethod
def static_files(shapes, version, download_time):
feed_version = "Version {}; downloaded at: {}".format(version, download_time)
"Create files that don't depend of ZTM file content"
file = open("gtfs/agency.txt", mode="w", encoding="utf8", newline="\r\n")
file.write('agency_id,agency_name,agency_url,agency_timezone,agency_lang,agency_phone,agency_fare_url\n')
file.write('0,"Warszawski Transport Publiczny","https://wtp.waw.pl",Europe/Warsaw,pl,19 115,"https://www.nowa.wtp.waw.pl/ceny-i-rodzaje-biletow/"\n')
file.close()
file = open("gtfs/feed_info.txt", mode="w", encoding="utf8", newline="\r\n")
file.write('feed_publisher_name,feed_publisher_url,feed_lang,feed_version\n')
if shapes: file.write('"GTFS Convert: MKuranowski; Data: ZTM Warszawa; Bus Shapes (under ODbL License): © OpenStreetMap contributors","https://github.com/MKuranowski/WarsawGTFS",pl,{}\n'.format(feed_version))
else: file.write('"GTFS Convert: MKuranowski; Data: ZTM Warszawa","https://github.com/MKuranowski/WarsawGTFS",pl,{}\n'.format(feed_version))
file.close()
@staticmethod
def compress(target="gtfs.zip"):
"Compress all created files to gtfs.zip"
with zipfile.ZipFile(target, mode="w", compression=zipfile.ZIP_DEFLATED) as archive:
for file in os.listdir("gtfs"):
if file.endswith(".txt"):
archive.write(os.path.join("gtfs", file), arcname=file)
@classmethod
def create(cls, version="", shapes=False, metro=False, prevver="", targetfile="gtfs.zip", clear_shape_errors=True):
print("\033[1A\033[K" + "Downloading file")
download_time = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
self = cls(version, shapes)
if prevver == self.version:
self.reader.close()
print("\033[1A\033[K" + "File matches the 'prevver' argument, aborting!")
return
print("\033[1A\033[K" + "Starting parser...")
self.parse()
print("\033[1A\033[K" + "Parser finished working, closing TXT file")
self.reader.close()
self.dump_missing_stops()
print("\033[1A\033[K" + "Creating static files")
self.static_files(bool(self.shapes), self.version, download_time)
if metro:
print("\033[1A\033[K" + "Adding metro")
Metro.add()
print("\033[1A\033[K" + "Compressing")
self.compress(targetfile)
return self.version
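# Example (assumed usage): build the current feed without shapes or metro
# and compress it to gtfs.zip:
# feed_version = Converter.create(shapes=False, metro=False, targetfile="gtfs.zip")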
```
#### File: WarsawGTFS/src/utils.py
```python
from contextlib import contextmanager
import signal
import math
import os
"""
Generic utility functions.
Either geographical ones, or
utilities that could be used by both static and realtime parsers
"""
@contextmanager
def time_limit(sec):
"Time limter based on https://gist.github.com/Rabbit52/7449101"
def handler(x, y): raise TimeoutError
signal.signal(signal.SIGALRM, handler)
signal.alarm(sec)
try: yield
finally: signal.alarm(0)
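# Example (assumed usage): abort a slow operation after 30 seconds;
# `slow_download` here is a hypothetical placeholder.
# try:
#     with time_limit(30):
#         slow_download()
# except TimeoutError:
#     print("operation took too long")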
def clear_directory(directory):
if not os.path.exists(directory): os.mkdir(directory)
for file in [os.path.join(directory, x) for x in os.listdir(directory)]: os.remove(file)
def haversine(pt1, pt2):
"Calculate haversine distance (in km)"
lat1, lon1 = map(math.radians, pt1)
lat2, lon2 = map(math.radians, pt2)
lat = lat2 - lat1
lon = lon2 - lon1
d = math.sin(lat * 0.5) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(lon * 0.5) ** 2
return 2 * 6371 * math.asin(math.sqrt(d))
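# Worked example: one degree of latitude near Warsaw,
# haversine((52.0, 21.0), (53.0, 21.0)), is roughly 111.2 km.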
def iter_haversine(points):
"Calculate total route distance"
total = 0.0
for i in range(1, len(points)):
total += haversine(points[i-1], points[i])
return total
def avg_position(stops_in_group):
"Average position of a dict (or plain sequence) of (lat, lon) pairs"
points = list(stops_in_group.values()) if hasattr(stops_in_group, "values") else list(stops_in_group)
lats = [float(i[0]) for i in points]
lons = [float(i[1]) for i in points]
avg_lat = round(sum(lats)/len(lats), 8)
avg_lon = round(sum(lons)/len(lons), 8)
return str(avg_lat), str(avg_lon)
def initial_bearing(pos1, pos2):
"Calculate initial bearing of vehicle, only if the vehicle has moved more than 30m"
if haversine(pos1, pos2) < 0.003: return None
lat1, lat2, lon = map(math.radians, [pos1[0], pos2[0], pos2[1] - pos1[1]])
x = math.sin(lon) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(lon))
return math.degrees(math.atan2(x, y))
``` |
{
"source": "111989/game_physics",
"score": 4
} |
#### File: game_physics/A-Star-Shortest Path/astar.py
```python
import pygame
import argparse
from heapq import heappush, heappop
class Cell:
def __init__(self, cell_row: int, cell_col: int,
display_width: int, display_height: int, total_cells: int, NONE: tuple):
self.cell_row = cell_row
self.cell_col = cell_col
self.total_cells = total_cells
self.cell_width = display_width // self.total_cells
self.cell_height = display_height // self.total_cells
self.mode = NONE
self.parent = None
self.successors = []
self.g_score = float('inf')
self.f_score = float('inf')
def get_cell_position(self):
return self.cell_row, self.cell_col
def __lt__(self, other):
return False
# fill display
# draw cells
# update display
def draw(display, display_width: int, display_height: int,
matrix, total_cells: int, NONE: tuple):
display.fill(NONE)
for row in matrix:
for cell in row:
pygame.draw.rect(display, cell.mode,
(cell.cell_row * cell.cell_width ,
cell.cell_col * cell.cell_height,
cell.cell_width, cell.cell_height))
pygame.display.update()
# Initializes a Priority Queue called 'open_cells' for the
# algorithm to track cells with low f-scores, and 'tie_breaker'
# to break ties between cells with the same f-score.
def algorithm(display, display_width: int, display_height: int, matrix,
total_cells: int, start_cell, end_cell, NONE: tuple, START: tuple,
END: tuple, OBSTACLE: tuple, OPEN: tuple, CLOSED: tuple, PATH: tuple):
# Returns Manhattan Distance between
# the input cell and end_cell
def heuristic(cell):
x1, y1 = cell.get_cell_position()
x2, y2 = end_cell.get_cell_position()
return abs(x2-x1) + abs(y2-y1)
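# e.g. a cell at (2, 3) with end_cell at (5, 1) gives |5-2| + |1-3| = 5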
tie_breaker = 0
start_cell.g_score = 0
start_cell.f_score = heuristic(start_cell)
open_cells = [(start_cell.f_score, tie_breaker, start_cell)]
open_cells_set = {start_cell}
# Fetch the cell with the least f_score, call it current_cell.
# If current_cell == end_cell, optimal path is found, draw it
# and terminate algorithm. Else, generate successors (neighbours)
# of the current_cell and update the attributes of a successor
# when a shorter path to it is found.
while open_cells:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
current_cell = heappop(open_cells)[2]
open_cells_set.remove(current_cell)
if current_cell == end_cell:
temp_cell = end_cell
while temp_cell.parent != start_cell:
temp_cell = temp_cell.parent
temp_cell.mode = PATH
draw(display, display_width, display_height, matrix, total_cells, NONE)
start_cell.mode, end_cell.mode = START, END
break
# TOP
if current_cell.cell_row > 0 and \
matrix[current_cell.cell_row-1][current_cell.cell_col].mode != OBSTACLE:
current_cell.successors.append(matrix[current_cell.cell_row-1][current_cell.cell_col])
# RIGHT
if current_cell.cell_col < current_cell.total_cells-1 and \
matrix[current_cell.cell_row][current_cell.cell_col+1].mode != OBSTACLE:
current_cell.successors.append(matrix[current_cell.cell_row][current_cell.cell_col+1])
# BOTTOM
if current_cell.cell_row < current_cell.total_cells-1 and \
matrix[current_cell.cell_row+1][current_cell.cell_col].mode != OBSTACLE:
current_cell.successors.append(matrix[current_cell.cell_row+1][current_cell.cell_col])
# LEFT
if current_cell.cell_col > 0 and \
matrix[current_cell.cell_row][current_cell.cell_col-1].mode != OBSTACLE:
current_cell.successors.append(matrix[current_cell.cell_row][current_cell.cell_col-1])
# # TOP-LEFT
# if current_cell.cell_row > 0 and current_cell.cell_col > 0 and \
# matrix[current_cell.cell_row-1][current_cell.cell_col-1] != OBSTACLE:
# current_cell.successors.append(matrix[current_cell.cell_row-1][current_cell.cell_col-1])
# # TOP-RIGHT
# if current_cell.cell_row > 0 and current_cell.cell_col < current_cell.total_cells-1 and \
# matrix[current_cell.cell_row-1][current_cell.cell_col+1] != OBSTACLE:
# current_cell.successors.append(matrix[current_cell.cell_row-1][current_cell.cell_col+1])
# # BOTTOM-RIGHT
# if current_cell.cell_row < current_cell.total_cells-1 and current_cell.cell_col < current_cell.total_cells-1 and \
# matrix[current_cell.cell_row+1][current_cell.cell_col+1] != OBSTACLE:
# current_cell.successors.append(matrix[current_cell.cell_row+1][current_cell.cell_col+1])
# # BOTTOM-LEFT
# if current_cell.cell_row < current_cell.total_cells-1 and current_cell.cell_col > 0 and \
# matrix[current_cell.cell_row+1][current_cell.cell_col-1] != OBSTACLE:
# current_cell.successors.append(matrix[current_cell.cell_row+1][current_cell.cell_col-1])
min_g_score = current_cell.g_score + 1
for successor in current_cell.successors:
if min_g_score < successor.g_score:
successor.parent = current_cell
successor.g_score = min_g_score
successor.f_score = successor.g_score + heuristic(successor)
if successor not in open_cells_set:
tie_breaker += 1
heappush(open_cells, (successor.f_score, tie_breaker, successor))
open_cells_set.add(successor)
successor.mode = OPEN
if current_cell != start_cell:
current_cell.mode = CLOSED
draw(display, display_width, display_height, matrix, total_cells, NONE)
def main():
# initialize cell modes
# initialize pygame display
# generate matrix
NONE = (50, 50, 50)
START = (255, 255, 0)
END = (0, 255, 255)
OBSTACLE = (175, 175, 225)
OPEN = (255, 200, 200)
CLOSED = (255, 128, 128)
PATH = (128, 225, 0)
display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("A* Shortest Path Finding Algorithm")
matrix = []
for cell_row in range(total_cells):
matrix.append([])
for cell_col in range(total_cells):
cell = Cell(cell_row, cell_col, display_width, display_height, total_cells, NONE)
matrix[cell_row].append(cell)
# run
start_cell, end_cell = None, None
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
# On left mouse click, get the position of
# the cell and initialize/ set cell modes.
if pygame.mouse.get_pressed()[0]:
cell_width = display_width // total_cells
cell_height = display_height // total_cells
cell_row = pygame.mouse.get_pos()[0] // cell_width
cell_col = pygame.mouse.get_pos()[1] // cell_height
cell = matrix[cell_row][cell_col]
if not start_cell and cell != end_cell:
start_cell = cell
start_cell.mode = START
elif not end_cell and cell != start_cell:
end_cell = cell
end_cell.mode = END
elif cell not in (start_cell, end_cell):
cell.mode = OBSTACLE
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and start_cell and end_cell:
algorithm(display, display_width, display_height, matrix, total_cells,
start_cell, end_cell, NONE, START, END, OBSTACLE, OPEN, CLOSED, PATH)
#update display
draw(display, display_width, display_height, matrix, total_cells, NONE)
pygame.quit()
if __name__ == '__main__':
#parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--width', type = int, default = 1200, help = 'Display width, must be an integer')
parser.add_argument('--height', type = int, default = 600, help = 'Display height, must be an integer')
parser.add_argument('--n', type = int, default = 60, help = 'The display is treated as an (nxn) matrix. Must be an integer that completely divides both width and height')
args = vars(parser.parse_args())
display_width = int(args['width'])
display_height = int(args['height'])
total_cells = int(args['n'])
main()
```
#### File: game_physics/Projectile-Motion/projectile.py
```python
import math
import pygame
# Creates a ball object with location,
# size, appearance & speed attributes.
class Ball:
def __init__(self, display_width: int, display_height: int,
ball_radius: float, ball_colour: tuple, ball_speed: float):
self.ball_radius = ball_radius
self.ball_colour = ball_colour
self.ball_speed = ball_speed
self.x = display_width // 2
self.y = (display_height - 1) - self.ball_radius
self.center = (self.x, self.y)
def main():
# initialize pygame display
# initialize ball features
# initialize projectile inputs
# generate ball
DISPLAY_WIDTH = 1250
DISPLAY_HEIGHT = 500
display = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
pygame.display.set_caption("Projectile Motion")
BALL_RADIUS = 7.5
BALL_COLOUR = (50, 50, 50)
BALL_SPEED = 0.0175
ground = (DISPLAY_HEIGHT - 1) - BALL_RADIUS
hyp_factor = 0.1
acceleration = 4.9
ball = Ball(DISPLAY_WIDTH, DISPLAY_HEIGHT, BALL_RADIUS, BALL_COLOUR, BALL_SPEED)
# run the projectile
time = 0
launch = False
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if launch and ground < ball.y:
ball.y = ground
time = 0
launch = False
# Set initial coordinates and
# compute projectile parameters.
if not launch and pygame.mouse.get_pressed()[0]:
(x0, y0) = ball.center
x1, y1 = pygame.mouse.get_pos()
x, y = x0-x1, y0-y1
hypotenuse = hyp_factor * math.sqrt((y)**2 + (x)**2)
try: angle = math.atan((y)/(x))
except: angle = math.pi / 2
if x < 0 and y < 0: angle = 2 * math.pi - angle
elif x < 0 and y > 0: angle = abs(angle)
elif x > 0 and y < 0: angle = math.pi + abs(angle)
elif x > 0 and y > 0: angle = math.pi - angle
launch = True
# Update ball coordinates to those
# dictated by projectile motion at time t.
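        # For reference, this is standard projectile kinematics with screen-space y growing
        # downward and `hypotenuse` playing the role of the launch speed v:
        #   x(t) = x0 + v*cos(angle)*t
        #   y(t) = y0 - (v*sin(angle)*t - 0.5*acceleration*t**2)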
if launch and ball.y <= ground:
time += ball.ball_speed
velocity_x = math.cos(angle) * hypotenuse
velocity_y = math.sin(angle) * hypotenuse
displacement_x = x0 + velocity_x * time
displacement_y = y0 - ((velocity_y * time) + ((-acceleration * (time**2))/2))
ball.x, ball.y = round(displacement_x), round(displacement_y)
# update coordinates of ball center
# fill display
# draw hypotenuse
# draw ball
# update display
ball.center = (ball.x, ball.y)
DISPLAY_COLOUR = (175, 175, 225)
display.fill(DISPLAY_COLOUR)
HYP_COLOUR = (200, 200, 200)
HYP_WIDTH = 3
pygame.draw.line(display, HYP_COLOUR, ball.center, pygame.mouse.get_pos(), HYP_WIDTH)
pygame.draw.circle(display, ball.ball_colour, ball.center, ball.ball_radius)
pygame.display.update()
pygame.quit()
if __name__ == '__main__':
main()
``` |
{
"source": "111989/Machine_Learning_Models",
"score": 3
} |
#### File: Machine_Learning_Models/models/gmm.py
```python
import numpy as np
import argparse
from csv import reader
eps = 1e-8
# Probability density function of multivariate_normal for d dimensions where d > 1
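# Reference form of the density this function approximates:
#   N(x; mu, Sigma) = (2*pi)**(-d/2) * det(Sigma)**(-1/2) * exp(-0.5 * (x - mu) @ inv(Sigma) @ (x - mu).T)
# The implementation below adds `eps` for numerical stability and uses a simplified
# quadratic-form term, so it should be read as an approximation rather than the exact density.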
def pdf(x, mean, cov):
    cl = 1.0 / (((np.linalg.det(cov) + eps) ** 0.5) * ((np.pi * 2) ** (cov.shape[0] / 2)))  # 1 / ((2*pi)^(d/2) * |cov|^(1/2))
x_mu = (x - mean)
e = np.sqrt(np.sum(np.square(np.matmul(np.matmul(x_mu, np.linalg.pinv(cov)), x_mu.T)), axis=-1))
ans = cl * np.exp((-1 / 2) * e)
return ans
class GMM:
def __init__(self, k=1):
self.k = k
# Initialise weight vector
self.w = np.ones(k) / k
self.means = None
self.cov = None
def fit(self, x):
x = np.array(x)
self.means = np.random.choice(x.flatten(), (self.k, x.shape[1]))
cov = []
for i in range(self.k):
cov.append(np.cov(x, rowvar=False))
cov = np.array(cov)
for step in range(55):
# Expectation step: estimating the values of latent variables
probabilities = []
for j in range(self.k):
probabilities.append(pdf(x=x, mean=self.means[j], cov=cov[j]) + eps)
probabilities = np.array(probabilities)
# Maximization step: update mean, covariance and weights
for j in range(self.k):
# Bayes' Theorem
b = ((probabilities[j] * self.w[j]) / (
np.sum([probabilities[i] * self.w[i] for i in range(self.k)], axis=0) + eps))
# update mean, covariance and weights to maximize b
self.means[j] = np.sum(b.reshape(len(x), 1) * x, axis=0) / (np.sum(b + eps))
cov[j] = np.dot((b.reshape(len(x), 1) * (x - self.means[j])).T, (x - self.means[j])) / (
np.sum(b) + eps)
self.w[j] = np.mean(b)
self.cov = cov
def prob(self, x):
x = np.array(x)
p = 0
for j in range(self.k):
# calculate probability of each component and add all of them
p += self.w[j] * pdf(x=x, mean=self.means[j], cov=self.cov[j])
return p
def load_csv(inp_file):
dataset = []
with open(inp_file, 'r') as file:
csv_reader = reader(file)
for data_row in csv_reader:
dataset.append(data_row)
return dataset
parser = argparse.ArgumentParser()
parser.add_argument('--components', help='components 1|3|4', required=True)
parser.add_argument('--train', help='path to training data file', required=True)
parser.add_argument('--test', help='path to test data file', required=True)
args = vars(parser.parse_args())
k = int(args['components'])
train_data = load_csv(args['train'])
train_datas = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
7: [],
8: [],
9: [],
}
# Split data digit wise
for row in train_data:
y = int(row[64])
train_datas[y].append([float(x.strip()) for x in row[:-1]])
gmms = []
# Train GMM for each digit giving is 10 probability functions
for i in range(10):
gmm = GMM(k)
gmm.fit(train_datas[i])
gmms.append(gmm)
print('trained')
test_data = load_csv(args['test'])
preds = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
7: [],
8: [],
9: [],
}
# Test
for row in test_data:
y_act = int(row[64])
max_p = float('-inf')
y_pred = -1
# Calculate the probability that the given x may correspond to a digit for all digits
for idx in range(len(gmms)):
p = gmms[idx].prob([float(x.strip()) for x in row[:-1]])
# select the digit with maximum probability
if np.sum(p) > max_p:
y_pred = idx
max_p = np.sum(p)
if y_pred == -1:
print('never')
accu = 0
if y_act == y_pred:
accu = 1
# Save prediction according to digit
preds[y_act].append(accu)
total = 0
for idx in range(len(preds)):
    correct = np.sum(np.array(preds[idx]))
    print(f'{idx}: {(correct * 100) / len(preds[idx])}')
    total += correct
print(f'total: {(total / len(test_data)) * 100}')
```
#### File: Machine_Learning_Models/models/svm.py
```python
import numpy as np
from numpy import linalg
import argparse
from csv import reader
from random import seed
def linear_kernel(x1, x2):
return np.dot(x1, x2)
def radial_kernel(x, y, sigma=5.0):
return np.exp(-linalg.norm(x - y) ** 2 / (2 * (sigma ** 2)))
class SVM:
def __init__(self, kernel=linear_kernel, C=0.1):
self.kernel = kernel
self.C = C
self.learning_rate = 0.001
self.iterations = 1000
def fit(self, X, y):
n_samples, n_features = X.shape
y = np.where(y == 0, -1, 1)
K = np.zeros((n_samples, n_samples))
alpha = np.zeros(n_samples)
b = 0
for t in range(1, self.iterations + 1):
i = np.random.randint(0, n_samples)
for j in range(n_samples):
K[i, j] = self.kernel(X[i], X[j])
condition = y[i] * np.sum(alpha * K[i, :]) < 1
if condition:
alpha += self.learning_rate * alpha + y[i]
else:
b += self.learning_rate * y[i]
idx = np.logical_and(alpha > 0, alpha < self.C)
self.X = X[idx.reshape(1, -1)[0], :]
self.y = y[idx.reshape(1, -1)[0]]
self.b = b
self.alpha = alpha[idx.reshape(1, -1)[0]]
self.w = np.transpose(np.matmul(np.transpose(alpha * y), X))
def predict(self, X):
n_samples, n_features = X.shape
p = np.zeros(n_samples)
for i in range(n_samples):
prediction = 0
for j in range(self.X.shape[0]):
prediction = prediction + self.alpha[j] * self.y[j] * self.kernel(np.transpose(X[i, :]), np.transpose(self.X[j, :]))
p[i] = prediction + self.b
return np.where(p <= 0, 0, 1)
def erm(y_pred, y_true):
c = 0
for i in range(len(y_true)):
if y_pred[i] == y_true[i]:
c += 1
return 1 - c / len(y_true)
def load_csv(inp_file):
dataset = []
with open(inp_file, 'r') as file:
csv_reader = reader(file)
for data_row in csv_reader:
dataset.append(data_row)
return dataset
# Split in to X,Y
def split_X_Y(dataset):
x = []
y = []
for row in dataset:
x.append([p for p in row[:-1]])
y.append(int(row[-1]))
return np.array(x), np.array(y)
def split_x_y_mnist(d):
x = []
y = []
for r in d:
x.append([p / 255.0 for p in r[1:]])
y.append(int(r[0]))
return np.array(x), np.array(y)
binary = {
0: '0000',
1: '0001',
2: '0010',
3: '0011',
4: '0100',
5: '0101',
6: '0110',
7: '0111',
8: '1000',
9: '1001',
}
sb = {
'0000': 0,
'0001': 1,
'0010': 2,
'0011': 3,
'0100': 4,
'0101': 5,
'0110': 6,
'0111': 7,
'1000': 8,
'1001': 9,
}
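# The ten MNIST digits are encoded as 4-bit strings (`binary`) so the 10-class problem
# reduces to four independent binary SVMs, one per bit position. For illustration,
# digit 6 -> '0110' -> per-bit targets [0, 1, 1, 0]; the four per-bit predictions are
# re-assembled into a digit in merge_Y_MNIST via the reverse map `sb`.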
def split_yy_mnist(mainy):
y0 = []
y1 = []
y2 = []
y3 = []
for idx in range(len(mainy)):
a = binary[mainy[idx]]
y0.append(int(a[0]))
y1.append(int(a[1]))
y2.append(int(a[2]))
y3.append(int(a[3]))
return np.array([np.array(y0), np.array(y1), np.array(y2), np.array(y3)])
def merge_Y_MNIST(yy):
res = np.where(yy[0] == 0, '0', '1')
for i in range(1, len(yy)):
res = np.char.add(res, np.where(yy[i] == 0, '0', '1'))
er = []
for i in range(len(res)):
er.append(sb[res[i]])
return np.array(er)
def train_test_split(X, y, test_size=0.5, shuffle=True, seed=None):
np.random.seed(seed)
idx_val = np.arange(X.shape[0])
np.random.shuffle(idx_val)
X = X[idx_val]
y = y[idx_val]
split_index = len(y) - int(len(y) // (1 / test_size))
X_train, X_test = X[:split_index], X[split_index:]
y_train, y_test = y[:split_index], y[split_index:]
return X_train, X_test, y_train, y_test
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--kernel', help='kernel function to use, linear|rbf', required=True)
parser.add_argument('--dataset', help='Dataset mnist|bcd', required=True)
parser.add_argument('--train', help='path to training data file', required=True)
parser.add_argument('--test', help='path to test data file', required=True)
parser.add_argument('--output', help='path to output weight vector', required=True)
args = vars(parser.parse_args())
seed(1)
output = open(args['output'], 'w')
# get the dataset file
data_file = args['train']
# Load the input CSV file
dataset = load_csv(data_file)
kf = linear_kernel
if args['kernel'] == 'linear':
kf = linear_kernel
elif args['kernel'] == 'rbf':
kf = radial_kernel
else:
print('invalid kernel value passed defaulting to linear')
dataset = dataset[1:]
i = 0
for row in dataset:
dataset[i] = [float(x.strip()) for x in row]
i += 1
ds = args['dataset']
if ds == 'mnist':
X, y = split_x_y_mnist(dataset)
yy = split_yy_mnist(y)
print('started training')
svms = []
for i in range(len(yy)):
svm = SVM(kernel=kf)
svm.fit(X, yy[i])
if svm.w is not None:
print(svm.w, file=output)
svms.append(svm)
    test_data_set = load_csv(args['test'])
test_data_set = test_data_set[1:]
i = 0
for row in test_data_set:
test_data_set[i] = [float(x.strip()) for x in row]
i += 1
X_test, Y_test = split_x_y_mnist(test_data_set)
yy_pred = []
for i in range(len(svms)):
yy_pred.append(svms[i].predict(X_test))
y_pred = merge_Y_MNIST(yy_pred)
print('error')
print(erm(y_pred, Y_test))
elif ds == 'bcd':
svm = SVM(kernel=kf)
X, Y = split_X_Y(dataset)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
svm.fit(X_train, y_train)
if svm.w is not None:
print(svm.w, file=output)
y_pred = svm.predict(X_test)
print('error')
print(erm(y_pred, y_test))
else:
print('unknown dataset passed possible values are mnist and bcd')
output.close()
``` |
{
"source": "111989/New-York_Thruway_Simulation",
"score": 3
} |
#### File: main/data_analysis/analyze_original_data.py
```python
import pandas as pd
from main.Graph.GraphCreator import CONFIGS, ACCESS_POINTS
from tqdm import tqdm
PATH_2019 = '/home/capstone/Documents/AMS_553_Final_Project/2019_final.csv'
PATH_2018 = '/home/capstone/Documents/AMS_553_Final_Project/2018_final.csv'
PATH_2017 = '/home/capstone/Documents/AMS_553_Final_Project/2017_final.csv'
DATA_PATH = '/home/capstone/Documents/AMS_553_Final_Project/original_results_1.csv'
SAVE_PATH_19 = '/home/capstone/Documents/AMS_553_Final_Project/2019_analysis_3.csv'
SAVE_PATH_18 = '/home/capstone/Documents/AMS_553_Final_Project/2018_analysis.csv'
SAVE_PATH_17 = '/home/capstone/Documents/AMS_553_Final_Project/2017_analysis.csv'
def translate_index(index):
return tuple([index[0][::-1]]) + index[1:]
def analyze(simulation, gathered, save_path):
print("Loading Gathered data...")
gathered_data = pd.read_csv(gathered, index_col=['vehicle_class', 'entrance_site', 'exit_site', 'week_of_year'], usecols=['vehicle_class', 'entrance_site', 'exit_site', 'week_of_year', 'payment_type', 'profit'])
gathered_data = gathered_data[gathered_data['payment_type'] == 'E-ZPass']
print("loading simulation data...")
simulation_data = pd.read_csv(simulation, index_col=['Vehicle Class', 'Entrance', 'Exit', 'Week']) if type(simulation) == str else simulation
print(simulation_data)
errors = {}
for index in tqdm(gathered_data.index):
if not (index[1] in ACCESS_POINTS and index[2] in ACCESS_POINTS):
continue
try:
actual_profit = gathered_data.loc[index]['profit']
simulated_profit = simulation_data.loc[translate_index(index)]
err = (simulated_profit - actual_profit)/actual_profit
err['Mean Perc. Err.'] = err.mean()
errors[index] = err
except:
print("ERROR:", index)
errors = pd.DataFrame(errors).T
errors.index.names = ['vehicle_type', 'entrance', 'exit', 'week']
if save_path is not None:
errors.to_csv(save_path)
return errors
if __name__ == '__main__':
print(analyze(DATA_PATH, PATH_2019, None))
``` |
{
"source": "111kenu/1337x",
"score": 3
} |
#### File: 1337x/py1337x/py1337x.py
```python
import requests
import requests_cache
from py1337x import parser
class py1337x():
def __init__(self, proxy=None, cookie=None, cache=None, cacheTime=86400, backend='sqlite'):
self.baseUrl = f'https://www.{proxy}' if proxy else 'https://www.1377x.to'
self.headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'accept-language': 'en-US,en;q=0.5',
'upgrade-insecure-requests': '1',
'te': 'trailers'
}
if cookie:
self.headers['cookie'] = f'cf_clearance={cookie}'
self.requests = requests_cache.CachedSession(cache, expire_after=cacheTime, backend=backend) if cache else requests
#: Searching torrents
def search(self, query, page=1, category=None, sortBy=None, order='desc'):
query = '+'.join(query.split())
category = category.upper() if category and category.lower() in ['xxx', 'tv'] else category.capitalize() if category else None
url = f"{self.baseUrl}/{'sort-' if sortBy else ''}{'category-' if category else ''}search/{query}/{category+'/' if category else ''}{sortBy.lower()+'/' if sortBy else ''}{order.lower()+'/' if sortBy else ''}{page}/"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl, page=page)
#: Trending torrents
def trending(self, category=None, week=False):
url = f"{self.baseUrl}/trending{'-week' if week and not category else ''}{'/w/'+category.lower()+'/' if week and category else '/d/'+category.lower()+'/' if not week and category else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Top 100 torrents
def top(self, category=None):
category = 'applications' if category and category.lower() == 'apps' else 'television' if category and category.lower() == 'tv' else category.lower() if category else None
url = f"{self.baseUrl}/top-100{'-'+category if category else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Popular torrents
def popular(self, category, week=False):
url = f"{self.baseUrl}/popular-{category.lower()}{'-week' if week else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Browse torrents by category type
def browse(self, category, page=1):
category = category.upper() if category.lower() in ['xxx', 'tv'] else category.capitalize()
url = f'{self.baseUrl}/cat/{category}/{page}/'
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl, page=page)
#: Info of torrent
def info(self, link=None, torrentId=None):
if not link and not torrentId:
raise TypeError('Missing 1 required positional argument: link or torrentId')
elif link and torrentId:
raise TypeError('Got an unexpected argument: Pass either link or torrentId')
link = f'{self.baseUrl}/torrent/{torrentId}/h9/' if torrentId else link
response = self.requests.get(link, headers=self.headers)
return parser.infoParser(response, baseUrl=self.baseUrl)
``` |
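A minimal usage sketch for the wrapper above (assuming the package exposes the class as `from py1337x import py1337x`, as the published library does; the query, category, and torrent id are illustrative, and the shape of each result is whatever `parser.torrentParser` returns):
```python
from py1337x import py1337x

torrents = py1337x()  # or py1337x(proxy='1337x.to', cache='py1337x_cache') for a mirror plus response caching
results = torrents.search('ubuntu', page=1, category='apps', sortBy='seeders')  # keyword search
trending = torrents.trending(category='movies', week=True)                      # weekly trending list
details = torrents.info(torrentId='1234567')                                    # details for one torrent (illustrative id)
```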
{
"source": "111pontes/napalm",
"score": 2
} |
#### File: test/junos/conftest.py
```python
import copy
import lxml
import yaml
import pytest
from napalm.base.test import conftest as parent_conftest
from napalm.base.test.double import BaseTestDouble
from napalm.junos import junos
@pytest.fixture(scope="class")
def set_device_parameters(request):
"""Set up the class."""
def fin():
request.cls.device.close()
request.addfinalizer(fin)
request.cls.driver = junos.JunOSDriver
request.cls.patched_driver = PatchedJunOSDriver
request.cls.vendor = "junos"
parent_conftest.set_device_parameters(request)
def pytest_generate_tests(metafunc):
"""Generate test cases dynamically."""
parent_conftest.pytest_generate_tests(metafunc, __file__)
class PatchedJunOSDriver(junos.JunOSDriver):
"""Patched JunOS Driver."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
optional_args["config_lock"] = False # to not try lock on open()
super(self.__class__, self).__init__(
hostname, username, password, timeout, optional_args
)
self.patched_attrs = ["device"]
self.device = FakeJunOSDevice()
def is_alive(self):
return {"is_alive": True} # always alive during the tests...
class FakeJunOSDevice(BaseTestDouble):
def __init__(self):
self.rpc = FakeRPCObject(self)
self._conn = FakeConnection(self.rpc)
self.alternative_facts_file = "facts.yml"
self.ON_JUNOS = True # necessary for fake devices
self.default_facts = {
"domain": None,
"hostname": "vsrx",
"ifd_style": "CLASSIC",
"2RE": False,
"serialnumber": "beb914a9cca3",
"fqdn": "vsrx",
"virtual": True,
"switch_style": "NONE",
"version": "12.1X47-D20.7",
"HOME": "/cf/var/home/vagrant",
"srx_cluster": False,
"model": "FIREFLY-PERIMETER",
"RE0": {
"status": "Testing",
"last_reboot_reason": "Router rebooted after a normal shutdown.",
"model": "FIREFLY-PERIMETER RE",
"up_time": "1 hour, 13 minutes, 37 seconds",
},
"vc_capable": False,
"personality": "SRX_BRANCH",
}
self._uptime = 4380
# Since junos-eznc 2.3.0 the new SAX parser is used as default. Thus
# disable it to use the DOM parser which was used prior.
self._use_filter = False
@property
def facts(self):
# we want to reinitialize it every time to avoid side effects
self._facts = copy.deepcopy(self.default_facts)
try:
alt_facts_filepath = self.find_file(self.alternative_facts_file)
except IOError:
self._facts = self.default_facts
return self._facts
with open(alt_facts_filepath, "r") as alt_facts:
self._facts.update(yaml.safe_load(alt_facts))
return self._facts
@property
def uptime(self):
return self._uptime
def open(self, auto_probe=0):
pass
def close(self):
pass
def bind(*args, **kvargs):
pass
def cli(self, command=""):
filename = "{safe_command}.txt".format(safe_command=self.sanitize_text(command))
        filepath = self.find_file(filename)
        return self.read_txt_file(filepath)
class FakeRPCObject:
"""
Fake RPC caller.
"""
def __init__(self, device):
self._device = device
def __getattr__(self, item):
self.item = item
return self
def response(self, **rpc_args):
instance = rpc_args.pop("instance", "")
filename = "{item}{instance}.xml".format(item=self.item, instance=instance)
        filepath = self._device.find_file(filename)
        xml_string = self._device.read_txt_file(filepath)
return lxml.etree.fromstring(xml_string)
def get_config(self, get_cmd=None, filter_xml=None, options={}):
# get_cmd is an XML tree that requests a specific part of the config
# E.g.: <configuration><protocols><bgp><group/></bgp></protocols></configuration>
if get_cmd is not None:
get_cmd_str = lxml.etree.tostring(get_cmd).decode("utf-8")
filename = self._device.sanitize_text(get_cmd_str)
# no get_cmd means it should mock the eznc get_config
else:
filename = "get_config__" + "__".join(
["{0}_{1}".format(k, v) for k, v in sorted(options.items())]
)
filename = "{filename}.xml".format(filename=filename[0:150])
        filepath = self._device.find_file(filename)
        xml_string = self._device.read_txt_file(filepath)
return lxml.etree.fromstring(xml_string)
__call__ = response
class FakeConnectionRPCObject:
"""
Will make fake RPC requests that usually are directly made via netconf.
"""
def __init__(self, rpc):
self._rpc = rpc
def response(self, non_std_command=None):
class RPCReply:
def __init__(self, reply):
self._NCElement__doc = reply
rpc_reply = RPCReply(self._rpc.get_config(get_cmd=non_std_command))
return rpc_reply
__call__ = response
class FakeConnection:
def __init__(self, rpc):
self.rpc = FakeConnectionRPCObject(rpc)
self._session = FakeSession()
class FakeSession:
def __init__(self):
self.transport = FakeTransport()
class FakeTransport:
def set_keepalive(self, keepalive):
self.keepalive = keepalive
``` |
{
"source": "111pontes/xr-pl2",
"score": 2
} |
#### File: xr-pl2/md-sdk+telemetry/configure_bgp_vrf.py
```python
import argparse
import urllib.parse
import sys
import logging
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_um_router_bgp_cfg \
as xr_um_router_bgp_cfg
def configure_bgp_vrf(router, local_as, vrf_name, route_distinguisher):
"""Add config data to bgp object."""
# local AS
as_ = router.bgp.As()
as_.as_number = local_as
# vrf configuration
vrf = as_.vrfs.Vrf()
vrf.vrf_name = vrf_name
vrf.rd = vrf.Rd()
vrf.rd.two_byte_as = vrf.rd.TwoByteAs()
as_number, index = route_distinguisher.split(':')
vrf.rd.two_byte_as.as_number = as_number
vrf.rd.two_byte_as.index = int(index)
address_family = vrf.address_families.AddressFamily()
address_family.af_name = xr_um_router_bgp_cfg.BgpAddressFamily.ipv4_unicast
address_family.redistribute.connected = address_family.redistribute.Connected()
address_family.redistribute.connected.metric = 10
# append configuration objects
vrf.address_families.address_family.append(address_family)
as_.vrfs.vrf.append(vrf)
router.bgp.as_.append(as_)
if __name__ == "__main__":
"""Execute main program."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose",
help="print debugging messages",
action="store_true")
parser.add_argument("local_as",
help="local autonomous system")
parser.add_argument("vrf_name",
help="VRF name")
parser.add_argument("route_distinguisher",
help="route distinguisher (as:index)")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urllib.parse.urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
                                      password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
# BGP configuration
router = xr_um_router_bgp_cfg.Router()
configure_bgp_vrf(router, int(args.local_as), args.vrf_name,
args.route_distinguisher)
# create configuration on NETCONF device
crud.create(provider, router)
sys.exit()
# End of script
``` |
{
"source": "111pontes/xr-um",
"score": 2
} |
#### File: xr-um/demo/validate_peer_interface.py
```python
import kafka
import json
import time
import argparse
import logging
KAFKA_TOPIC = 'pipeline'
KAFKA_BOOTSTRAP_SERVER = 'localhost:9092'
STATE_UP = "im-state-up"
TIMEOUT = 30
def validate_peer_interface(kafka_consumer, node, name,
state=STATE_UP,
timeout=TIMEOUT):
"""Validate interface state."""
telemetry_encoding_path = "Cisco-IOS-XR-pfi-im-cmd-oper:interfaces/interface-briefs/interface-brief"
start_time = time.time()
for kafka_msg in kafka_consumer:
msg = json.loads(kafka_msg.value.decode('utf-8'))
if (msg["Telemetry"]["node_id_str"] == node and
msg["Telemetry"]["encoding_path"] == telemetry_encoding_path
and "Rows" in msg):
for row in msg["Rows"]:
# return true if intf in expected oper/admin state
if ("interface-name" in row["Keys"] and
row["Keys"]["interface-name"] == name and
"state" in row["Content"] and
"line-state" in row["Content"] and
row["Content"]["state"] == state and
row["Content"]["line-state"] == state
):
return True
if time.time() - start_time > timeout:
break
return False
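# For reference, a Kafka message that passes the checks above has roughly this shape
# (node and interface names are illustrative):
#   {"Telemetry": {"node_id_str": "r1",
#                  "encoding_path": "Cisco-IOS-XR-pfi-im-cmd-oper:interfaces/interface-briefs/interface-brief"},
#    "Rows": [{"Keys": {"interface-name": "GigabitEthernet0/0/0/0"},
#              "Content": {"state": "im-state-up", "line-state": "im-state-up"}}]}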
if __name__ == "__main__":
"""Execute main program."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("node",
help="node router streaming interface status")
parser.add_argument("name",
help="interface name")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("kafka")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create kafka consumer to pipeline topic
kafka_consumer = kafka.KafkaConsumer(KAFKA_TOPIC,
bootstrap_servers=KAFKA_BOOTSTRAP_SERVER,
consumer_timeout_ms=TIMEOUT*1000)
print(validate_peer_interface(kafka_consumer, args.node, args.name))
exit()
# End of script
``` |
{
"source": "112224/study_manage",
"score": 3
} |
#### File: 112224/study_manage/parsing.py
```python
import bs4
import requests
def reader():
names = []
with open('participants', 'r') as f:
names.extend(f.readline().split())
    # To be revised later, e.g. read the number for the current week from a file named 'key'
    # Or should it change whenever the file for the corresponding problem is committed?
problems = []
with open('1week', 'r') as f:
problems.extend(f.readline().split())
return names , problems
def writer(api, parser_data):
print('api : ', api)
print('parser_data : ', parser_data)
with open('state.md','w') as f:
content = '| | '
for _id, title, level in api:
content += f'<img height="25px" width="25px" src="https://static.solved.ac/tier_small/{level}.svg"/>'
content += f'[{title}](https://www.acmicpc.net/problem/{_id}) | '
print(content)
f.write(content + ' \n')
for_table = '|:----:|' + ':----:|' * len(api) + ' \n'
f.write(for_table)
for ele in parser_data:
content = f'| {ele[0]} |'
for flag in ele[1:]:
if flag == 0:
content += ' WA |'
elif flag == -1:
content += ' not yet |'
else:
content += ' AC |'
f.write(content + ' \n')
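# For reference, the generated state.md is a Markdown table: the header row holds one
# tier badge plus problem link per problem, and each following row looks like
# | <handle> | AC | WA | not yet | ... depending on each participant's verdicts.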
def solved_api(problems):
base_url = "https://solved.ac/api/v3/problem/show?problemId="
header = {'Content-type': 'application/json'}
# <img height="25px" width="25px" src="https://static.solved.ac/tier_small/6.svg"/>
# [요세푸스 문제](https://www.acmicpc.net/problem/1158)
data = []
for problem in problems:
url = base_url + str(problem)
res = requests.get(url, header)
if res.status_code == 200:
json = res.json()
data.append((problem, json['titleKo'], json['level']))
return data
def baekjoon_parser(names, problems):
base_url = "https://www.acmicpc.net/status?"
data = []
for name in names:
row = [name]
for problem in problems:
problem_id = 'problem_id=' + str(problem)
user_id = 'user_id='+str(name)
url = base_url + problem_id + '&' + user_id
res = requests.get(url)
soup = bs4.BeautifulSoup(res.content, "html.parser")
results = soup.find('table', {'class': "table table-striped table-bordered"}).tbody
results = results.find_all('td', {'class': 'result'})
if not results:
row.append(-1)
continue
solve_flag = 0
for link in results:
answer = link.find('span', {'class' : 'result-text'}).text
                if '맞았습니다!!' == answer:  # '맞았습니다!!' is the "Accepted" verdict text on Baekjoon
solve_flag = 1
break
row.append(solve_flag)
data.append(row)
return data
if __name__ == '__main__':
names, problems = reader()
print(baekjoon_parser(names, problems))
solved_api(problems)
writer(solved_api(problems), baekjoon_parser(names, problems))
``` |
{
"source": "11241069/LaureateChannelProject",
"score": 3
} |
#### File: 11241069/LaureateChannelProject/main.py
```python
import jinja2
import json
import os
import webapp2
from apiclient.discovery import build
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
REGISTRATION_INSTRUCTIONS = """
You must set up a project and get an API key to run this code. <br>
Steps: <br>
1. Visit <a href="https://developers.google.com/youtube/v3/code_samples/python_appengine#create-api-key"
target='_top'>https://developers.google.com/youtube/v3/code_samples/python_appengine#create-api-key</a>
for instructions on setting up a project and key. Make sure that you have
enabled the YouTube Data API (v3) and the Freebase API for your project.
You do not need to set up OAuth credentials for this project. <br>
2. Once you have obtained a key, search for the text 'REPLACE_ME' in the
code and replace that string with your key. <br>
3. Click the reload button above the output container to view the new output. """
# Set API_KEY to the "API key" value from the "Access" tab of the
# Google APIs Console http://code.google.com/apis/console#access
# Please ensure that you have enabled the YouTube Data API and Freebase API
# for your project.
API_KEY = "<KEY>"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
class MainHandler(webapp2.RequestHandler):
def get(self):
if API_KEY == '<KEY>':
self.response.write(REGISTRATION_INSTRUCTIONS)
else:
# Present a list of videos associated with a given channel
self.request_channel()
def request_channel(self):
# Display a text box where the user can enter a channel name or
# channel ID.
select_channel_page = '''
<html>
<body>
<p>Which channel's videos do you want to see?</p>
<form method="post">
<p>
<select name="channel_type">
<option value="id">Channel ID</option>
<option value="name">Channel name</option>
</select>
<input name="channel" size="30">
</p>
<p><input type="submit" /></p>
</form>
</body>
</html>
'''
# Display the HTML page that shows the form.
self.response.out.write(select_channel_page)
def post(self):
# Service for calling the YouTube API
youtube = build(YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION,
developerKey=API_KEY)
# Use form inputs to create request params for channel details
channel_type = self.request.get('channel_type')
channels_response = None
if channel_type == 'id':
channels_response = youtube.channels().list(
id=self.request.get('channel'),
part='snippet,contentDetails'
).execute()
else:
channels_response = youtube.channels().list(
forUsername=self.request.get('channel'),
part='snippet,contentDetails'
).execute()
channel_name = ''
videos = []
for channel in channels_response['items']:
uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']
channel_name = channel['snippet']['title']
next_page_token = ''
while next_page_token is not None:
playlistitems_response = youtube.playlistItems().list(
playlistId=uploads_list_id,
part='snippet',
maxResults=50,
pageToken=next_page_token
).execute()
for playlist_item in playlistitems_response['items']:
videos.append(playlist_item)
next_page_token = playlistitems_response.get('tokenPagination', {}).get(
'nextPageToken')
if len(videos) > 100:
break
template_values = {
'channel_name': channel_name,
'videos': videos
}
self.response.headers['Content-type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/.*', MainHandler),
], debug=True)
``` |
{
"source": "1129ljc/video-inpainting-detection",
"score": 3
} |
#### File: video-inpainting-detection/HPFCN/resnet_v2_test.py
```python
import numpy as np
import torch
import resnet_utils
def create_test_input(batch_size, channels, height, width):
"""Create test input tensor.
Args:
batch_size: The number of images per batch.
channels: The number of channels per image.
height: The height of each image.
width: The width of each image.
Returns:
A constant `Tensor` with the mesh grid values along the spatial
dimensions.
"""
return torch.FloatTensor(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, 1, height, width]), [batch_size, channels, 1, 1])
)
def testSubsampleThreeByThree():
x = torch.arange(9, dtype=torch.float32).reshape((1, 1, 3, 3))
x = resnet_utils.subsample(x, 2)
expected = torch.FloatTensor([0, 2, 6, 8]).reshape((1, 1, 2, 2))
assert torch.allclose(x, expected)
def testSubsampleFourByFour():
x = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))
x = resnet_utils.subsample(x, 2)
expected = torch.FloatTensor([0, 2, 8, 10]).reshape((1, 1, 2, 2))
assert torch.allclose(x, expected)
def _testConv2DSame(n, n2, expected_output):
# Input image.
x = create_test_input(1, 1, n, n)
# Convolution kernel.
w = create_test_input(1, 1, 3, 3)
# Note that y1, y2, and y4 of the original test code are not tested as
# PyTorch does not support VALID padding for nn.Conv2D
y3_conv = resnet_utils.conv2d_same(1, 1, 3, stride=2)
y3_conv[1].weight.data = w
y3_conv[1].bias.data = torch.zeros_like(y3_conv[1].bias.data)
y3 = y3_conv(x)
y3_expected = torch.FloatTensor(expected_output)
y3_expected = y3_expected.reshape((1, 1, n2, n2))
assert torch.allclose(y3, y3_expected)
def testConv2DSameEven():
n, n2 = 4, 2
_testConv2DSame(n, n2, [[14, 43], [43, 84]])
def testConv2DSameOdd():
n, n2 = 5, 3
_testConv2DSame(n, n2, [[14, 43, 34], [43, 84, 55], [34, 55, 30]])
if __name__ == '__main__':
    testSubsampleThreeByThree()
    testSubsampleFourByFour()
    testConv2DSameEven()
    testConv2DSameOdd()
```
#### File: video-inpainting-detection/Unet/loss.py
```python
import torch
import torch.nn as nn
class FocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2, reduction='mean'):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.crit = nn.BCELoss(reduction='none')
def forward(self, logits, label):
logits = logits.float()
with torch.no_grad():
alpha = torch.empty_like(logits).fill_(1 - self.alpha)
alpha[label == 1] = self.alpha
pt = torch.where(label == 1, logits, 1 - logits)
ce_loss = self.crit(logits, label)
loss = (alpha * torch.pow(1 - pt, self.gamma) * ce_loss)
if self.reduction == 'mean':
loss = loss.mean()
if self.reduction == 'sum':
loss = loss.sum()
return loss
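# The class above computes the focal loss of Lin et al.:
#   FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
# where p_t is the predicted probability of the true class; BCELoss supplies the
# -log(p_t) term and `alpha`/`gamma` down-weight easy, well-classified pixels.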
if __name__ == '__main__':
model = FocalLoss()
input = torch.rand(size=[2, 1, 2, 2])
label = torch.randint(low=0, high=2, size=[2, 1, 2, 2])
label = label.float()
print(input)
print(label)
loss = model(input, label)
print(loss)
```
#### File: video-inpainting-detection/Unet/model.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
class Filter(nn.Module):
def __init__(self, size, band_start, band_end, use_learnable=True, norm=False):
super(Filter, self).__init__()
self.use_learnable = use_learnable
x = np.array(utils.generate_filter(band_start, band_end, size))
self.base = nn.Parameter(torch.tensor(x).float(), requires_grad=False)
if self.use_learnable:
self.learnable = nn.Parameter(torch.randn(size, size), requires_grad=True)
self.learnable.data.normal_(0., 0.1)
self.norm = norm
if norm:
self.ft_num = nn.Parameter(torch.sum(torch.tensor(x)), requires_grad=False)
def forward(self, x):
if self.use_learnable:
filt = self.base + utils.norm_sigma(self.learnable)
else:
filt = self.base
if self.norm:
y = x * filt / self.ft_num
else:
y = x * filt
return y
class DctFilter(nn.Module):
def __init__(self, size):
super(DctFilter, self).__init__()
dct_mat = utils.DCT_mat(size)
self._DCT_all = nn.Parameter(torch.tensor(dct_mat).float(), requires_grad=False)
self._DCT_all_T = nn.Parameter(torch.transpose(torch.tensor(dct_mat).float(), 0, 1), requires_grad=False)
low_filter = Filter(size, 0, size // 16)
middle_filter = Filter(size, size // 16, size // 8)
high_filter = Filter(size, size // 8, size)
all_filter = Filter(size, 0, size * 2)
# self.filters = nn.ModuleList([low_filter, middle_filter, high_filter, all_filter])
self.filters = nn.ModuleList([low_filter, middle_filter, high_filter])
def forward(self, x):
x_freq = self._DCT_all @ x @ self._DCT_all_T
y_list = []
for i in range(3):
x_pass = self.filters[i](x_freq)
y = self._DCT_all_T @ x_pass @ self._DCT_all
y_list.append(y)
out = torch.cat(y_list, dim=1)
return out
class SobelConv(nn.Module):
def __init__(self, in_chan, out_chan):
super(SobelConv, self).__init__()
filter_x, filter_y = utils.get_sobel(in_chan=in_chan, out_chan=1)
filter_x = torch.from_numpy(filter_x)
filter_y = torch.from_numpy(filter_y)
filter_x = nn.Parameter(filter_x, requires_grad=False)
filter_y = nn.Parameter(filter_y, requires_grad=False)
conv_x = nn.Conv2d(in_channels=in_chan, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
conv_y = nn.Conv2d(in_channels=in_chan, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
conv_x.weight = filter_x
conv_y.weight = filter_y
self.sobel_x = nn.Sequential(conv_x, nn.BatchNorm2d(1))
self.sobel_y = nn.Sequential(conv_y, nn.BatchNorm2d(1))
self.conv = nn.Conv2d(in_channels=3, out_channels=out_chan, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, input):
g_x = self.sobel_x(input)
g_y = self.sobel_y(input)
g = torch.sqrt(torch.pow(g_x, 2) + torch.pow(g_y, 2))
x = torch.sigmoid(g) * input
x = self.conv(x)
return x
class BayarConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(BayarConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.minus1 = (torch.ones(self.in_channels, self.out_channels, 1) * -1.000)
self.kernel = nn.Parameter(torch.rand(self.in_channels, self.out_channels, kernel_size ** 2 - 1), requires_grad=True)
def bayarConstraint(self):
self.kernel.data = self.kernel.permute(2, 0, 1)
self.kernel.data = torch.div(self.kernel.data, self.kernel.data.sum(0))
self.kernel.data = self.kernel.permute(1, 2, 0)
ctr = self.kernel_size ** 2 // 2
real_kernel = torch.cat((self.kernel[:, :, :ctr], self.minus1.to(self.kernel.device), self.kernel[:, :, ctr:]), dim=2)
real_kernel = real_kernel.reshape((self.out_channels, self.in_channels, self.kernel_size, self.kernel_size))
return real_kernel
def forward(self, x):
x = F.conv2d(x, self.bayarConstraint(), stride=self.stride, padding=self.padding)
return x
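# In bayarConstraint above, the centre weight of every kernel is fixed at -1 and the
# remaining k*k-1 weights are renormalised to sum to 1, so each filter sums to zero and
# behaves as a learnable high-pass / prediction-error filter (Bayar-style constrained convolution).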
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels, mid_channels=None)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class VideoInpaintingDetectionNet(nn.Module):
def __init__(self, bilinear=True, sobel=True, dct=True, bayarconv=True):
super(VideoInpaintingDetectionNet, self).__init__()
self.sobel = sobel
self.dct = dct
self.bayarconv = bayarconv
self.bayar_conv = BayarConv(3, 9)
self.sobel_conv = SobelConv(3, 9)
self.dct_conv = DctFilter(512)
self.inc = DoubleConv(27, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
factor = 2 if bilinear else 1
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = OutConv(64, 1)
def forward(self, x):
f1 = self.sobel_conv(x)
f2 = self.dct_conv(x)
f3 = self.bayar_conv(x)
f = torch.cat([f1, f2, f3], dim=1)
x1 = self.inc(f)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
out = torch.sigmoid(x)
return out
if __name__ == '__main__':
device = torch.device('cuda:0')
model = VideoInpaintingDetectionNet(bilinear=True, sobel=True, dct=True, bayarconv=False)
model = model.to(device)
input = torch.rand(size=[1, 3, 512, 512]).to(device)
print(input.size())
output = model(input)
print(output.size())
```
#### File: video-inpainting-detection/Unet/train_m.py
```python
import os
import cv2
import torch
import random
import argparse
import datetime
import numpy as np
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from model import VideoInpaintingDetectionNet
from loss import FocalLoss
from utils import F1, IOU
from torch.utils.data import Dataset
from torchvision import transforms
class IID_Dataset(Dataset):
def __init__(self, choice='train'):
self.input_size = (512, 512)
if choice == 'train':
self.image_path = '../dataset/DSTT-quality/1/train'
self.mask_path = '../dataset/mask_white/'
if choice == 'test':
self.image_path = '../dataset/DSTT-quality/1/val'
self.mask_path = '../dataset/mask_white/'
self.train_files = []
self.choice = choice
names = os.listdir(self.image_path)
for i in range(len(names)):
name = names[i]
image_name_dir = os.path.join(self.image_path, name)
mask_name_dir = os.path.join(self.mask_path, name)
mask_files = sorted(os.listdir(mask_name_dir))
mask_num = len(mask_files)
image_files = sorted(os.listdir(image_name_dir))
image_num = len(image_files)
frame_num = min(mask_num, image_num)
for j in range(frame_num):
mask_file = os.path.join(mask_name_dir, mask_files[j])
image_file = os.path.join(image_name_dir, image_files[j])
self.train_files.append([image_file, mask_file])
self.transform = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
def tensor(self, img):
img = img / 255.
return torch.from_numpy(img).float().permute(2, 0, 1)
def __len__(self):
return len(self.train_files)
def __getitem__(self, item):
fname1, fname2 = self.train_files[item]
img, mask = cv2.imread(fname1), cv2.imread(fname2)
img, mask = cv2.resize(img, self.input_size), cv2.resize(mask, self.input_size)
if self.choice == 'train':
if random.random() < 0.5:
img = cv2.flip(img, 0)
mask = cv2.flip(mask, 0)
if random.random() < 0.5:
img = cv2.flip(img, 1)
mask = cv2.flip(mask, 1)
img = self.transform(img)
mask = self.tensor(mask[:, :, :1])
return img, mask, fname1, fname2
def train(args):
gpu_id = args.gpu_id
epochs = 60
batch_size = 4
lr = 1e-4
print('epochs:', epochs)
print('batch_size:', batch_size)
print('lr:', lr)
print('gpu_id:', gpu_id)
# init dataset
train_dataset = IID_Dataset(choice='train')
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_dataset = IID_Dataset(choice='test')
test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=4)
print('train_dataset_len:', len(train_dataset))
print('train_loader_len:', len(train_loader))
print('test_dataset_len:', len(test_dataset))
print('test_loader_len:', len(test_loader))
# init model
device = torch.device('cuda:' + str(gpu_id))
model = VideoInpaintingDetectionNet(bilinear=True, sobel=True, dct=True, bayarconv=True)
model = model.to(device=device)
# init loss function
loss_function = FocalLoss()
# init optim
optimizer = Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
lr_sche = StepLR(optimizer, step_size=10, gamma=0.1)
# init val_info
val_info = []
max_iou = 0.0
max_iou_index = 0
max_f1 = 0.0
max_f1_index = 0
# training
for epoch in range(epochs):
# train
model.train()
epoch_train_loss = 0.0
epoch_train_loss1 = 0.0
for idx, (image, label, image_path, label_path) in enumerate(train_loader):
image_torch, label_torch = image.float().to(device), label.float().to(device)
predict = model(image_torch)
optimizer.zero_grad()
loss = loss_function(predict, label_torch)
loss.backward()
optimizer.step()
iter_loss = loss.cpu().float()
epoch_train_loss = epoch_train_loss + iter_loss
            epoch_train_loss1 = epoch_train_loss1 + iter_loss
time = datetime.datetime.now().strftime('%H:%M:%S')
if (idx + 1) % 50 == 0 and idx != 0:
epoch_train_loss = epoch_train_loss / 50
log_string = '[%s] epoch:[%d] iter:[%d]/[%d] loss:[%.5f]' % (time, epoch + 1, idx + 1, len(train_loader), epoch_train_loss)
print(log_string)
epoch_train_loss = 0.0
lr_sche.step()
epoch_train_loss1 = epoch_train_loss1 / len(train_loader)
log_string = 'epoch:[%d] train_loss:[%.5f]' % (epoch + 1, epoch_train_loss1)
print(log_string)
# test
model.eval()
val_loss, val_iou, val_f1 = 0.0, 0.0, 0.0
with torch.no_grad():
for idx, (image, label, image_path, label_path) in enumerate(test_loader):
image_torch, label_torch = image.float().to(device), label.float().to(device)
predict = model(image_torch)
loss = loss_function(predict, label_torch)
iter_loss = loss.cpu().float()
val_loss = val_loss + iter_loss
predict_mask = predict[0, 0, ...].cpu().detach().numpy()
predict_mask = np.where(predict_mask <= 0.5, 0, 1).flatten()
label = label.cpu().detach().numpy().astype('int').flatten()
iou, f1 = IOU(predict_mask, label), F1(predict_mask, label)
val_iou = val_iou + iou
val_f1 = val_f1 + f1
val_loss = val_loss / len(test_loader)
val_iou = val_iou / len(test_loader)
val_f1 = val_f1 / len(test_loader)
torch.save(model, os.path.join('./ckpt/1/', str(epoch + 1) + '.pth'))
log_string = 'epoch:[%d] val_loss:[%.5f] val_iou:[%.5f] val_f1:[%.5f]' % (epoch + 1, val_loss, val_iou, val_f1)
print(log_string)
val_info.append(log_string)
if val_iou > max_iou:
max_iou = val_iou
max_iou_index = epoch + 1
if val_f1 > max_f1:
max_f1 = val_f1
max_f1_index = epoch + 1
for i in val_info:
print(i)
print('max_iou:[%.5f] max_iou_index:[%d]' % (max_iou, max_iou_index))
print('max_f1:[%.5f] max_f1_index:[%d]' % (max_f1, max_f1_index))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='VideoInpaintingDetection')
parser.add_argument('--gpu_id', type=int, required=True, help='gpu id')
args = parser.parse_args()
train(args)
``` |
{
"source": "1129ljc/video-interpolation-detection",
"score": 3
} |
#### File: video-interpolation-detection/Steganalysis-CNN/dataload.py
```python
import os
import json
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
transform = transforms.Compose(
[
# transforms.Resize(size=(224, 224)),
transforms.ToTensor(),
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]
)
class DataSet(Dataset):
def __init__(self, data_file_path):
super(Dataset, self).__init__()
self.json_file_path = data_file_path
        assert os.path.isfile(data_file_path), 'The dataset json file cannot be read'
with open(data_file_path, 'r', encoding='utf8')as fp:
data = fp.readlines()
self.image_path_list = []
self.image_label_list = []
for i in range(len(data)):
line = data[i].split(' ')
self.image_path_list.append(line[0])
self.image_label_list.append(int(line[1][0:-1]))
self.image_num = len(self.image_path_list)
def __len__(self):
return self.image_num
def __getitem__(self, item):
image_file = self.image_path_list[item]
label = self.image_label_list[item]
label_torch = torch.tensor(label)
# image_torch = transform(Image.open(image_file).convert('RGB'))
image_torch = torch.from_numpy(np.array(Image.open(image_file).convert('RGB')))
image_torch = image_torch.permute(2, 0, 1).float()
image_torch = torch.unsqueeze(image_torch[0, :, :], dim=0)
return image_file, image_torch, label_torch
```
#### File: video-interpolation-detection/Steganalysis-CNN/train.py
```python
import os
import cv2
import tqdm
import torch
import datetime
import torchvision
import torch.nn as nn
from resnet import _ResNet18
from dataload import DataSet
import torch.optim as optim
from torch.utils.data import DataLoader
def train():
    # Hyperparameters and file paths
lr_init = 1e-4
epochs = 40
batch_size = 4
train_file_txt = '/sdb1/ljc/interpolation_detection/dataset/AVI_FPS30/AOBMC/FPS_30_AOBMC_CIF_train.txt'
train_log_file_dir = '/sdb1/ljc/interpolation_detection/dataset/AVI_FPS30/AOBMC/train_log3/'
    # Load the network
print('Load the network')
print('torch.cuda.is_available()', torch.cuda.is_available())
print('torch.cuda.device_count()', torch.cuda.device_count())
print('torch.cuda.get_device_name()', torch.cuda.get_device_name())
# resnet18 = torchvision.models.resnet18(pretrained=True)
resnet18 = _ResNet18()
device = torch.device('cuda:0')
resnet18.to(device)
print('resnet18', resnet18)
    # Load the data
    print('Loading training data')
train_dataset = DataSet(train_file_txt)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=4)
train_dataset_size = len(train_dataset)
train_dataset_batch_size = len(train_loader)
print('train_dataset_size: ', train_dataset_size)
print('train_dataset_batch_size: ', train_dataset_batch_size)
    # Set up the loss function, optimizer, and scheduler
print('Set up the loss function optimizer, etc')
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=resnet18.parameters(), lr=lr_init, betas=(0.9, 0.999), eps=1e-08)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=10, gamma=0.1, last_epoch=-1)
    # Training loop
for epoch_index in range(epochs):
resnet18.train()
epoch_train_loss = 0.0
epoch_train_loss1 = 0.0
for idx, (image_torch, label_torch) in enumerate(train_loader):
image_torch, label_torch = image_torch.to(device), label_torch.to(device)
optimizer.zero_grad()
predict = resnet18(x=image_torch)
loss = loss_function(input=predict, target=label_torch.long())
loss.backward()
optimizer.step()
iter_loss = loss.cpu().float()
epoch_train_loss = epoch_train_loss + iter_loss
epoch_train_loss1 = epoch_train_loss1 + iter_loss
time = datetime.datetime.now().strftime('%H:%M:%S')
if idx % 100 == 0:
epoch_train_loss /= 100
log_string = '[%s] epoch:[%d] iter:[%d]/[%d] loss:[%.5f]' % (
time, epoch_index + 1, idx + 1, train_dataset_batch_size, iter_loss)
print(log_string)
epoch_train_loss = 0.0
scheduler.step()
log_string = 'epoch:[%d] train_loss:[%.5f]' % (epoch_index + 1, epoch_train_loss1)
print(log_string)
torch.save(resnet18, train_log_file_dir + str(epoch_index + 1) + '.pth')
if __name__ == '__main__':
train()
``` |
{
"source": "112batman/ha-myenergi",
"score": 2
} |
#### File: custom_components/myenergi/number.py
```python
from homeassistant.components.number import NumberEntity
from .const import DOMAIN
from .entity import MyenergiEntity
async def async_setup_entry(hass, entry, async_add_devices):
"""Setup number platform."""
coordinator = hass.data[DOMAIN][entry.entry_id]
devices = []
# Don't cause a refresh when fetching devices
all_devices = await coordinator.client.get_devices("all", False)
for device in all_devices:
# Zappi only numbers
if device.kind == "zappi":
devices.append(MinimumGreenLevelNumber(coordinator, device, entry))
elif device.kind == "eddi":
devices.append(HeaterPriorityNumber(coordinator, device, entry))
async_add_devices(devices)
class HeaterPriorityNumber(MyenergiEntity, NumberEntity):
"""myenergi Sensor class."""
def __init__(self, coordinator, device, config_entry):
super().__init__(coordinator, device, config_entry)
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return (
f"{self.config_entry.entry_id}-{self.device.serial_number}-heater_priority"
)
@property
def name(self):
"""Return the name of the sensor."""
return f"myenergi {self.device.name} Heater Priority"
@property
def value(self):
"""Return the state of the sensor."""
return self.device.heater_priority
async def async_set_value(self, value: float) -> None:
"""Change the selected option."""
await self.device.set_heater_priority(f"heater{int(value)}")
self.async_schedule_update_ha_state()
@property
def min_value(self):
return 1
@property
def max_value(self):
return 2
@property
def step(self):
return 1
class MinimumGreenLevelNumber(MyenergiEntity, NumberEntity):
"""myenergi Sensor class."""
def __init__(self, coordinator, device, config_entry):
super().__init__(coordinator, device, config_entry)
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{self.config_entry.entry_id}-{self.device.serial_number}-minimum_green_level"
@property
def name(self):
"""Return the name of the sensor."""
return f"myenergi {self.device.name} Minimum Green Level"
@property
def value(self):
"""Return the state of the sensor."""
return self.device.minimum_green_level
async def async_set_value(self, value: float) -> None:
"""Change the selected option."""
await self.device.set_minimum_green_level(int(value))
self.async_schedule_update_ha_state()
@property
def min_value(self):
return 0
@property
def max_value(self):
return 100
@property
def step(self):
return 1
```
#### File: ha-myenergi/tests/__init__.py
```python
from __future__ import annotations
import json
from typing import Any
from unittest.mock import Mock
from unittest.mock import patch
from custom_components.myenergi import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from pytest_homeassistant_custom_component.common import MockConfigEntry
from .const import MOCK_CONFIG
TEST_CONFIG_ENTRY_ID = "77889900ab"
def load_fixture_json(name):
with open(f"tests/fixtures/{name}.json") as json_file:
data = json.load(json_file)
return data
def create_mock_myenergi_config_entry(
hass: HomeAssistant,
data: dict[str, Any] | None = None,
options: dict[str, Any] | None = None,
) -> ConfigEntry:
"""Add a test config entry."""
config_entry: MockConfigEntry = MockConfigEntry(
entry_id=TEST_CONFIG_ENTRY_ID,
domain=DOMAIN,
data=data or MOCK_CONFIG,
title="",
options=options or {},
)
config_entry.add_to_hass(hass)
return config_entry
async def setup_mock_myenergi_config_entry(
hass: HomeAssistant,
data: dict[str, Any] | None = None,
config_entry: ConfigEntry | None = None,
client: Mock | None = None,
) -> ConfigEntry:
client_data = "client"
if data is not None:
client_data = data.get("client_data", "client")
"""Add a mock sunspec config entry to hass."""
config_entry = config_entry or create_mock_myenergi_config_entry(hass, data)
"""Mock data from client.fetch_data()"""
with patch(
"pymyenergi.client.MyenergiClient.fetch_data",
return_value=load_fixture_json(client_data),
), patch(
"pymyenergi.zappi.Zappi.fetch_history_data",
return_value=load_fixture_json("history_zappi"),
), patch(
"pymyenergi.eddi.Eddi.fetch_history_data",
return_value=load_fixture_json("history_eddi"),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
```
#### File: ha-myenergi/tests/test_number.py
```python
from unittest.mock import MagicMock
from homeassistant.components.number import DOMAIN as NUMBER_DOMAIN
from homeassistant.components.number import SERVICE_SET_VALUE
from homeassistant.const import (
ATTR_ENTITY_ID,
)
from homeassistant.core import HomeAssistant
from . import setup_mock_myenergi_config_entry
TEST_ZAPPI_NUMBER_GREEN_LEVEL = "number.myenergi_test_zappi_1_minimum_green_level"
TEST_EDDI_NUMBER_HEATER_PRIORITY = "number.myenergi_test_eddi_1_heater_priority"
async def test_number(hass: HomeAssistant, mock_zappi_set_green: MagicMock) -> None:
"""Verify device information includes expected details."""
await setup_mock_myenergi_config_entry(hass)
entity_state = hass.states.get(TEST_ZAPPI_NUMBER_GREEN_LEVEL)
assert entity_state
assert entity_state.state == "50"
await hass.services.async_call(
NUMBER_DOMAIN,
SERVICE_SET_VALUE,
{
ATTR_ENTITY_ID: TEST_ZAPPI_NUMBER_GREEN_LEVEL,
"value": "58",
},
blocking=False,
)
assert mock_zappi_set_green.call_count == 0
await hass.async_block_till_done()
assert mock_zappi_set_green.call_count == 1
mock_zappi_set_green.assert_called_with(58)
async def test_heater_priority(
hass: HomeAssistant, mock_eddi_heater: MagicMock
) -> None:
"""Verify device information includes expected details."""
await setup_mock_myenergi_config_entry(hass)
entity_state = hass.states.get(TEST_EDDI_NUMBER_HEATER_PRIORITY)
assert entity_state
assert entity_state.state == "1"
await hass.services.async_call(
NUMBER_DOMAIN,
SERVICE_SET_VALUE,
{
ATTR_ENTITY_ID: TEST_EDDI_NUMBER_HEATER_PRIORITY,
"value": "2",
},
blocking=False,
)
assert mock_eddi_heater.call_count == 0
await hass.async_block_till_done()
assert mock_eddi_heater.call_count == 1
mock_eddi_heater.assert_called_with("heater2")
``` |
{
"source": "112buddyd/mtgcollector",
"score": 3
} |
#### File: app/scripts/utilities.py
```python
import re
from app.models import Deck, Card, Collection
def parse_paste(txt, col_type=Deck):
# Parse text chunk for card names and card quantities
    # Returns dict of name : {'qty': int, 'board': str}
# Standard formatting is qty card_name
# blank line
# sideboard
if col_type != Deck:
board = None
else:
board = 'main'
r_card = re.compile(r'(?P<qty>\d{1,2})\s+(?P<name>(\w|\s+)+)(\n|$)')
results = {}
for line in txt:
# if blank line is detected, change board to side, only works if col_type is Deck
if line == '' and board == 'main':
board = 'side'
match = r_card.match(line)
if match:
results[match.group('name')] = {
'qty': int(match.group('qty')),
'board':board
}
return results
```
#### File: migrations/versions/a89df01c20eb_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a89df01c20eb'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('db_info',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('md5', sa.String(length=128), nullable=True),
sa.Column('completed', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cardstocollections',
sa.Column('card_id', sa.Integer(), nullable=True),
sa.Column('collection_id', sa.Integer(), nullable=True),
sa.Column('quantity', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['card_id'], ['card.id'], ),
sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], )
)
op.add_column('cardstomainboard', sa.Column('quantity', sa.Integer(), nullable=True))
op.add_column('cardstosideboard', sa.Column('quantity', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('cardstosideboard', 'quantity')
op.drop_column('cardstomainboard', 'quantity')
op.drop_table('cardstocollections')
op.drop_table('db_info')
# ### end Alembic commands ###
``` |
{
"source": "112kutiko/wotb_has_my_money",
"score": 3
} |
#### File: 112kutiko/wotb_has_my_money/main_wotb.py
```python
import pandas as pd
import tkinter as tk
import sys
import win32ctypes.core
import os
from tkinter import filedialog
import plyer
from tkinter import messagebox
def fileo():
current_directory = plyer.filechooser.open_file()
return current_directory;
s=fileo()
print("start")
e=str(s[0]).replace(chr(92), "/")
df = pd.read_csv (e, sep=';')
pd.set_option("max_rows", None)
rez=pd.DataFrame(df)
df['PAYED_IN_CURR_AMT'] = df['PAYED_IN_CURR_AMT'].astype('float')
print("")
filtra=rez[['ACTION_DT','PAYMENT_CURRENCY','PAYED_IN_CURR_AMT','PAY_ORDER_STATUS_CODE','PAY_AGGREGATOR_DESC','PAY_ITEM_TYPE_NAME','AA_PACKAGE_LIST']]
all_cost=filtra[['PAY_ORDER_STATUS_CODE','PAYED_IN_CURR_AMT','PAY_AGGREGATOR_DESC']]
cop=3;
a=all_cost.groupby('PAY_ORDER_STATUS_CODE')['PAY_AGGREGATOR_DESC'].value_counts()
suma= all_cost.groupby('PAY_ORDER_STATUS_CODE')['PAYED_IN_CURR_AMT'].sum()
na=list(a['PAID'].index)
naa=list(a['PAID'].values)
list_of=[];
for i in range(len(naa)):
list_of.append(na[i] + " " +str(naa[i]) );
root = tk.Tk()
root.geometry("390x150")
root.title("how much money did i spend on the tank")
canvas = tk.Canvas(root,bg='grey')
ints0 = tk.Label(root,text="how much money did i spend on the tank")
ints0.grid(row=1, column=0,columnspan=2)
ints1 = tk.Label(root,text="all the purchases you made: ",anchor='w',justify="left",padx=0)
ints1.grid(row=2, column=0,sticky = 'w')
ints = tk.Label(root,text="money: ",anchor='w',padx=0)
ints2 = tk.Label(root,text=filtra['PAY_ORDER_STATUS_CODE'].value_counts()['PAID'])
ints2.grid(row=2, column=1,sticky = 'w')
ints3 = tk.Label(root,text=round(suma['PAID'],2))
ints3.grid(row=3, column=1,sticky = 'w')
ints.grid(row=3, column=0,sticky = 'w')
my = tk.Label(root,text="mainly used for purchasing:",anchor='w',justify="left",padx=0)
my.grid(row=4, column=0,sticky = 'w')
Lb1 = tk.Listbox(root,selectmode="BROWSE", height=4)
for i in range(len(naa)):
Lb1.insert(i,list_of[i])
Lb1.grid(row=4, column=1)
root.mainloop()
``` |
{
"source": "1130310223/Static-Dynamic-Attention",
"score": 2
} |
#### File: type1/len8/hybrid_len8_t1_train.py
```python
import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
#sys.path.append('../')
from hybrid_bid_t1_model import Seq2Seq
from hybrid_data_utils import *
import psutil
proc = psutil.Process(os.getpid())
def init_command_line(argv):
from argparse import ArgumentParser
usage = "seq2seq"
description = ArgumentParser(usage)
description.add_argument("--w2v_path", type=str, default="/users3/yfwang/data/w2v/opensubtitle/")
description.add_argument("--corpus_path", type=str, default="/users3/yfwang/data/corpus/opensubtitle/")
description.add_argument("--w2v", type=str, default="train_all_200e.w2v")
description.add_argument("--train_file", type=str, default="train_pairs.txt")
description.add_argument("--max_context_size", type=int, default=8)
description.add_argument("--batch_size", type=int, default=64)
description.add_argument("--enc_hidden_size", type=int, default=512)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--lr", type=float, default=0.001)
description.add_argument("--weight_decay", type=float, default=1e-5)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--epochs", type=int, default=10)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--shuffle", type=int, default=1)
description.add_argument("--print_every", type=int, default=200, help="print every batches when training")
description.add_argument("--save_model", type=int, default=1)
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" w2v:",os.path.join(opts.w2v_path,opts.w2v))
print (" train_file:",os.path.join(opts.corpus_path,opts.train_file))
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" enc_hidden_size:",opts.enc_hidden_size)
print (" max_senten_len:",opts.max_senten_len)
print (" learning rate:",opts.lr)
print (" weight_decay:",opts.weight_decay)
print (" dropout:",opts.dropout)
print (" epochs:",opts.epochs)
print (" teach_forcing:",opts.teach_forcing)
print (" shuffle:",opts.shuffle)
print (" print_every:",opts.print_every)
print (" save_model:",opts.save_model)
print (" weights:",opts.weights)
print ("")
'''Training function for a single batch'''
def train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx):
loss = 0
model_optimizer.zero_grad()
list_pred = model(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,ini_idx)
    # sum the loss of every predicted token to form the loss of the whole sentence
for idx,reply_tensor in enumerate(reply_tensor_batch):
loss_s = criterion(list_pred[idx],Variable(reply_tensor).cuda())
loss += loss_s
loss.backward()
model_optimizer.step()
return loss.data[0]
# training function over multiple epochs
def train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,epochs,
batch_size,max_senten_len,max_context_size,print_every,save_model,shuffle):
print ("start training...")
model.train()
state_loss = 10000.0
for ei in range(epochs):
print ("Iteration {}: ".format(ei+1))
epoch_loss = 0
every_loss = 0
t0 = time.time()
pairs_batches,num_batches = buildingPairsBatch(corpus_pairs,batch_size,shuffle=shuffle)
print ("num_batches:",num_batches)
idx_batch = 0
for reply_tensor_batch, contexts_tensor_batch, pad_matrix_batch in getTensorsPairsBatch(word2index,pairs_batches,max_context_size):
loss = train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx)
epoch_loss += loss
every_loss += loss
if (idx_batch+1)%print_every == 0:
every_avg_loss = every_loss/(max_senten_len*(idx_batch+1))
#every_loss = 0
t = round((time.time()-t0),2)
print ("{} batches finished, avg_loss:{},{}".format(idx_batch+1, every_avg_loss,str(t)))
idx_batch += 1
print ("memory percent: %.2f%%" % (proc.memory_percent()))
mem_info = proc.memory_info()
res_mem_use = mem_info[0]
print ("res_mem_use: {:.2f}MB".format(float(res_mem_use)/1024/1024))
epoch_avg_loss = epoch_loss/(max_senten_len*num_batches)
print ("epoch_avg_loss:",epoch_avg_loss)
if save_model and epoch_avg_loss < state_loss:
print ("save model...")
torch.save(model.state_dict(), "./seq2seq_parameters_IterEnd")
state_loss = epoch_avg_loss
print ("Iteration time:",time.time()-t0)
print ("=============================================" )
print ("")
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print ("")
train_file_name = os.path.join(opts.corpus_path,opts.train_file)
ctable,corpus_pairs = readingData(ctable,train_file_name,opts.max_senten_len,opts.max_context_size)
print (time.time()-t0)
print ("")
seq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
    # load previously saved weights to continue training
if opts.weights != None:
print ("load weights...")
seq2seq.load_state_dict(torch.load(opts.weights))
else:
seq2seq.init_parameters(ctable.getEmbMatrix())
model_optimizer = optim.Adam(seq2seq.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
criterion = nn.NLLLoss()
print ("memory percent: %.2f%%" % (proc.memory_percent()))
mem_info = proc.memory_info()
res_mem_use = mem_info[0]
print ("res_mem_use: {:.2f}MB".format(float(res_mem_use)/1024/1024))
print ("")
word2index = ctable.getWord2Index()
ini_idx = word2index[ini_char]
train_model(word2index,ini_idx,corpus_pairs,seq2seq,model_optimizer,criterion,opts.epochs,opts.batch_size,
opts.max_senten_len,opts.max_context_size,opts.print_every,opts.save_model,opts.shuffle)
print ("")
``` |
{
"source": "1132719438/pynvx",
"score": 2
} |
#### File: pynvx/tests/test.py
```python
from __future__ import print_function, absolute_import, division
import sys
import pynvx as m
if sys.version_info[0] == 2:
string_types = basestring
integer_types = [int, long]
else:
string_types = str
integer_types = [int]
# test pynvx api
def test_init():
v = m.cudaInit(ignore=True)
assert isinstance(v, int)
def test_driver_version():
v = m.cudaSystemGetDriverVersion(ignore=True)
assert isinstance(v, int)
def test_runtime_version():
v = m.cudaSystemGetRuntimeVersion(ignore=True)
assert isinstance(v, int)
def test_device_count():
v = m.cudaDeviceGetCount(ignore=True)
assert isinstance(v, int)
def test_device_handles():
v = m.cudaDeviceGetHandles(ignore=True)
assert isinstance(v, list)
def test_name():
v = m.cudaGetName(0, ignore=True)
assert isinstance(v, string_types)
def test_clock_rate():
v = m.cudaGetClockRate(0, ignore=True)
assert isinstance(v, int)
def test_compute_capability():
v = m.cudaGetComputeCapability(0, ignore=True)
assert isinstance(v, list)
def test_major():
v = m.cudaGetMajor(0, ignore=True)
assert isinstance(v, int)
def test_minor():
v = m.cudaGetMinor(0, ignore=True)
assert isinstance(v, int)
def test_process_count():
v = m.cudaGetMultiProcessorCount(0, ignore=True)
assert isinstance(v, int)
def test_pci_bus_id():
v = m.cudaGetPciBusID(0, ignore=True)
assert isinstance(v, int)
def test_pci_device_id():
v = m.cudaGetPciDeviceID(0, ignore=True)
assert isinstance(v, int)
def test_pci_domain_id():
v = m.cudaGetPciDomainID(0, ignore=True)
assert isinstance(v, int)
def test_memory_info():
v = m.cudaGetMemInfo(0, ignore=True)
assert isinstance(v, list)
def test_memory_total():
v = m.cudaGetMemTotal(0, ignore=True)
assert type(v) in integer_types
def test_memory_free():
v = m.cudaGetMemFree(0, ignore=True)
assert type(v) in integer_types
def test_memory_used():
v = m.cudaGetMemUsed(0, ignore=True)
assert type(v) in integer_types
# test pynvml wrapper
def test_pynvml_wrapper():
from pynvx import pynvml
from pynvx.pynvml import NVMLError
try:
pynvml.nvmlInit()
except NVMLError:
pass
``` |
{
"source": "1134642046/BookCrawler",
"score": 3
} |
#### File: 1134642046/BookCrawler/BookTop250WordCloud.py
```python
import requests
from bs4 import BeautifulSoup as bs
import re
import tools.Tools as tools
def get_top250_book(html_data, topchart_list):
    # Convert to a BeautifulSoup object
soup = bs(html_data, 'html.parser')
    # Find the Top 250 book list div
topchart_book = soup.find('div', 'indent')
    # Find all books in the list
topchart_book_list = topchart_book.find_all('tr', 'item')
# print(topchart_book_list[0])
    # Iterate over the book list and extract the information we need
for item in topchart_book_list:
        # New dict to hold the book info; could later be replaced by a class
topchart_dict = {}
# print(item)
        # Locate the element holding the detailed info
commentsNumber = item.find('span','pl')
# print(commentsNumber)
        # Get the number of ratings for the book
topchart_dict['value'] = re.search(r'[0-9]+',commentsNumber.string.strip()).group()
        # Get the book title
topchart_dict['name'] = ''
for string in item.find('div', 'pl2').a.stripped_strings:
if string is not None:
topchart_dict['name'] = topchart_dict['name'] + string
# print(topchart_dict)
        # Append the book info to the list
topchart_list.append(topchart_dict)
# print(topchart_list)
def run():
topchart_list = []
count = 0
url = 'https://book.douban.com/top250?start='
url2 = url
while count <= 250:
# print(url2)
html_data = tools.get_html(url2)
# print(url2)
get_top250_book(html_data, topchart_list)
count += 25
url2 = url+str(count)
return topchart_list
```
#### File: 1134642046/BookCrawler/manmanmai.py
```python
import requests
import json
import datetime
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
requests.packages.urllib3.disable_warnings()
# Crawl historical price data from manmanbuy (慢慢买)
class ManManMai:
def __init__(self):
        # run Chrome without a visible window
        chrome_opt = Options()  # create the options object
        chrome_opt.add_argument('--headless')  # headless mode
        # chrome_opt.add_argument('--disable-gpu')  # goes together with headless mode
        chrome_opt.add_argument('no-sandbox')
        chrome_opt.add_argument('disable-dev-shm-usage')
        # chrome_opt.add_argument('--window-size=1366,768')  # set the window size; it can affect rendering
        # # create the Chrome driver and pass in the options
# '/usr/bin/chromedriver',
self.driver = webdriver.Chrome( options=chrome_opt)
    # Get the final JD.com product link
def get_jd_url(self, full_url):
        # simulate the user opening the redirect link
self.driver.get(full_url)
full_url = self.driver.current_url
        match_obj = re.match(r'[a-zA-Z]+://[^\s]*[?]', full_url)
jd_url = match_obj.group(0)[:-1]
# self.driver.quit()
return jd_url
    def raw(self, text):  # escape special characters for the URL string
escape_dict = {
'/': '%252F',
'?': '%253F',
'=': '%253D',
':': '%253A',
'&': '%26',
}
new_string = ''
for char in text:
try:
new_string += escape_dict[char]
except KeyError:
new_string += char
return new_string
    # analysis and data cleaning
def mmm(self, item):
item = self.raw(item)
url = 'https://apapia.manmanbuy.com/ChromeWidgetServices/WidgetServices.ashx'
s = requests.session()
headers = {
'Host': 'apapia.manmanbuy.com',
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Proxy-Connection': 'close',
'Cookie': 'ASP.NET_SessionId=uwhkmhd023ce0yx22jag2e0o; jjkcpnew111=cp46144734_1171363291_2017/11/25',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 mmbWebBrowse',
'Content-Length': '457',
'Accept-Encoding': 'gzip',
'Connection': 'close',
}
postdata = 'c_devid=2C5039AF-99D0-4800-BC36-DEB3654D202C&username=&qs=true&c_engver=1.2.35&c_devtoken=&c_devmodel=<PASSWORD>&c_contype=wifi&' \
't=1537348981671&c_win=w_320_h_568&p_url={}&' \
'c_ostype=ios&jsoncallback=%3F&c_ctrl=w_search_trend0_f_content&methodName=getBiJiaInfo_wxsmall&c_devtype=phone&' \
'jgzspic=no&c_operator=%E4%B8%AD%E5%9B%BD%E7%A7%BB%E5%8A%A8&c_appver=2.9.0&bj=false&c_dp=2&c_osver=10.3.3'.format(item)
s.headers.update(headers)
req = s.get(url=url, data=postdata, verify=False).text
# print(req)
try:
js = json.loads(req)
title = js['single']['title'] ##名称
except Exception as e:
print(e)
# exit(mmm(item))
        ### data cleaning
        pic = js['single']['smallpic']  # image
        jiagequshi = js['single']['jiagequshi']  # price trend
        lowerPrice = js['single']['lowerPrice']  # lowest price
        lowerDate = js['single']['lowerDate']  # date of the lowest price
        lowerDate = re.search('[1-9]\d{0,9}', lowerDate).group(0)
        # print(lowerDate)
        lowerDate = time.strftime("%Y-%m-%d", time.localtime(int(lowerDate)))
        itemurl = js['single']['url']  # product URL
        qushi = js['single']['qushi']  # trend
        changPriceRemark = js['single']['changPriceRemark']  # trend change remark
        date_list = []  # dates
        price_list = []  # prices
        # date conversion
datalist = jiagequshi.replace('[Date.UTC(', '').replace(')', '').replace(']', '').split(',')
for i in range(0, len(datalist), 5):
day = int(datalist[i + 2])
if int(datalist[i + 1]) == 12:
mon = 1
year = int(datalist[i]) + 1
else:
mon = int(datalist[i + 1]) + 1
year = int(datalist[i])
date = datetime.date(year=year, month=mon, day=day)
price = float(datalist[i + 3])
date_list.append(date.strftime('%Y-%m-%d'))
price_list.append(price)
data = {'date': date_list, 'price': price_list}
data['title'] = title
data['pic'] = pic
data['lowerPrice'] = lowerPrice
data['lowerDate'] = lowerDate
data['itemurl'] = itemurl
data['qushi'] = qushi
data['changPriceRemark'] = changPriceRemark
print(data)
return data
```
#### File: 1134642046/BookCrawler/MostWatchedBooks.py
```python
from bs4 import BeautifulSoup as bs
import re
import tools.Tools as tools
# List to hold the scraped data
most_watched_book_list = []
def get_book_list(html_data):
    # Convert to a BeautifulSoup object
soup = bs(html_data, 'html.parser')
    # Find the most-watched books list
book_ul = soup.find_all('ul', 'chart-dashed-list')
    # Find all books in the list
book_list = book_ul[0].find_all('div', 'media__body')
    # Iterate over the book list and extract the information we need
for item in book_list:
        # New dict to hold the book info; could later be replaced by a class
most_watched_book_dict = {}
        # Get the book title
most_watched_book_dict['name'] = item.h2.a.string.strip()
        # Get the number of ratings (used as the weight for the word cloud)
evaluation = item.find('span', 'fleft ml8 color-gray').string.strip()
most_watched_book_dict['value'] = re.search(r'[0-9]+', evaluation).group()
        # Append the book info to the list
most_watched_book_list.append(most_watched_book_dict)
def run():
url1 = 'https://book.douban.com/chart?subcat=I'
url2 = 'https://book.douban.com/chart?subcat=F'
r1 = tools.get_html(url1)
r2 = tools.get_html(url2)
get_book_list(r1)
get_book_list(r2)
return most_watched_book_list
# run()
# print(most_watched_book_list)
``` |
{
"source": "1135/python_demos",
"score": 3
} |
#### File: 1135/python_demos/web_filter_whiteList.py
```python
import re
# Whitelist approach (safe)
# security check function
def isWhitelist(input_from_user):
    regex_ = '[^0-9a-zA-Z._]'  # return 0 if any character outside the whitelist 0-9|a-z|A-Z|._ is present
    result_list2 = re.findall(regex_, input_from_user, re.MULTILINE)  # re.MULTILINE: multi-line mode
    if len(result_list2):
        print '发现特殊字符'+str(result_list2)
        return 0  # does not satisfy the whitelist rule
    else:
        return 1  # satisfies the rule
# filter function for production use
def filter_whitelist(input_from_user):
    return re.sub("[^0-9A-Za-z]", "", input_from_user)
if __name__ == '__main__':
    # e.g. a string containing newlines does not satisfy the whitelist rule
long_str_from_user = """
test+_{|}<>?汉
123!@#¥%……*^&*(4)5,6
"""
if isWhitelist(long_str_from_user) == 0:
print "不符合白名单规则"
        # filter out the non-whitelist characters (and continue)
print filter_whitelist(long_str_from_user)
``` |
{
"source": "1136863240/data_synchronization",
"score": 3
} |
#### File: data_synchronization/data-client-test/main.py
```python
import os
import socket
def main():
# android_system = android.Android()
print('客户端启动')
# android_system.makeToast('客户端启动')
# server_ip = android_system.dialogGetInput('连接服务器', '服务器IP地址').result
server_ip = '127.0.0.1'
print('正在连接服务器:%s' % server_ip)
# android_system.makeToast('正在连接服务器')
socket_server = socket.socket()
socket_server.connect((server_ip, 20183))
print('开始同步数据')
# android_system.makeToast('开始同步数据')
# target_dir = '/storage/9017-100F/Pictures/pic'
# target_dir = '/storage/emulated/0/test'
# target_dir = 'd:/project/test1'
target_dir = 'C:\\Users\\Administrator\\Desktop\\test1'
current_file = None
file_name = ''
file_index = 1
receive_list = []
while True:
data = socket_server.recv(300).decode('utf-8')
if data == 'exit':
socket_server.sendall(bytes('success', 'utf-8'))
print('退出')
break
elif data[:7] == 'current':
file_name = data[9:]
receive_list.append(file_name)
target_file = '%s/%s' % (target_dir, file_name)
if os.path.isfile(target_file):
print('跳过文件:%s' % file_name)
socket_server.sendall(bytes('skip', 'utf-8'))
else:
print('选中文件:%s' % file_name)
current_file = open(target_file, 'wb')
socket_server.sendall(bytes('success', 'utf-8'))
elif data == 'file-data':
socket_server.sendall(bytes('start', 'utf-8'))
print('正在接收数据:%s' % file_name)
file_size = int(socket_server.recv(30).decode('utf-8'))
socket_server.sendall(bytes('got', 'utf-8'))
recv_size = 0
while recv_size < file_size:
file_data = socket_server.recv(20480000)
current_file.write(file_data)
recv_size += len(file_data)
print('接收完毕:%s' % file_name)
socket_server.sendall(bytes('finished', 'utf-8'))
elif data == 'close':
current_file.close()
print('%s 已关闭' % file_name)
socket_server.sendall(bytes('closed', 'utf-8'))
file_index = file_index + 1
socket_server.close()
    # list the files that already exist locally
    # and delete any file that was not received from the server
target_file_list = os.listdir(target_dir)
for file_item in target_file_list:
if file_item not in receive_list:
del_file = '%s/%s' % (target_dir, file_item)
print('删除文件:%s' % del_file)
os.unlink(del_file)
if __name__ == '__main__':
main()
``` |
{
"source": "1138886114/Global-contrast-based-salient-region-detection",
"score": 3
} |
#### File: 1138886114/Global-contrast-based-salient-region-detection/LC.py
```python
import numpy as np
import time
import cv2
def LC(image_gray):
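    # Gray-level contrast (LC) saliency: the saliency of a pixel with gray level g is the
    # histogram-weighted sum of |g - i| over all gray levels i. The 256x256 base_matrix
    # built below holds the pairwise distances |i - j|, so the per-level saliency is
    # computed once, mapped back onto the image, and normalized to [0, 1].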
image_height,image_width = image_gray.shape[:2]
base_matrix = np.zeros((256,256),dtype=np.int32)
base_line = np.array(range(256))
base_matrix[0] = base_line
for i in range(1,256):
base_matrix[i] = np.roll(base_line,i)
base_matrix = np.triu(base_matrix)
temp_matrix = np.triu(base_matrix).T
base_matrix = np.add(base_matrix ,temp_matrix)
hist_array = cv2.calcHist([image_gray], [0], None, [256], [0.0, 256.0])
hist_reshape = hist_array.reshape(1,256)
temp_matrix = np.tile(hist_reshape, (256, 1))
sum_diag_hist = np.sum(np.multiply(base_matrix,temp_matrix),axis=1)
image_gray_value = image_gray.reshape(1,image_height*image_width)[0]
image_gray_copy = np.zeros(image_height * image_width, dtype=np.int32)
for i,x in enumerate(image_gray_value):
image_gray_copy[i] = sum_diag_hist[x]
image_result = image_gray_copy.reshape(image_height,image_width)
image_result = (image_result-np.min(image_result))/(np.max(image_result)-np.min(image_result))
return image_result
if __name__ == '__main__':
file = r"C:\Users\xxx\Desktop\001.png"
start = time.time()
image_gray = cv2.imread(file, 0)
saliency_image = LC(image_gray)
# cv2.imwrite(r"C:\Users\xxx\Desktop\001_1.png",saliency_image*255)
end = time.time()
print("Duration: %.2f seconds." % (end - start))
cv2.imshow("gray saliency image", saliency_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
``` |
{
"source": "1140251/Ethsential",
"score": 2
} |
#### File: Ethsential/ethsential/__main__.py
```python
import sys
from .src.applications.server import ETHSENTIAL
from .src.applications.cli import CLI
from .src.parser import create_parser
def main():
parser = create_parser()
args = parser.parse_args()
if args.action == 'cli':
try:
CLI.exec_cmd(args)
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
sys.exit(0)
elif args.action == 'install':
try:
CLI.install()
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
elif args.action == 'tcp':
ETHSENTIAL.start_tcp(args.host, args.port)
else:
ETHSENTIAL.start_io()
if __name__ == '__main__':
main()
```
#### File: ethsential/src/parser_test.py
```python
import unittest
from io import StringIO
from unittest.mock import patch
from .parser import create_parser
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
def test_pass(self):
_ = self.parser.parse_args([])
def test_tcp_pass(self):
parsed = self.parser.parse_args(['tcp'])
self.assertEqual(parsed.action, 'tcp')
@patch('sys.stderr', new_callable=StringIO)
def test_cli_fail(self, mock_stderr):
with self.assertRaises(SystemExit):
_ = self.parser.parse_args(['cli'])
self.assertRegex(mock_stderr.getvalue(
), 'the following arguments are required: -f/--file, -t/--tool')
def test_cli_full_pass(self):
parsed = self.parser.parse_args(
['cli', '-f', 'test', 'p', '-t', 'all', 'mythril', '-op', 'example'])
self.assertEqual(parsed.action, 'cli')
@patch('sys.stderr', new_callable=StringIO)
def test_cli_fail_tool(self, mock_stderr):
with self.assertRaises(SystemExit):
_ = self.parser.parse_args(['cli', '-f', 'test', '-t', 'tool'])
self.assertRegex(mock_stderr.getvalue(), r"invalid choice: 'tool'")
@patch('sys.stderr', new_callable=StringIO)
def test_cli_empty_file(self, mock_stderr):
with self.assertRaises(SystemExit):
_ = self.parser.parse_args(['cli', '-f', '-t', 'all'])
self.assertRegex(mock_stderr.getvalue(),
'argument -f/--file: expected at least one argument')
@patch('sys.stderr', new_callable=StringIO)
def test_cli_empty_tool(self, mock_stderr):
with self.assertRaises(SystemExit):
_ = self.parser.parse_args(['cli', '-f', 'file', '-t'])
self.assertRegex(mock_stderr.getvalue(),
'argument -t/--tools: expected at least one argument')
```
#### File: src/tools/result.py
```python
import json
class Result:
def __init__(self, success, issues, error):
self.success = success
self.issues = issues
self.error = error
def to_json(self):
        return json.loads(json.dumps(self, default=lambda obj: obj.__dict__))
```
#### File: src/tools/slither.py
```python
import json
from . tool import Tool
class Slither(Tool):
def __init__(self):
pass
image = "1140251/slither"
command = '/bin/bash -c "solc use {version} && slither {contract} --json -"'
lang_supported = ["solidity"]
def parse(self, str_output):
result = {}
try:
result = json.loads(str_output)
except Exception:
data = str_output.splitlines()
result = json.loads(data[-1])
finally:
result["issues"] = []
if result["success"] and result['results'] and "detectors" in result['results']:
for detector in result['results']['detectors']:
for element in detector["elements"]:
json_response = {
"severity": detector["impact"] if(
'impact' in detector) else "",
"pattern": detector["check"] if(
'check' in detector) else "",
"description": detector["description"] if(
'description' in detector) else "",
"lines": [int(i) for i in element["source_mapping"]["lines"]],
"function": element["name"] if(
'name' in detector) else "",
"contract": ""
}
if element["type"] == "function":
if "type_specific_fields" in element and "signature" in element["type_specific_fields"]:
json_response["function"] = element["type_specific_fields"]["signature"]
if element["type"] != "contract":
json_response["contract"] = self.get_contract_name(
element)
result["issues"].append(json_response)
result['results'] = None
return result
def get_contract_name(self, element):
if "type_specific_fields" in element and "parent" in element["type_specific_fields"]:
if "type" in element["type_specific_fields"]["parent"] and element["type_specific_fields"]["parent"]["type"] == "contract":
return element["type_specific_fields"]["parent"]["name"]
else:
return self.get_contract_name(element["type_specific_fields"]["parent"])
else:
return None
``` |
{
"source": "1143048123/cddh",
"score": 3
} |
#### File: 1143048123/cddh/native_prophet.py
```python
# coding: utf-8
# quote from kmaiya/HQAutomator
# the Google-search part is copied verbatim from the original and left unmodified
import time
import json
import requests
import webbrowser
questions = []
def get_answer():
resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current',timeout=4).text
resp_dict = json.loads(resp)
if resp_dict['msg'] == 'no data':
return 'Waiting for question...'
else:
resp_dict = eval(str(resp))
question = resp_dict['data']['event']['desc']
question = question[question.find('.') + 1:question.find('?')]
if question not in questions:
questions.append(question)
webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
else:
return 'Waiting for new question...'
def main():
while True:
print(time.strftime('%H:%M:%S',time.localtime(time.time())))
print(get_answer())
time.sleep(1)
if __name__ == '__main__':
main()
``` |
{
"source": "1143048123/chongding",
"score": 3
} |
#### File: 1143048123/chongding/do.py
```python
# coding: utf-8
# quote from kmaiya/HQAutomator
# the Google-search part is copied verbatim from the original and left unmodified
import time
import json
import requests
from googleapiclient.discovery import build
g_cse_id = ''
g_cse_api_key = ''
questions = []
def google_search(query, start):
service = build("customsearch", "v1", developerKey=g_cse_api_key)
res = service.cse().list(q=query, cx=g_cse_id, start=start).execute()
return res
# Google Question and count number of each result
def metric1Func(question, answers):
met1 = [0, 0, 0]
res = google_search(question, None)
items = str(res['items']).lower()
met1[0] = items.count(answers[0].lower())
met1[1] = items.count(answers[1].lower())
met1[2] = items.count(answers[2].lower())
return met1
# Google Question and each specific Answer and count total results
def metric2Func(question, answers):
met2 = [0, 0, 0]
res0 = google_search(question + ' "' + answers[0] + '"', None)
res1 = google_search(question + ' "' + answers[1] + '"', None)
res2 = google_search(question + ' "' + answers[2] + '"', None)
return [int(res0['searchInformation']['totalResults']), int(res1['searchInformation']['totalResults']), int(res2['searchInformation']['totalResults'])]
def predict(metric1, metric2, answers):
max1 = metric1[0]
max2 = metric2[0]
for x in range(1, 3):
if metric1[x] > max1:
max1 = metric1[x]
if metric2[x] > max2:
max2 = metric2[x]
if metric1.count(0) == 3:
return answers[metric2.index(max2)]
elif metric1.count(max1) == 1:
if metric1.index(max1) == metric2.index(max2):
return answers[metric1.index(max1)]
else:
percent1 = max1 / sum(metric1)
percent2 = max2 / sum(metric2)
if percent1 >= percent2:
return answers[metric1.index(max1)]
else:
return answers[metric2.index(max2)]
elif metric1.count(max1) == 3:
return answers[metric2.index(max2)]
else:
return answers[metric2.index(max2)]
def get_answer():
resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current',timeout=4).text
resp_dict = json.loads(resp)
if resp_dict['msg'] == 'no data':
return 'Waiting for question...'
else:
resp_dict = eval(str(resp))
question = resp_dict['data']['event']['desc']
question = question[question.find('.') + 1:question.find('?')]
if question not in questions:
questions.append(question)
answers = eval(resp_dict['data']['event']['options'])
met1 = metric1Func(question, answers)
met2 = metric2Func(question, answers)
return predict(met1, met2, answers)
else:
return 'Waiting for new question...'
def main():
while True:
print(time.strftime('%H:%M:%S',time.localtime(time.time())))
print(get_answer())
time.sleep(1)
if __name__ == '__main__':
main()
``` |
{
"source": "1144639754/logkit",
"score": 2
} |
#### File: python-release-tool/sender/kodo.py
```python
import os
import qiniu
import ConfigParser
from base import Sender
class Kodo(Sender):
def __init__(self, cf):
Sender.__init__(self)
try:
ak = cf.get('sender_kodo', 'ak')
except ConfigParser.NoSectionError:
raise Exception('ak is empty')
except ConfigParser.NoOptionError:
raise Exception('ak is empty')
try:
sk = cf.get('sender_kodo', 'sk')
except ConfigParser.NoSectionError:
raise Exception('sk is empty')
except ConfigParser.NoOptionError:
raise Exception('sk is empty')
try:
bucket = cf.get('sender_kodo', 'bucket')
except ConfigParser.NoSectionError:
raise Exception('bucket is empty')
except ConfigParser.NoOptionError:
raise Exception('bucket is empty')
self.bucket = bucket
self.q = qiniu.Auth(ak, sk)
def _prepare(self, file_name):
self.token = self.q.upload_token(self.bucket, file_name, 3600*24*365*100)
def _put_file(self, file_path, file_name):
print 'begin to send file %s to bucket %s' % (file_name, self.bucket)
ret, info = qiniu.put_file(self.token, file_name, file_path)
if info.status_code == 200:
assert ret['key'] == file_name
assert ret['hash'] == qiniu.etag(file_path)
print 'send file %s to bucket %s finished' % (file_name, self.bucket)
else:
print 'send file %s error' % (file_name, )
print 'response is', info
def send(self, file_path):
file_name = os.path.basename(file_path)
self._prepare(file_name)
self._put_file(file_path, file_name)
``` |
{
"source": "1146116559/robort-iaib",
"score": 2
} |
#### File: 1146116559/robort-iaib/alanlam.py
```python
import os
import cv2
from matplotlib import pyplot as plt
import numpy as np
import math
cap = cv2.VideoCapture(1)
currentWB = cap.get(cv2.CAP_PROP_WHITE_BALANCE_BLUE_U)
cap.set(cv2.CAP_PROP_WHITE_BALANCE_BLUE_U, currentWB)
def qiege(gray):
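    # Adaptive thresholding: build the gray-level histogram, group it into bins of 30
    # levels, and locate the two most populated bins. If they are more than 2 bins apart
    # the frame is rejected ("stranger,none"); otherwise the image is cut just above the
    # dominant bin with cv2.threshold mode 4 (THRESH_TOZERO_INV).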
gn=gray.ravel()
gh=np.zeros(256,dtype =int)
for i in range (len(gn)):
gh[gn[i]]+=1
img= np.zeros((512,512,3),np.uint8)
for i in range(256):
cv2.line(img,(i*2,2000),(i*2,2000-gh[i]),(255,0,0),2)
qu = np.zeros(9)
qc = math.ceil(256/30)
for i in range(qc):
qu[i-1] = gh[(i-1)*30:((i-1)*30+30)].sum()
max_2=int(0)
wei=int(0)
for i in range (qc):
if qu[i] >max_2:
max_2=qu[i]
wei=i
max_1=int(0)
wei_1=int(0)
for i in range(qc):
if(qu[i] > max_1)and(i != wei):
max_1=qu[i]
wei_1=i
if abs(wei_1-wei)>2:
print("stranger,none")
#break (realtime)
elif len(qu) > int(wei + 2):
ret,thresh1 = cv2.threshold(gray,(wei + 2)*30,255,4)
cv2.imshow('thresh1-f',thresh1)
cv2.imshow('img',img)
return thresh1
def houghl(lines):
lines_data =np.empty([0,8])
for line in lines:
for rho,theta in line:
a= np.cos(theta)
b= np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(tempIamge,(x1,y1),(x2,y2),(0,255,0),5)
if(x2-x1 != 0):
slope = abs((y2-y1)/(x2-x1))
a = np.array([(x1,x2,y1,y2,slope,0,rho,theta)])
lines_data = np.append(lines_data,a, axis=0)
#print(slop)
else:
                slope = 0
a= np.array([(x1,x2,y1,y2,slope,1,rho,theta)])
lines_data = np.append(lines_data,a, axis=0)
for i in range(len(lines_data)):
if lines_data[i][5] != 1 :
return lines_data
#print("start capture")
while(True):
try :
ret,frame = cap.read()
scr = frame.copy()
brightLAB = cv2.cvtColor(scr, cv2.COLOR_BGR2LAB)
red = brightLAB[..., 1]
ret,red_1 = cv2.threshold (red, 130, 255, 0)
white_mask = cv2.bitwise_and(scr,scr, mask=red)
gray = cv2.cvtColor(white_mask,cv2.COLOR_BGR2GRAY)
thresh1 = qiege(gray)
tempIamge = scr.copy()
edges = cv2.Canny(gray,150,200,apertureSize = 3)
lines = cv2.HoughLines(edges,1,np.pi/180,150)
lines_data=houghl(lines)
# print("....")
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
    except ValueError as error:
        print(error)
#print("DONE.")
```
#### File: robort-iaib/vs/real_time_rgb_v2.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
def ClassifyColor( BGR, width, height ):  # classify colors (BGR, width, height)
    r_threshold = 20  # red threshold, before 10
    b_threshold = 20  # blue threshold, before 10
    FortyFive_degree = math.pi / 4  # 45 degrees
    grey_threshold = 10.0 * math.pi / 180.0  # hue-angle tolerance, before 5
for y in range(0, height, 1):
cur_row = BGR[y]
for x in range(0, width, 1):
[b1, g1, r1] = cur_row[x] #b1, g1 and r1 are type unsigned integer 8 bits
            # b1, g1 and r1 are unsigned 8-bit integers; convert them to 32-bit integers
b = int(b1)
g = int(g1)
r = int(r1)
#Red color
if r - b > r_threshold:
if r - g > r_threshold:
cur_row[x] = [255, 255, 255]
                    continue  # skip to the next pixel
#Blue color
if b - r > b_threshold:
if b - g > b_threshold:
cur_row[x] = [0, 0, 0]
continue
#Other colors
cur_row[x] = [0, 0, 0]
return BGR
cap = cv2.VideoCapture(0)
while(1):
if __name__ == '__main__':
ret, frame = cap.read()
if frame.size == 0:
##if ret == False:
print(f"Fail to read image {filename}")
else:
cv2.imshow('Original frame',frame)
(height, width, channels) = frame.shape
print(f"frame dimension ( {height} , {width}, {channels})\n" )
if channels != 3:
print("Image is not a color image ##################")
if frame.dtype != "uint8":
print("Image is not of type uint8 #################")
ms = frame.copy()
kernel = np.ones((5,5), np.uint8)
##dilation = cv2.dilate(test, kernel, iterations = 3)
dilation = cv2.dilate(ms, kernel, iterations = 7)
cv2.imshow('dilation', dilation)
kernel = np.ones((7,7), np.uint8)
##erosion = cv2.erode(dilation, kernel, iterations = 3)
erosion = cv2.erode(dilation, kernel, iterations = 9)
cv2.imshow('erosion', erosion)
#Convert image to NumPy array (Create a new 2D array)
# Note: The order is in BGR format! 將圖像轉換為NumPy數組(創建新的2D數組)
BGR_array = np.array( erosion )
#Classify red, blue and grey color
Result_array = ClassifyColor( BGR_array, width, height )
cv2.imshow('BGR',Result_array)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
``` |
{
"source": "1146976048qq/AAA",
"score": 3
} |
#### File: AAA/data_process/data_process.py
```python
import argparse
import json
import os
import re
import sys
import pandas as pd
from allennlp.predictors.predictor import Predictor
from lxml import etree
from nltk.tokenize import TreebankWordTokenizer
from tqdm import tqdm
pt_models_dir = '/data/kkzhang/Cross_GAT/AllenNLP'
model_path = os.path.join(pt_models_dir, "biaffine-dependency-parser-ptb-2020.04.06.tar.gz")
def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--model_path', type=str, default=model_path,
help='Path of biaffine dependency parser.')
parser.add_argument('--data_path', type=str, default='/data/kkzhang/Cross_GAT/dataset/raw_data',
help='Directory of where original data held.')
return parser.parse_args()
def text2docs(file_path, predictor):
'''
Annotate the sentences from extracted txt file using AllenNLP's predictor.
'''
with open(file_path, 'r') as f:
sentences = f.readlines()
docs = []
print('Predicting dependency information...')
for i in tqdm(range(len(sentences))):
docs.append(predictor.predict(sentence=sentences[i]))
return docs
def dependencies2format(doc): # doc.sentences[i]
'''
Format annotation: sentence of keys
- tokens
- tags
- predicted_dependencies
- predicted_heads
- dependencies
'''
sentence = {}
sentence['tokens'] = doc['words']
sentence['tags'] = doc['pos']
# sentence['energy'] = doc['energy']
predicted_dependencies = doc['predicted_dependencies']
predicted_heads = doc['predicted_heads']
sentence['predicted_dependencies'] = doc['predicted_dependencies']
sentence['predicted_heads'] = doc['predicted_heads']
sentence['dependencies'] = []
for idx, item in enumerate(predicted_dependencies):
dep_tag = item
frm = predicted_heads[idx]
to = idx + 1
sentence['dependencies'].append([dep_tag, frm, to])
return sentence
def get_dependencies(file_path, predictor):
docs = text2docs(file_path, predictor)
sentences = [dependencies2format(doc) for doc in docs]
return sentences
def syntax2json(sentences, origin_file):
json_data = []
tk = TreebankWordTokenizer()
mismatch_counter = 0
idx = 0
with open(origin_file, 'r') as fopen:
raw = fopen.readlines()
for sentence in raw:
example = dict()
example["sentence"] = sentence
example['tokens'] = sentences[idx]['tokens']
example['tags'] = sentences[idx]['tags']
example['predicted_dependencies'] = sentences[idx]['predicted_dependencies']
example['predicted_heads'] = sentences[idx]['predicted_heads']
example['dependencies'] = sentences[idx]['dependencies']
json_data.append(example)
idx+=1
# extended_filename = origin_file.replace('.txt', '_biaffine_depparsed.json')
extended_filename = origin_file.replace('.txt', '_depparse.json')
with open(extended_filename, 'w') as f:
json.dump(json_data, f)
print('done,json_data length:', len(json_data))
print('idx_length:', idx) # DataItem Number
def main():
args = parse_args()
print("-------------", args.model_path)
predictor = Predictor.from_path(args.model_path)
# data = [('books/review_negative.txt', 'books/review_positive.txt', 'books/review_unlabeled'),
# ('dvd/review_negative.txt', 'dvd/review_positive.txt', 'dvd/review_unlabeled.txt'),
# ('electronics/review_negative.txt', 'electronics/review_positive.txt', 'electronics/review_unlabeled.txt'),
# ('kitchen/review_negative.txt', 'kitchen/review_positive.txt', 'kitchen/review_unlabeled.txt'),
# ('video/review_negative.txt', 'video/review_positive.txt', 'video/review_unlabeled.txt')
# ]
# for neg_file, pos_file, unlabel_file in data:
# neg_sentences = get_dependencies(os.path.join(args.data_path, neg_file), predictor)
# pos_sentences = get_dependencies(os.path.join(args.data_path, pos_file), predictor)
# unlabel_sentences = get_dependencies(os.path.join(args.data_path, unlabel_file), predictor)
# print(len(neg_sentences), len(pos_sentences), len(unlabel_sentences))
# syntax2json(neg_sentences, os.path.join(args.data_path, neg_file))
# syntax2json(pos_sentences, os.path.join(args.data_path, pos_file))
# syntax2json(unlabel_sentences, os.path.join(args.data_path, unlabel_file))
data = [('books/review_negative.txt', 'books/review_positive.txt'),
('dvd/review_negative.txt', 'dvd/review_positive.txt'),
('electronics/review_negative.txt', 'electronics/review_positive.txt'),
('kitchen/review_negative.txt', 'kitchen/review_positive.txt'),
('video/review_negative.txt', 'video/review_positive.txt')
]
for neg_file, pos_file in data:
neg_sentences = get_dependencies(os.path.join(args.data_path, neg_file), predictor)
pos_sentences = get_dependencies(os.path.join(args.data_path, pos_file), predictor)
print(len(neg_sentences), len(pos_sentences))
syntax2json(neg_sentences, os.path.join(args.data_path, neg_file))
syntax2json(pos_sentences, os.path.join(args.data_path, pos_file))
if __name__ == "__main__":
main()
``` |
{
"source": "1146976048qq/Dynamic_T",
"score": 3
} |
#### File: Dynamic_T/bert_model/bert_dataloader.py
```python
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
#from .bert_datafeatures import convert_examples_to_features
class BertDataset(TensorDataset):
def __init__(self, *tensors):
tensors = tuple(self.convert_datatype(data_) for data_ in tensors)
super(BertDataset, self).__init__(*tensors)
def check_datatype(self, data_tensor):
return type(data_tensor)==type(torch.tensor([1,2]))
def convert_datatype(self, data_tensor):
"""
Convert data_tensor to tensor.LongTensor()
"""
if not self.check_datatype(data_tensor):
return torch.LongTensor(data_tensor)
else:
return data_tensor
def shape(self):
return self.data_tensor[0].shape
def get_batch(data_examples, bert_input_proc,
label_available=True,batch_size=16,num_workers=-1):
"""
    Args:
        data_examples: examples from DataProcessor get_*_examples
        bert_input_proc: input-feature processor (e.g. BertInputFeatures) that converts examples to features
        label_available: True, whether there is label in dataset
        batch_size: int
        num_workers: int, for distributed training
    return:
        DataLoader
"""
#data = convert_examples_to_features(data_examples, label_map, max_seq_length, tokenizer, output_mode)
#bert_x_proc = BertInputFeatures(tokenizer, max_seq_len, label_proc)
data = bert_input_proc.fit(data_examples)
# loop over data
# to do: think an efficient way to process features
input_ids = [f.input_ids for f in data]
input_mask = [f.input_mask for f in data]
segment_ids = [f.segment_ids for f in data]
if label_available:
label_id = [f.label_id for f in data]
# for train and dev dataset
data_set = BertDataset(input_ids, input_mask, segment_ids, label_id)
# use sampler
if num_workers == -1:
data_sampler = RandomSampler(data_set)
else:
data_sampler = DistributedSampler(data_set)
else:
# for test dataset
data_set = BertDataset(input_ids, input_mask, segment_ids)
#data_sampler = SequentialSampler(data_set)
data_sampler = None
return DataLoader(data_set, sampler=data_sampler, batch_size=batch_size)
# import torch
# from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
# from torch.utils.data.distributed import DistributedSampler
# from .bert_datafeatures import convert_examples_to_features
# class BertDataset(TensorDataset):
# def __init__(self, *tensors):
# tensors = tuple(self.convert_datatype(data_) for data_ in tensors)
# super(BertDataset, self).__init__(*tensors)
# def check_datatype(self, data_tensor):
# return type(data_tensor)==type(torch.tensor([1,2]))
# def convert_datatype(self, data_tensor):
# """
# Convert data_tensor to tensor.LongTensor()
# """
# if not self.check_datatype(data_tensor):
# return torch.LongTensor(data_tensor)
# else:
# return data_tensor
# def shape(self):
# return self.data_tensor[0].shape
# def get_batch(data_examples, label_map, max_seq_length, tokenizer, output_mode="classification",
# label_available=True, batch_size=32, num_workers=-1):
# """
# Args:
# data_examples: examples from DataProcessor get_*_examples
# #label_list: list of all labels
# label_map: dict, {label:label_index}
# max_seq_length: int, fixed length that sentences are converted to
# tokenizer: BertTokenizer
# output_mode: task mode, whether it is classification or regression
# label_availabel: True, whether there is label in dataset
# batch_size: int
# num_workers: int, for distributed training
# return:
# DataLoader
# """
# data = convert_examples_to_features(data_examples, label_map, max_seq_length, tokenizer, output_mode)
# # loop over data
# # to do: think an efficient way to process features
# input_ids = [f.input_ids for f in data]
# input_mask = [f.input_mask for f in data]
# segment_ids = [f.segment_ids for f in data]
# if label_available:
# label_id = [f.label_id for f in data]
# # for train and dev dataset
# data_set = BertDataset(input_ids, input_mask, segment_ids, label_id)
# # use sampler
# if num_workers == -1:
# data_sampler = RandomSampler(data_set)
# else:
# data_sampler = DistributedSampler(data_set)
# else:
# # for test dataset
# data_set = BertDataset(input_ids, input_mask, segment_ids)
# data_sampler = SequentialSampler(data_set)
# return DataLoader(data_set, sampler=data_sampler, batch_size=batch_size)
```
#### File: Dynamic_T/transformer/transformer.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import logging
class Embeddings(nn.Module):
"""
This is word embeddings
"""
def __init__(self, vocab_size, d_model):
super(Embeddings, self).__init__()
self.vocab_size = vocab_size
self.d_model = d_model # word_vec_size, 512
self.build_model()
def build_model(self):
self.word_embeddings = nn.Embedding(self.vocab_size, self.d_model)
def forward(self, x):
return self.word_embeddings(x)*math.sqrt(self.d_model)
class PositionEncoding(nn.Module):
"""
This is the special position encoding used in transformer
"""
def __init__(self, d_model, keep_prob, max_len=5000):
super(PositionEncoding,self).__init__()
self.d_model = d_model # word_vec_size, 512
self.keep_prob = keep_prob
self.max_len = max_len
self.build_model()
def build_model(self):
self.dropout = nn.Dropout(self.keep_prob)
# compute position encoding
self.pe = torch.zeros(self.max_len, self.d_model)
#position = torch.arange(0,self.max_len).unsqueeze(1)
#div_term = torch.exp(torch.arange(0,self.d_model,2)*(-math.log(10000.0)/self.d_model))
position = torch.arange(0., self.max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., self.d_model, 2) * -(math.log(10000.0) / self.d_model))
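        # sinusoidal position encoding:
        #   PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
        #   PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))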
self.pe[:,0::2] = torch.sin(position*div_term)
self.pe[:,1::2] = torch.cos(position*div_term)
self.pe = self.pe.unsqueeze(0)
def forward(self, x):
x = x + Variable(self.pe[:,:x.size(1)],requires_grad=False)
return self.dropout(x)
class PositionwiseFeedForward(nn.Module):
"""
This is the positionwise Feed Forward layer
"""
def __init__(self, d_model, d_ff, keep_prob=0.1):
super(PositionwiseFeedForward, self).__init__()
self.d_model = d_model
self.d_ff = d_ff
self.keep_prob = keep_prob
self.build_model()
def build_model(self):
self.W_1 = nn.Linear(self.d_model, self.d_ff)
self.W_2 = nn.Linear(self.d_ff, self.d_model)
self.dropout = nn.Dropout(self.keep_prob)
def forward(self, x):
"""
should be max(0,xW_1+b1)W_2+b_2
"""
return self.W_2(self.dropout(F.relu(self.W_1(x))))
class ScaledDotProductAttention(nn.Module):
def __init__(self, keep_prob=0.1):
super(ScaledDotProductAttention,self).__init__()
self.keep_prob = keep_prob
self.build_model()
def build_model(self):
self.dropout = nn.Dropout(self.keep_prob)
def forward(self,query,key,value,mask=None):
"""
Args:
query: (batch, heads, q_len, d_k)
key: (batch, heads,k_len, d_k)
value: (batch, heads,k_len, d_k)
"""
d_k = query.size(3)
seq_len = query.size(2)
heads = query.size(1)
scores = torch.matmul(query, key.transpose(-2,-1))/math.sqrt(d_k)
# scores (batch,heads, q_len, k_len)
if mask is not None:
scores = scores.masked_fill(mask==0,-1e9)
p_attn = F.softmax(scores,dim=-1)
# p_attn: (batch, heads, q_len, k_len)
logging.debug('scaleddot attention: p_attn shape {}'.format(p_attn.shape))
p_attn = self.dropout(p_attn)
p_attn = torch.matmul(p_attn, value)
# p_attn shape: (batch, heads, q_len, d_k)
p_attn = p_attn.transpose(2,1).contiguous().view(-1, seq_len, heads*d_k)
#return torch.matmul(p_attn,value), p_attn
return p_attn
def clones(module, N):
"""
Produce N identical layers
"""
from copy import deepcopy
return nn.ModuleList([deepcopy(module) for _ in range(N)])
class MultiHeadAttention(nn.Module):
"""
multihead attention, h is number of heads
"""
def __init__(self, hidden_size, heads, keep_prob=0.1):
super(MultiHeadAttention,self).__init__()
assert hidden_size % heads == 0
self.hidden_size = hidden_size
self.heads = heads
self.keep_prob = keep_prob
self.d_k = self.hidden_size // self.heads
self.build_model()
def build_model(self):
self.dropout = nn.Dropout(self.keep_prob)
self.linears = clones(nn.Linear(self.hidden_size, self.hidden_size), 4)
self.scaleddotattn = ScaledDotProductAttention(self.keep_prob)
self.attn = None
    def forward(self, query, key, value, mask=None):
        """
        Args:
            query, key, value: (batch, seq_len, d_model)
        """
        if mask is not None:
            # same mask applied to all heads
            mask = mask.unsqueeze(1)
        # first, do all the linear projections in batch from d_model to heads*d_k
        query, key, value = [l(x).view(-1, x.size(1), self.heads, self.d_k).transpose(1, 2)
                             for l, x in zip(self.linears[:3], (query, key, value))]
        logging.debug('multihead attention: query shape {}, key shape {}, value shape {}'.format(query.shape, key.shape, value.shape))
        # second, apply scaled dot-product attention on all the projected vectors in batch;
        # the attention module already concatenates the heads back to (batch, q_len, heads*d_k)
        x = self.scaleddotattn(query, key, value, mask)
        logging.debug('multihead attention: x {}'.format(x.shape))
        # third, apply the final output projection
        x = self.linears[-1](x)
        logging.debug('multihead attention: x {}'.format(x.shape))
        return x
class LayerNorm(nn.Module):
"""
There is implementation in nn.LayerNorm
"""
def __init__(self,features,eps=1e-6):
super(LayerNorm,self).__init__()
self.features = features
self.eps = eps
self.build_model()
def build_model(self):
self.a_2 = nn.Parameter(torch.ones(self.features))
self.b_2 = nn.Parameter(torch.zeros(self.features))
def forward(self, x):
mean = x.mean(-1,keepdim=True)
std = x.std(-1,keepdim=True)
logging.debug('LayerNorm: mean shape {}, std shape {}'.format(mean.shape, std.shape))
return self.a_2*(x-mean)/(std + self.eps)+self.b_2
class SublayerConnection(nn.Module):
def __init__(self,size,keep_prob):
super(SublayerConnection,self).__init__()
self.keep_prob = keep_prob
self.size = size
self.build_model()
def build_model(self):
self.dropout = nn.Dropout(self.keep_prob)
self.norm = LayerNorm(self.size)
def forward(self, x, sublayer):
"""
Apply residual connection to any sublayer with the same size
"""
return x+self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"""
This is just one layer of encoder,contains one multihead attention and one positionwise feed forward layer
"""
def __init__(self,size,self_attn,feed_forward,keep_prob):
super(EncoderLayer,self).__init__()
self.size = size
self.self_attn = self_attn
self.feed_forward = feed_forward
self.keep_prob = keep_prob
self.build_model()
def build_model(self):
self.sublayer = clones(SublayerConnection(self.size,self.keep_prob),2)
def forward(self, x, mask):
"""
one encoder layer
"""
x = self.sublayer[0](x, lambda x:self.self_attn(x,x,x,mask))
return self.sublayer[1](x, self.feed_forward)
class Encoder(nn.Module):
"""
N layers of encoder
"""
def __init__(self, layer, N=6):
super(Encoder, self).__init__()
self.layer = layer # encoder layer
self.N = N
self.build_model()
def build_model(self):
self.layers = clones(self.layer, self.N)
self.norm = LayerNorm(self.layer.size)
def forward(self, x, mask):
"""
Pass the input and mask through each layer
"""
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class DecoderLayer(nn.Module):
def __init__(self, size, self_attn, src_attn, feed_forward, keep_prob):
super(DecoderLayer,self).__init__()
self.size = size
self.self_attn = self_attn
self.feed_forward = feed_forward
self.src_attn = src_attn
self.keep_prob = keep_prob
self.build_model()
def build_model(self):
self.sublayer = clones(SublayerConnection(self.size,self.keep_prob), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"""
one decoder layer
Args:
x: target
memory: output from encoder
src_mask: mask for source input
tgt_mask: mask for target input
"""
m = memory
x = self.sublayer[0](x, lambda x:self.self_attn(x,x,x,tgt_mask))
x = self.sublayer[1](x, lambda x:self.src_attn(x,m,m,src_mask))
return self.sublayer[2](x, self.feed_forward)
class Decoder(nn.Module):
"""
N layers of decoder
"""
def __init__(self, layer, N=6):
super(Decoder, self).__init__()
self.layer = layer # decoder layer
self.N = N
self.build_model()
def build_model(self):
self.layers = clones(self.layer, self.N)
self.norm = LayerNorm(self.layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
"""
Args:
x: target
memory: output from encoder
src_mask: mask for source input
tgt_mask: mask for target input
"""
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class Generator(nn.Module):
"""
Final Layer after decoder layers, include linear and softmax, returns probability
"""
def __init__(self,vocab_size, d_model):
super(Generator, self).__init__()
self.vocab_size = vocab_size
self.d_model = d_model
self.build_model()
def build_model(self):
self.proj = nn.Linear(self.d_model, self.vocab_size)
def forward(self, x):
return F.log_softmax(self.proj(x),dim=-1)
class EncoderDecoder(nn.Module):
def __init__(self,encoder,decoder,src_embed,tgt_embed,generator):
super(EncoderDecoder,self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def encode(self, src, src_mask):
"""
Args:
src: source sentence (batch, seq_len)
src_mask: mask for source sentence
"""
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
"""
Args:
memory: output from encoder
src_mask: mask for source input
tgt: target sentence
tgt_mask: mask for target input
"""
return self.decoder(self.tgt_embed(tgt),memory,src_mask,tgt_mask)
def forward(self,src,tgt,src_mask,tgt_mask):
"""
process source and target input
"""
return self.decode(self.encode(src,src_mask),src_mask,tgt,tgt_mask)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
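    # the returned (1, size, size) boolean mask is True where a target position is
    # allowed to attend, i.e. at itself and at earlier positions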
return torch.from_numpy(subsequent_mask) == 0
def make_model(src_vocab,tgt_vocab,N=6,d_model=512,d_ff=2048,h=8,keep_prob=0.1):
"""
Helper: construct transformer encoder-decoder
"""
from copy import deepcopy
attn = MultiHeadAttention(d_model,h,keep_prob)
ff = PositionwiseFeedForward(d_model,d_ff,keep_prob)
position = PositionEncoding(d_model,keep_prob)
model = EncoderDecoder(Encoder(EncoderLayer(d_model,deepcopy(attn),deepcopy(ff),keep_prob),N),
Decoder(DecoderLayer(d_model,deepcopy(attn),deepcopy(attn),deepcopy(ff),keep_prob),N),
nn.Sequential(Embeddings(src_vocab,d_model),deepcopy(position)),
nn.Sequential(Embeddings(tgt_vocab,d_model),deepcopy(position)),
Generator(tgt_vocab,d_model)
)
# initialize parameters
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
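# A minimal usage sketch (an illustrative assumption, not from the original repository; it relies on the
# MultiHeadAttention, PositionwiseFeedForward, PositionEncoding, Embeddings, clones and LayerNorm helpers
# being defined elsewhere in this module):
# model = make_model(src_vocab=1000, tgt_vocab=1000, N=2)
# src = torch.randint(1, 1000, (1, 10))   # (batch, seq_len)
# src_mask = torch.ones(1, 1, 10)
# memory = model.encode(src, src_mask)    # (1, 10, d_model)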
``` |
{
"source": "1146976048qq/IATN",
"score": 3
} |
#### File: 1146976048qq/IATN/iatn.py
```python
import tensorflow as tf
from keras.models import Sequential
from keras.layers import LSTM, Dense, Bidirectional
# from attention import AttLayer
class IATN(tf.keras.Model):
def __init__(self, config):
super(IATN, self).__init__()
self.n_hidden = config.n_hidden #hidden_size
self.l2_reg = config.l2_reg
self.n_class = config.n_class # num_class
        self.max_s_len = config.max_s_len # max_sentence_length
self.max_a_len = config.max_a_len # max_aspect_length
self.emb_dim = config.emb_dim
        self.embedding_matrix = config.embedding_matrix  # pre-trained word embedding matrix (set as FLAGS.embedding_matrix in train.py)
self.aspect_bilstm = tf.keras.layers.Bidirectional(LSTM(self.n_hidden, return_sequences=True, recurrent_initializer='glorot_uniform', stateful=True))
self.sentence_bilstm = tf.keras.layers.Bidirectional(LSTM(self.n_hidden, return_sequences=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform', stateful=True))
self.aspect_w = tf.contrib.eager.Variable(tf.random_normal([self.n_hidden, self.n_hidden]), name='aspect_w')
self.aspect_b = tf.contrib.eager.Variable(tf.zeros([self.n_hidden]), name='aspect_b')
self.sentence_w = tf.contrib.eager.Variable(tf.random_normal([self.n_hidden, self.n_hidden]), name='sentence_w')
self.sentence_b = tf.contrib.eager.Variable(tf.zeros([self.n_hidden]), name='sentence_b')
self.output_fc = tf.keras.layers.Dense(self.n_class, kernel_regularizer=tf.keras.regularizers.l2(l=self.l2_reg))
def call(self, data, dropout=0.5):
aspects, sentences, domain_labels, sentiment_labels, aspect_lens, sentence_lens = data
aspect_inputs = tf.nn.embedding_lookup(self.embedding_matrix, aspects)
aspect_inputs = tf.cast(aspect_inputs, tf.float32)
aspect_inputs = tf.nn.dropout(aspect_inputs, keep_prob=dropout)
sentence_inputs = tf.nn.embedding_lookup(self.embedding_matrix, sentences)
        sentence_inputs = tf.cast(sentence_inputs, tf.float32)
sentence_inputs = tf.nn.dropout(sentence_inputs, keep_prob=dropout)
aspect_outputs = self.aspect_bilstm(aspect_inputs)
aspect_avg = tf.reduce_mean(aspect_outputs, 1)
sentence_outputs = self.sentence_bilstm(sentence_inputs)
        sentence_avg = tf.reduce_mean(sentence_outputs, 1)
aspect_att = tf.nn.softmax(tf.nn.tanh(tf.einsum('ijk,kl,ilm->ijm', aspect_outputs,
self.aspect_w, tf.expand_dims(sentence_avg, -1)) + self.aspect_b))
aspect_rep = tf.reduce_sum(aspect_att * aspect_outputs, 1)
sentence_att = tf.nn.softmax(tf.nn.tanh(tf.einsum('ijk,kl,ilm->ijm', sentence_outputs,
                                                self.sentence_w, tf.expand_dims(aspect_avg, -1)) + self.sentence_b))
sentence_rep = tf.reduce_sum(sentence_att * sentence_outputs, 1)
domain_rep = tf.concat([aspect_rep, sentence_rep], 1)
domain_predict = self.output_fc(domain_rep) # for domain classification
sentiment_rep = sentence_rep
sentiemnt_predict = self.output_fc(sentiment_rep) # for sentiment classification
return domain_predict, sentiemnt_predict, domain_labels, sentiment_labels
```
#### File: 1146976048qq/IATN/train.py
```python
import tensorflow as tf
from utils import evaluate, get_data_info, read_data, load_word_embeddings
from iatn import IATN
from evals import *
import os
import time
import math
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
tf.enable_eager_execution(config=config)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('emb_dim', 300, 'dimension of word embedding')
tf.app.flags.DEFINE_integer('n_hidden', 300, 'number of hidden unit')
tf.app.flags.DEFINE_integer('n_class', 3, 'number of distinct class')
tf.app.flags.DEFINE_float('l2_reg', 0.001, 'l2 regularization')
tf.app.flags.DEFINE_integer('max_a_len', 0, 'max length of aspects')
tf.app.flags.DEFINE_integer('max_s_len', 0, 'max length of sentences')
tf.app.flags.DEFINE_string('embedding_matrix', '', 'word ids to word vectors')
batch_size = 128
learning_rate = 0.01
n_epoch = 20
pre_processed = 1
embedding_file_name = 'data/kkzhang/glove.840B.300d.txt'
dataset = 'data/kkzhang/laptop/'
logdir = 'logs/'
def run(model, train_data, test_data):
print("Train the IATN model ......")
max_acc, step = 0., -1
train_data_size = len(train_data[0])
train_data = tf.data.Dataset.from_tensor_slices(train_data)
train_data = train_data.shuffle(buffer_size=train_data_size).batch(batch_size, drop_remainder=True)
test_data_size = len(test_data[0])
test_data = tf.data.Dataset.from_tensor_slices(test_data)
test_data = test_data.batch(batch_size, drop_remainder=True)
iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
writer = tf.contrib.summary.create_file_writer(logdir)
writer.set_as_default()
for i in range(n_epoch):
        sentiment_cost, domain_cost, domain_predict_list, domain_labels_list, sentiemnt_predict_list, sentiment_labels_list = 0., 0., [], [], [], []
iterator.make_initializer(train_data)
for _ in range(math.floor(train_data_size / batch_size)):
data = iterator.get_next()
with tf.GradientTape() as tape:
domain_predict, sentiemnt_predict, domain_labels, sentiment_labels = model(data, dropout=0.2)
                sentiemnt_loss_t = tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=sentiment_labels, logits=sentiemnt_predict)
                domain_loss_t = tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=domain_labels, logits=domain_predict)
sentiment_loss = tf.reduce_mean(sentiemnt_loss_t)
domain_loss = tf.reduce_mean(domain_loss_t)
sentiment_cost += tf.reduce_sum(sentiemnt_loss_t)
domain_cost += tf.reduce_sum(domain_loss_t)
loss_value = sentiment_loss + domain_loss
gradients = tape.gradient(loss_value, model.variables)
optimizer.apply_gradients(zip(gradients, model.variables))
sentiemnt_predict_list.extend(tf.argmax(tf.nn.softmax(sentiemnt_predict), 1).numpy())
            sentiment_labels_list.extend(tf.argmax(sentiment_labels, 1).numpy())
domain_predict_list.extend(tf.argmax(tf.nn.softmax(domain_predict), 1).numpy())
domain_labels_list.extend(tf.argmax(domain_labels, 1).numpy())
sentiment_train_acc, _, _ = evaluate(pred=sentiemnt_predict_list, gold=sentiment_labels_list)
        sentiment_train_loss = sentiment_cost / train_data_size
domain_train_acc, _, _ = evaluate(pred=domain_predict_list, gold=domain_labels_list)
        domain_train_loss = domain_cost / train_data_size
train_loss = (sentiment_cost+domain_cost) / train_data_size
def main(_):
start_time = time.time()
print('Loading data info ...')
    word2id, FLAGS.max_a_len, FLAGS.max_s_len = get_data_info(dataset, pre_processed)
print('Loading training and testing data ...')
    train_data = read_data(word2id, FLAGS.max_a_len, FLAGS.max_s_len, dataset + 'train', pre_processed)
    test_data = read_data(word2id, FLAGS.max_a_len, FLAGS.max_s_len, dataset + 'test', pre_processed)
print('Loading pre-trained word vectors ...')
    FLAGS.embedding_matrix = load_word_embeddings(embedding_file_name, FLAGS.emb_dim, word2id)
model = IATN(FLAGS)
run(model, train_data, test_data)
end_time = time.time()
print('Time Costing: %s' % (end_time - start_time))
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "114708/TkInter-Part-2",
"score": 4
} |
#### File: 114708/TkInter-Part-2/TkInter.py
```python
import tkinter.messagebox
from tkinter import *
window = Tk()
window.title("<NAME>")
window.geometry('700x420')
# The different grids
Label(window, text="Name").grid(row=0)
name = Entry(window)
name.grid(row=0, column=1)
Label(window, text="Email").grid(row=1)
email = Entry(window)
email.grid(row=1, column=1)
Label(window, text="ISBN").grid(row=2)
isbn = Entry(window)
isbn.grid(row=2, column=1)
Label(window, text="Height").grid(row=3)
height = Entry(window)
height.grid(row=3, column=1)
Label(window, text="Length 1").grid(row=4)
length1 = Entry(window)
length1.grid(row=4, column=1)
Label(window, text="Length 2").grid(row=5)
length2 = Entry(window)
length2.grid(row=5, column=1)
Label(window, text="Length 3").grid(row=6)
length3 = Entry(window)
length3.grid(row=6, column=1)
Label(window, text="Length 4").grid(row=7)
length4 = Entry(window)
length4.grid(row=7, column=1)
# The options
paint_choice = IntVar()
paint_choice.set(2)
Radiobutton(window, text="Luxury", variable=paint_choice, value=1).grid(row=8, column=0)
Radiobutton(window, text="Standard", variable=paint_choice, value=2).grid(row=8, column=1)
Radiobutton(window, text="Economy", variable=paint_choice, value=3).grid(row=8, column=2)
# Undercoat option
use_undercoat = IntVar()
undercoat = Checkbutton(window, text="Undercoat", variable=use_undercoat)
undercoat.grid(row=8, column=8)
# Calculation
def perform_calc():
print(use_undercoat.get())
paint_quality = paint_choice.get()
area1 = int(height.get()) * int(length1.get())
area2 = int(height.get()) * int(length2.get())
area3 = int(height.get()) * int(length3.get())
area4 = int(height.get()) * int(length4.get())
area = int(area1) + int(area2) + int(area3) + int(area4)
if paint_quality == 1:
paint_cost = 1.90
elif paint_quality == 2:
paint_cost = 1.00
else:
paint_cost = 0.60
if use_undercoat.get():
paint_cost += 0.50
total_paint_cost = paint_cost * area
itemised_total = f"total area = {area} \n" # appears in the message box
itemised_total += f"Paint cost = {total_paint_cost}" # appears in the message box
    isbn_value = int(isbn.get())
    digits = []
    for _ in range(1, 11):
        digits.append(isbn_value % 10)
        isbn_value = isbn_value // 10
digits.reverse()
total = 0
for scale in range(0, 10):
total += scale * digits[scale - 1]
cd = total % 11
if cd == digits[9]:
print("Check digit is ok")
else:
print("Check digit is invalid")
    name_value = name.get()
    # Presence check
    if name_value == "":
        print("Value has not been set")
    # Length Check
    elif len(name_value) < 2:
        print("Name is too short")
    # Format check
    elif email.get().find("@") < 0:
        print("Email address is not valid")
# Message box
tkinter.messagebox.showinfo("Alert Message", itemised_total)
# def handle_click(event):
# print("The button was clicked")
# greeting = "Hello "
# print("Name: " + entry.get())
# print("ISBN: " + entry2.get())
# entry.insert(0, greeting)
# Label(window, text="Name").grid(row=1)
# entry = Entry(window)
# entry.grid(row=1, column=2)
# label5 = tkinter.Label(text="name")
# entry = tkinter.Entry()
# label6 = tkinter.Label(text="ISBN")
# entry2 = tkinter.Entry()
# button1 = tkinter.Button(text="Click me 1!") # , command=handle_click()
# label5.pack()
# entry.pack()
# label6.pack()
# entry2.pack()
# button1.pack()
Button(window, text="Calculate", command=perform_calc).grid(row=10, column=1)
window.mainloop()
``` |
{
"source": "1153470104/Jiayan",
"score": 2
} |
#### File: Jiayan/jiayan/__init__.py
```python
import kenlm
from jiayan.lexicon.pmi_entropy_constructor import PMIEntropyLexiconConstructor
from jiayan.tokenizer.hmm_tokenizer import CharHMMTokenizer
from jiayan.tokenizer.tang_tokenizer import TangCharHMMTokenizer
from jiayan.tokenizer.ngram_tokenizer import WordNgramTokenizer
from jiayan.sentencizer.crf_sentencizer import CRFSentencizer
from jiayan.sentencizer.crf_punctuator import CRFPunctuator
from jiayan.postagger.crf_pos_tagger import CRFPOSTagger
def load_lm(lm):
return kenlm.LanguageModel(lm)
```
#### File: Jiayan/jiayan/tang_test.py
```python
from jiayan import CharHMMTokenizer
from jiayan import TangCharHMMTokenizer
from jiayan import load_lm
def init_file():
f1 = open("resource/qujiang_hmm.txt", 'w')
f2 = open("resource/qujiang_tang.txt", 'w')
f1.close()
f2.close()
def list_to_file(path, p_list):
f = open(path, 'a', encoding='utf-8')
txt = ""
for p in p_list:
txt = txt+ p + " "
txt = txt[:-1]
f.write(txt)
f.write("\n")
f.close()
def list_to_text(p_list):
txt = ""
for p in p_list:
txt = txt+ p + " "
return txt[:-1]
if __name__ == '__main__':
lm_path = 'C:/TJlab/Tang/chinese_poetry/jiayan.klm'
print('\nTokenizing test text with HMM...')
# init_file()
lm = load_lm(lm_path)
hmm_tokenizer = CharHMMTokenizer(lm)
tang_tokenizer = TangCharHMMTokenizer(lm)
# f = open("resource/qujiang_raw.txt", encoding='utf-8')
# line = f.readline()
# while line:
# # list_to_file("resource/qujiang_hmm.txt", list(tang_tokenizer.tokenize(line)))
# list_to_file("resource/qujiang_tang.txt", tang_tokenizer.intervene_tokenize(line))
# # list_to_file("resource/qujiang_tang_trans.txt", tang_tokenizer.intervene(line))
# line = f.readline()
# f.close()
text0 = "送春归,三月尽日日暮时。去年杏园花飞御沟绿,何处送春曲江曲。今年杜鹃花落子规啼,送春何处西江西。帝城送春犹怏怏" \
",天涯送春能不加惆怅。莫惆怅,送春人。冗员无替五年罢,应须准拟再送浔阳春。五年炎凉凡十变,又知此身健不健。" \
"好去今年江上春,明年未死还相见。"
text1 = "春泪烂罗绮,泣声抽恨多。莫滴芙蓉池,愁伤连蒂荷。壹朵花叶飞,壹枝花光彩。美人惜花心,但愿春长在。"
text2 = "晦日同携手,临流壹望春。可怜杨柳陌,愁杀故乡人。"
text3 = "去岁欢游何处去,曲江西岸杏园东。花下忘归因美景,尊前劝酒是春风。各从微宦风尘里,共度流年离别中。今日相逢愁又喜,八人分散两人同。"
text4 = "及第新春选胜游,杏园初宴曲江头。紫毫粉壁题仙籍,柳色箫声拂御楼。霁景露光明远岸,晚空山翠坠芳洲。归时不省花间醉,绮陌香车似水流。"
text5 = "寂寂孤莺啼杏园,寥寥壹犬吠桃源。落花芳草无寻处,万壑千峰独闭门。"
# test
# print(tang_tokenizer.seg_score("霁景"))
# print(tang_tokenizer.seg_score("霁") + tang_tokenizer.seg_score("景"))
# print(tang_tokenizer.seg_score("晚空"))
# print(tang_tokenizer.seg_score("晚") + tang_tokenizer.seg_score("空"))
# print(tang_tokenizer.validate(list(tang_tokenizer.sentences(text1))))
# print(list_to_text(list(hmm_tokenizer.tokenize(text0))))
# print(list_to_text(list(tang_tokenizer.tokenize(text0))))
# print(list_to_text(list(hmm_tokenizer.tokenize(text1))))
print(list_to_text(list(tang_tokenizer.tokenize(text1))))
print(list_to_text(tang_tokenizer.intervene_tokenize(text1)))
# print(list_to_text(list(hmm_tokenizer.tokenize(text2))))
# print(list_to_text(list(tang_tokenizer.tokenize(text2))))
# print(list_to_text(tang_tokenizer.intervene_tokenize(text2)))
# print(list_to_text(list(hmm_tokenizer.tokenize(text3))))
print(list_to_text(list(tang_tokenizer.tokenize(text3))))
print(list_to_text(tang_tokenizer.intervene_tokenize(text3)))
#
# # print(list_to_text(list(hmm_tokenizer.tokenize(text4))))
print(list_to_text(list(tang_tokenizer.tokenize(text4))))
print(list_to_text(tang_tokenizer.intervene_tokenize(text4)))
#
# # print(list_to_text(list(hmm_tokenizer.tokenize(text5))))
print(list_to_text(list(tang_tokenizer.tokenize(text5))))
print(list_to_text(tang_tokenizer.intervene_tokenize(text5)))
``` |
{
"source": "1155107756/pgdrive",
"score": 3
} |
#### File: envs/generation_envs/change_friction_env.py
```python
import numpy as np
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.utils import PGConfig, get_np_random
class ChangeFrictionEnv(PGDriveEnv):
@staticmethod
def default_config() -> PGConfig:
config = PGDriveEnv.default_config()
config.update(
{
"change_friction": True,
"friction_min": 0.8,
"friction_max": 1.2,
"vehicle_config": {
"wheel_friction": 1.0
}
}
)
return config
def __init__(self, config=None):
super(ChangeFrictionEnv, self).__init__(config)
self.parameter_list = dict()
self._random_state = get_np_random(0) # Use a fixed seed.
for k in self.maps:
self.parameter_list[k] = dict(
wheel_friction=self._random_state.uniform(self.config["friction_min"], self.config["friction_max"])
)
def _reset_vehicles(self):
if self.config["change_friction"] and self.vehicle is not None:
self.for_each_vehicle(lambda v: v.destroy(self.pg_world))
del self.vehicles
# We reset the friction here!
parameter = self.parameter_list[self.current_seed]
v_config = self.config["vehicle_config"]
v_config["wheel_friction"] = parameter["wheel_friction"]
self.vehicles = self._get_vehicles()
# initialize track vehicles
# first tracked vehicles
vehicles = sorted(self.vehicles.items())
self.current_track_vehicle = vehicles[0][1]
self.current_track_vehicle_id = vehicles[0][0]
for _, vehicle in vehicles:
if vehicle is not self.current_track_vehicle:
# for display
vehicle.remove_display_region()
self.vehicles.update(self.done_vehicles)
self.done_vehicles = {}
self.for_each_vehicle(lambda v: v.reset(self.current_map))
if __name__ == '__main__':
env = ChangeFrictionEnv(config={"environment_num": 100, "start_seed": 1000, "change_friction": True})
env.seed(100000)
obs = env.reset()
for s in range(100):
action = np.array([0.0, 1.0])
o, r, d, i = env.step(action)
if d:
env.reset()
```
#### File: envs/generation_envs/safe_pgdrive_env.py
```python
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.utils import PGConfig
class SafePGDriveEnv(PGDriveEnv):
def default_config(self) -> PGConfig:
config = super(SafePGDriveEnv, self).default_config()
config.update(
{
"accident_prob": 0.5,
"crash_vehicle_cost": 1,
"crash_object_cost": 1,
"crash_vehicle_penalty": 0.,
"crash_object_penalty": 0.,
"out_of_road_cost": 0., # only give penalty for out_of_road
"traffic_density": 0.2,
}
)
return config
def done_function(self, vehicle):
done, done_info = super(SafePGDriveEnv, self).done_function(vehicle)
if done_info["crash_vehicle"]:
done = False
elif done_info["crash_object"]:
done = False
return done, done_info
if __name__ == "__main__":
env = SafePGDriveEnv(
{
"manual_control": True,
"use_render": True,
"environment_num": 100,
"start_seed": 75,
"debug": True,
"cull_scene": True,
"pg_world_config": {
"pstats": True
}
}
)
o = env.reset()
total_cost = 0
for i in range(1, 100000):
o, r, d, info = env.step([0, 1])
total_cost += info["cost"]
env.render(text={"cost": total_cost, "seed": env.current_map.random_seed})
if d:
total_cost = 0
print("Reset")
env.reset()
env.close()
```
#### File: pgdrive/envs/multi_agent_pgdrive.py
```python
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.scene_creator.blocks.first_block import FirstBlock
from pgdrive.scene_creator.vehicle.base_vehicle import BaseVehicle
from pgdrive.utils import setup_logger, PGConfig
class MultiAgentPGDrive(PGDriveEnv):
@staticmethod
def default_config() -> PGConfig:
config = PGDriveEnv.default_config()
config.update(
{
"environment_num": 1,
"traffic_density": 0.,
"start_seed": 10,
"map": "yY",
"target_vehicle_configs": {
"agent0": {
"born_longitude": 10,
"born_lateral": 1.5,
"born_lane_index": (FirstBlock.NODE_1, FirstBlock.NODE_2, 1),
# "show_lidar": True
},
"agent1": {
"born_longitude": 10,
# "show_lidar": True,
"born_lateral": -1,
},
"agent2": {
"born_longitude": 10,
"born_lane_index": (FirstBlock.NODE_1, FirstBlock.NODE_2, 2),
# "show_lidar": True,
"born_lateral": 1,
},
"agent3": {
"born_longitude": 10,
# "show_lidar": True,
"born_lateral": 2,
}
},
"num_agents": 4,
"crash_done": True
}
)
# Some collision bugs still exist, always set to False now!!!!
# config.extend_config_with_unknown_keys({"crash_done": True})
return config
def __init__(self, config=None):
super(MultiAgentPGDrive, self).__init__(config)
def done_function(self, vehicle):
# crash will not done
done, done_info = super(MultiAgentPGDrive, self).done_function(vehicle)
if vehicle.crash_vehicle and not self.config["crash_done"]:
done = False
done_info["crash_vehicle"] = False
elif vehicle.out_of_route and vehicle.on_lane and not vehicle.crash_sidewalk:
done = False
done_info["out_of_road"] = False
return done, done_info
def step(self, actions):
# remove useless actions
id_to_remove = []
for id in actions.keys():
if id in self.done_vehicles.keys():
id_to_remove.append(id)
for id in id_to_remove:
actions.pop(id)
o, r, d, i = super(MultiAgentPGDrive, self).step(actions)
for id, done in d.items():
if done and id in self.vehicles.keys():
v = self.vehicles.pop(id)
v.prepare_step([0, -1])
self.done_vehicles[id] = v
return o, r, d, i
def _get_vehicles(self):
# TODO We only support homogenous vehicle config now!
return {
"agent{}".format(i): BaseVehicle(self.pg_world, self.config["vehicle_config"])
for i in range(self.num_agents)
}
def _get_observations(self):
return {
"agent{}".format(i): self.get_single_observation(self.config["vehicle_config"])
for i in range(self.num_agents)
}
# def reward_function(self, vehicle):
# """
# Override this func to get a new reward function
# :param vehicle: BaseVehicle
# :return: reward
# """
# step_info = dict()
#
# # Reward for moving forward in current lane
# current_lane = vehicle.lane
# long_last, _ = current_lane.local_coordinates(vehicle.last_position)
# long_now, lateral_now = current_lane.local_coordinates(vehicle.position)
#
# reward = 0.0
#
# # reward for lane keeping, without it vehicle can learn to overtake but fail to keep in lane
#
# lateral_factor = 1.0
#
# reward += vehicle.vehicle_config["driving_reward"] * (long_now - long_last) * lateral_factor
#
# reward += vehicle.vehicle_config["speed_reward"] * (vehicle.speed / vehicle.max_speed)
# step_info["step_reward"] = reward
#
# if vehicle.crash_vehicle:
# reward = -vehicle.vehicle_config["crash_vehicle_penalty"]
# elif vehicle.crash_object:
# reward = -vehicle.vehicle_config["crash_object_penalty"]
# elif vehicle.arrive_destination:
# reward = +vehicle.vehicle_config["success_reward"]
#
# return reward, step_info
if __name__ == "__main__":
setup_logger(True)
env = MultiAgentPGDrive(
{
"use_render": True,
"debug": False,
"manual_control": True,
"pg_world_config": {
"pstats": False
}
}
)
o = env.reset()
total_r = 0
for i in range(1, 100000):
o, r, d, info = env.step({"agent0": [-1, 0], "agent1": [0, 0], "agent2": [-1, 0], "agent3": [0, 0]})
for r_ in r.values():
total_r += r_
# o, r, d, info = env.step([0,1])
d.update({"total_r": total_r})
env.render(text=d)
if len(env.vehicles) == 0:
total_r = 0
print("Reset")
env.reset()
env.close()
```
#### File: scene_creator/object/traffic_object.py
```python
from typing import Sequence, Tuple
import numpy as np
from panda3d.bullet import BulletRigidBodyNode, BulletCylinderShape
from panda3d.core import NodePath
from pgdrive.constants import BodyName
from pgdrive.utils.asset_loader import AssetLoader
from pgdrive.utils.coordinates_shift import panda_position, panda_heading
from pgdrive.utils.element import Element
LaneIndex = Tuple[str, str, int]
class ObjectNode(BulletRigidBodyNode):
"""
Collision Properties should place here, info here can used for collision callback
"""
COST_ONCE = True # cost will give at the first time
def __init__(self, object_body_name: str):
BulletRigidBodyNode.__init__(self, object_body_name)
BulletRigidBodyNode.setPythonTag(self, object_body_name, self)
self.crashed = False
class Object(Element):
"""
Common interface for objects that appear on the road, beside vehicles.
"""
NAME = None
RADIUS = 0.25
HEIGHT = 1.2
MASS = 1
def __init__(self, lane, lane_index: LaneIndex, position: Sequence[float], heading: float = 0.):
"""
:param lane: the lane to spawn object
:param lane_index: the lane_index of the spawn point
:param position: cartesian position of object in the surface
:param heading: the angle from positive direction of horizontal axis
"""
assert self.NAME is not None, "Assign a name for this class for finding it easily"
super(Object, self).__init__()
self.position = position
self.speed = 0
self.heading = heading / np.pi * 180
self.lane_index = lane_index
self.lane = lane
self.body_node = None
@classmethod
def make_on_lane(cls, lane, lane_index: LaneIndex, longitudinal: float, lateral: float):
"""
Create an object on a given lane at a longitudinal position.
:param lane: the lane to spawn object
:param lane_index: the lane_index of the spawn point
:param longitudinal: longitudinal position along the lane
:param lateral: lateral position
:return: An object with at the specified position
"""
return cls(lane, lane_index, lane.position(longitudinal, lateral), lane.heading_at(longitudinal))
def set_static(self, static: bool = False):
mass = 0 if static else self.MASS
self.body_node.setMass(mass)
@classmethod
def type(cls):
return cls.__subclasses__()
class TrafficCone(Object):
"""Placed near the construction section to indicate that traffic is prohibited"""
NAME = BodyName.Traffic_cone
def __init__(self, lane, lane_index: LaneIndex, position: Sequence[float], heading: float = 0.):
super(TrafficCone, self).__init__(lane, lane_index, position, heading)
self.body_node = ObjectNode(self.NAME)
self.body_node.addShape(BulletCylinderShape(self.RADIUS, self.HEIGHT))
self.node_path: NodePath = NodePath(self.body_node)
self.node_path.setPos(panda_position(self.position, self.HEIGHT / 2))
self.dynamic_nodes.append(self.body_node)
self.node_path.setH(panda_heading(self.heading))
if self.render:
model = self.loader.loadModel(AssetLoader.file_path("models", "traffic_cone", "scene.gltf"))
model.setScale(0.02)
model.setPos(0, 0, -self.HEIGHT / 2)
model.reparentTo(self.node_path)
class TrafficTriangle(Object):
"""Placed behind the vehicle when it breaks down"""
NAME = BodyName.Traffic_triangle
def __init__(self, lane, lane_index: LaneIndex, position: Sequence[float], heading: float = 0.):
super(TrafficTriangle, self).__init__(lane, lane_index, position, heading)
self.body_node = ObjectNode(self.NAME)
self.body_node.addShape(BulletCylinderShape(self.RADIUS, self.HEIGHT))
self.node_path: NodePath = NodePath(self.body_node)
self.node_path.setPos(panda_position(self.position, self.HEIGHT / 2))
self.dynamic_nodes.append(self.body_node)
self.node_path.setH(panda_heading(self.heading))
if self.render:
model = self.loader.loadModel(AssetLoader.file_path("models", "warning", "warning.gltf"))
model.setScale(0.02)
model.setH(-90)
model.setPos(0, 0, -self.HEIGHT / 2)
model.reparentTo(self.node_path)
```
#### File: tests/test_env/test_build_city.py
```python
from pgdrive import PGDriveEnv
from pgdrive.scene_creator.city_map import CityMap
from pgdrive.world.pg_world import PGWorld
def _t(num_blocks):
default_config = PGDriveEnv.default_config()
world_config = default_config["pg_world_config"]
world_config.update({"use_render": False, "use_image": False, "debug": False})
world = PGWorld(config=world_config)
try:
map_config = default_config["map_config"]
map_config.update(dict(type="block_num", config=num_blocks))
map = CityMap(world, map_config)
finally:
world.close_world()
def test_build_city():
_t(num_blocks=1)
_t(num_blocks=3)
_t(num_blocks=20)
```
#### File: tests/test_env/test_naive_multi_agent.py
```python
import gym
from pgdrive import PGDriveEnv
def _a(env, action):
assert env.action_space.contains(action)
obs, reward, done, info = env.step(action)
assert env.observation_space.contains(obs)
assert isinstance(info, dict)
def _step(env):
try:
obs = env.reset()
assert env.observation_space.contains(obs)
for _ in range(5):
_a(env, env.action_space.sample())
finally:
env.close()
def test_naive_multi_agent_pgdrive():
# env = PGDriveEnv(config={"num_agents": 1})
# assert isinstance(env.action_space, gym.spaces.Box)
# _step(env)
# env.close()
env = PGDriveEnv(
config={
"num_agents": 10,
"target_vehicle_configs": {"agent{}".format(i): {
"born_longitude": i * 4
}
for i in range(10)}
}
)
assert isinstance(env.action_space, gym.spaces.Dict)
obs = env.reset()
assert isinstance(obs, dict)
a = env.action_space.sample()
assert isinstance(a, dict)
o, r, d, i = env.step(a)
assert isinstance(o, dict)
assert isinstance(r, dict)
assert isinstance(d, dict)
assert isinstance(i, dict)
_step(env)
if __name__ == '__main__':
test_naive_multi_agent_pgdrive()
```
#### File: tests/test_env/test_pgdrive_env_v2.py
```python
from pgdrive.envs.pgdrive_env_v2 import PGDriveEnvV2
from pgdrive.tests.test_env.test_pgdrive_env import _act
def test_pgdrive_env_v2():
env = PGDriveEnvV2()
assert env.observation_space.shape[0] == 120 + 19
try:
obs = env.reset()
assert env.observation_space.contains(obs)
_act(env, env.action_space.sample())
for x in [-1, 0, 1]:
env.reset()
for y in [-1, 0, 1]:
_act(env, [x, y])
finally:
env.close()
if __name__ == '__main__':
test_pgdrive_env_v2()
```
#### File: tests/test_env/test_top_down_env.py
```python
import numpy as np
from pgdrive.envs.top_down_env import TopDownSingleFramePGDriveEnv, TopDownPGDriveEnv, TopDownPGDriveEnvV2
def test_top_down_rendering():
for env in [
TopDownSingleFramePGDriveEnv(dict(environment_num=5, map="C", traffic_density=1.0)),
TopDownPGDriveEnv(dict(environment_num=5, map="C", traffic_density=1.0)),
TopDownPGDriveEnv(dict(environment_num=5, map="C", frame_stack=1, post_stack=2)),
TopDownPGDriveEnvV2(dict(environment_num=5, map="C", frame_stack=1, post_stack=2)),
]:
try:
for _ in range(5):
o = env.reset()
assert np.mean(o) > 0.0
for _ in range(10):
o, *_ = env.step([0, 1])
assert np.mean(o) > 0.0
for _ in range(10):
o, *_ = env.step([-0.05, 1])
assert np.mean(o) > 0.0
finally:
env.close()
if __name__ == "__main__":
test_top_down_rendering()
```
#### File: tests/test_funtionality/test_lidar.py
```python
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.utils import setup_logger
class TestEnv(PGDriveEnv):
def __init__(self, config):
super(TestEnv, self).__init__(config)
def test_lidar(render=False):
setup_logger(debug=True)
env = TestEnv(
{
"use_render": render,
"manual_control": render,
"environment_num": 1,
"traffic_density": 0.3,
"vehicle_config": {
"show_lidar": True
}
}
)
try:
env.reset()
detect_vehicle = False
for i in range(1, 100000):
o, r, d, info = env.step([0, 1])
if len(env.vehicle.lidar.get_surrounding_vehicles()) != 0:
detect_vehicle = True
if d:
break
assert detect_vehicle, "Lidar detection failed"
finally:
env.close()
if __name__ == "__main__":
test_lidar(render=True)
```
#### File: tests/test_funtionality/test_navigation.py
```python
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.scene_creator.map import Map, MapGenerateMethod
from pgdrive.scene_creator.vehicle_module.PID_controller import PIDController
class TestEnv(PGDriveEnv):
def __init__(self, vis):
super(TestEnv, self).__init__(
{
"environment_num": 10,
"traffic_density": 0.0,
"use_render": vis,
"start_seed": 5,
"map_config": {
Map.GENERATE_TYPE: MapGenerateMethod.BIG_BLOCK_NUM,
Map.GENERATE_CONFIG: 7,
Map.LANE_WIDTH: 3.5,
Map.LANE_NUM: 3,
}
}
)
class Target:
def __init__(self, target_lateral, target_speed):
self.lateral = target_lateral
self.speed = target_speed
def go_right(self):
self.lateral += 0.25 if self.lateral < 0.625 else 0
def go_left(self):
self.lateral -= 0.25 if self.lateral > 0.125 else 0
def faster(self):
self.speed += 10
def slower(self):
self.speed -= 10
def test_navigation(vis=False):
env = TestEnv(vis)
target = Target(0.375, 30)
o = env.reset()
if vis:
env.pg_world.accept('d', target.go_right)
env.pg_world.accept('a', target.go_left)
env.pg_world.accept('w', target.faster)
env.pg_world.accept('s', target.slower)
steering_controller = PIDController(1.6, 0.0008, 27.3)
acc_controller = PIDController(0.1, 0.001, 0.3)
steering_error = o[0] - target.lateral
steering = steering_controller.get_result(steering_error)
acc_error = env.vehicles[env.DEFAULT_AGENT].speed - target.speed
acc = acc_controller.get_result(acc_error)
for i in range(1, 1000000 if vis else 2000):
o, r, d, info = env.step([-steering, acc])
# calculate new action
steering_error = o[0] - target.lateral
steering = steering_controller.get_result(steering_error)
t_speed = target.speed if abs(o[12] - 0.5) < 0.01 else target.speed - 10
acc_error = env.vehicles[env.DEFAULT_AGENT].speed - t_speed
acc = acc_controller.get_result(acc_error)
if vis:
if i < 700:
env.render(
text={
"W": "Target speed +",
"S": "Target speed -",
"A": "Change to left lane",
"D": "Change to right lane"
}
)
if i == 500:
env.pg_world.on_screen_message.data.clear()
else:
env.render()
if d:
print("Reset")
o = env.reset()
steering_controller.reset()
steering_error = o[0] - target.lateral
steering = steering_controller.get_result(steering_error, o[11])
acc_controller.reset()
acc_error = env.vehicles[env.DEFAULT_AGENT].speed - target.speed
acc = acc_controller.get_result(acc_error)
env.close()
if __name__ == "__main__":
test_navigation(vis=True)
```
#### File: tests/vis_funtionality/vis_render_msg.py
```python
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.scene_creator.map import Map, MapGenerateMethod
from pgdrive.utils import setup_logger
setup_logger(debug=True)
class TestEnv(PGDriveEnv):
def __init__(self):
super(TestEnv, self).__init__(
{
"environment_num": 4,
"traffic_density": 0.1,
"start_seed": 3,
"pg_world_config": {
"debug": False,
},
"image_source": "mini_map",
"manual_control": True,
"use_render": True,
"use_image": False,
"steering_penalty": 0.0,
"decision_repeat": 5,
"rgb_clip": True,
"map_config": {
Map.GENERATE_TYPE: MapGenerateMethod.BIG_BLOCK_NUM,
Map.GENERATE_CONFIG: 12,
Map.LANE_WIDTH: 3.5,
Map.LANE_NUM: 3,
}
}
)
if __name__ == "__main__":
env = TestEnv()
env.reset()
for i in range(1, 100000):
o, r, d, info = env.step([0, 1])
env.render(text={"Frame": i, "Speed": env.vehicle.speed})
env.close()
``` |
{
"source": "1157788361/My_Cooperate_model",
"score": 3
} |
#### File: medseg/analysis/visual_embedding.py
```python
import umap
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set(context="paper", style="white")
def plot_umap(x, y, num_classes=10, display_labels=None, title="data embedded into two dimensions by UMAP", save_path=None):
'''
visualize the data embedding from a single dataset with UMAP
:param x: input features or images, 2D array [N by D]
:param y: target labels for display: 1D array [N]
:param num_classes: how many classes in total, e.g. MNIST: has 10 classes
:param display_labels: : the labels shown in its colorbar for reference
:return:
'''
reducer = umap.UMAP(random_state=42)
print('load data:', x.shape)
embedding = reducer.fit_transform(x)
fig, ax = plt.subplots(figsize=(12, 10))
color = y.astype(int)
plt.scatter(
embedding[:, 0], embedding[:, 1], c=color, cmap="Spectral"
)
plt.setp(ax, xticks=[], yticks=[])
cbar = plt.colorbar(boundaries=np.arange(num_classes + 1) - 0.5)
cbar.set_ticks(np.arange(num_classes))
classes = [str(i) for i in range(num_classes)]
# set colorbar font size
ticklabs = cbar.ax.get_yticklabels()
cbar.ax.set_yticklabels(ticklabs, fontsize=14)
if display_labels is None:
cbar.set_ticklabels(classes)
else:
cbar.set_ticklabels(display_labels)
plt.title(title, fontsize=18)
    if save_path is not None:
plt.savefig(save_path)
plt.show()
def plot_multi_dataset_umap(dataArraysList, dataset_labels=None, display_labels=None):
'''
plot umap for multiple datasets
:param dataArraysList: a list of 2D arrays, each element contains all images from one dataset
:param dataset_labels: the labels for identifying different dataset
:param display_labels: the labels for displaying on the plots.
:return: concatenated image features 2D array dim=[all_samples in the datasets]*features, labels: 1D array: dim=[all_samples in the datasets],
'''
    if dataset_labels is None:
labels = list(np.arange(len(dataArraysList)))
else:
labels = dataset_labels
total_dataset_features = []
total_dataset_labels = []
for dataset_id, dataset in enumerate(dataArraysList):
print('load {} [dataset_size*num_features] from dataset {}'.format(dataset.shape, dataset_id))
dataset_marker = labels[dataset_id]
dataset_label_array = np.ones((len(dataset))) * dataset_marker
total_dataset_features.append(dataset)
total_dataset_labels.append(dataset_label_array)
if len(dataArraysList) >= 2:
input = np.vstack(total_dataset_features)
label = np.concatenate(total_dataset_labels, axis=0)
else:
input = total_dataset_features[0]
label = total_dataset_labels[0]
assert len(input.shape) == 2, 'input features must be 2D array, but got {}'.format(input.shape)
assert len(label.shape) == 1, 'labels must be 1D array, but got {}'.format(label.shape)
print('X:', input.shape)
print('Y:', label.shape)
plot_umap(x=input, y=label, num_classes=len(dataArraysList), display_labels=display_labels)
return total_dataset_features, total_dataset_labels
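# Illustrative usage sketch (synthetic arrays for demonstration only; not from the original module):
# d1 = np.random.random((200, 64))   # dataset A: 200 samples, 64 features
# d2 = np.random.random((300, 64))   # dataset B: 300 samples, 64 features
# plot_multi_dataset_umap([d1, d2], display_labels=['dataset A', 'dataset B'])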
if __name__ == '__main__':
import numpy as np
y = np.arange(0, 1000)
y[:50] = 0
y[50:] = 1
X = np.random.random((1000, 100))
plot_umap(X, y, num_classes=2, save_path='./output/umap.png')
```
#### File: medseg/common_utils/load_args.py
```python
import json
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
"""Loads parameters from json file"""
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
with open(json_path, 'w') as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, )
d = {k: float(v) for k, v in d.items()}
json.dump(d, f, indent=4)
if __name__ == '__main__':
params = Params('/vol/medic01/users/cc215/Dropbox/projects/DeformADA/configs/gat_loss.json')
print(params.dict)
```
#### File: medseg/common_utils/uncertainty.py
```python
import numpy as np
def cal_entropy_maps(pred_probs, eps=1e-7, threhold=0., use_max=False):
'''
calculate entropy maps from probs which is the output from neural networks after the softmax layer.
eps is used to prevent np.log2(zero). Note that this function is working on one image only.
:param 2D soft_max_prob_maps: C_classes*H*W
:param threshold: float uncertainty below this value will be filtered out.
:param use_max: if use max, then find the maximum prob over classes and use the value to cal entropy,
                     otherwise compute the entropy across channels and sum them.
:return: A 2D map H*W with values >0
'''
assert len(pred_probs.shape) == 3, 'only support input of three dimension [Channel, H, W]'
if use_max:
        ax_probs = np.amax(pred_probs, axis=0)  # use the maximum prob over classes to compute entropy
entropy = (-ax_probs * np.log2(ax_probs + eps))
else:
entropy = (-pred_probs * np.log2(pred_probs + eps)).sum(axis=0)
entropy = np.nan_to_num(entropy)
entropy[entropy < threhold] = 0.
return entropy
def cal_batch_entropy_maps(pred_probs, eps=1e-7, threhold=0., use_max=False):
'''
calculate entropy maps from probs which is the output from neural networks after the softmax layer.
eps is used to prevent np.log2(zero). Note that this function is working on batches of image.
:param 3D soft_max_prob_maps: N*C_classes*H*W
:param threshold: float uncertainty below this value will be filtered out.
:param use_max: if use max, then find the maximum prob over classes and use the value to cal entropy,
                     otherwise compute the entropy across channels and sum them.
:return: A 3D map [ N*H*W] with values >0
'''
assert len(pred_probs.shape) == 4, 'only support input of four dimension [N, Channel, H, W]'
if use_max:
        ax_probs = np.amax(pred_probs, axis=1)  # use the maximum prob over classes to compute entropy
entropy = (-ax_probs * np.log2(ax_probs + eps))
else:
entropy = (-pred_probs * np.log2(pred_probs + eps)).sum(axis=1)
entropy = np.nan_to_num(entropy)
entropy[entropy < threhold] = 0.
if len(entropy.shape) < 3:
print('check dimensionality of the output')
raise ValueError
return entropy
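
# Minimal sanity check (an illustrative sketch added here, not part of the original module):
if __name__ == '__main__':
    # a uniform two-class prediction carries ~1 bit of entropy at every pixel
    dummy_probs = np.full((2, 4, 4), 0.5)  # [Channel=2, H=4, W=4]
    print(cal_entropy_maps(dummy_probs).mean())  # expected to be close to 1.0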
```
#### File: 1157788361/My_Cooperate_model/try_nn_model.py
```python
import torch.nn as nn
import torch
#TODO: next time, work through the specifics of the model, including gradients and other details.
class my_net(nn.Module):
def __init__(self):
super(my_net, self).__init__()
```
#### File: 1157788361/My_Cooperate_model/try_requires_grad.py
```python
# x.grad: tensor([ 28., 224.])
# x1.grad: tensor([ 28., 224.])
# y5.backward(torch.ones(y3.shape))
# print('grad')
# print('x.grad:',x.grad) # only leaf nodes keep a grad value
# print('x1.grad:',x1.grad)
# print('y1.grad:', y1.grad) # these are all composite functions of x, i.e. intermediate (non-leaf) nodes
# print('y2.grad:', y2.grad) # grads of non-leaf nodes are cleared automatically once computed
# print('y3.grad:', y3.grad)
# print('y4.grad:', y4.grad)
""" 研究 将 原计算图中叶子节点剥离出来并重新计算,是否对原 计算图的 反向传播有影响。"""
""" PyTorch使用的是动态图,它的计算图在每次前向传播时都是从头开始构建,所以它能够使用Python控制语句(如for、if等)根据需求创建计算图。 """
import torch
# 简化网络,没有 detach 影响
x = torch.tensor([1.0,2.0],requires_grad=True)
x1 = torch.tensor([3.0,4.0],requires_grad=True)
y1 = x**2
y2 = y1*2
y3 = y2*2
y4 = y1 +y3+y2
y5 = y4**2 + x1
print('Graph state before detaching')
print(x, x.requires_grad,x.grad)
print(x1, x1.requires_grad,x1.grad)
print(y1, y1.requires_grad,y1.grad) # grads of non-leaf nodes are cleared automatically once computed
print(y2, y2.requires_grad,y2.grad)
print(y3, y3.requires_grad,y3.grad)
print(y4, y4.requires_grad,y4.grad)
# If y5.backward(torch.ones(y5.shape)) is called, the output is:
# tensor([1., 2.], requires_grad=True) True tensor([ 196., 1568.])
# tensor([3., 4.], requires_grad=True) True tensor([1., 1.])
# tensor([1., 4.], grad_fn=<PowBackward0>) True None
# tensor([2., 8.], grad_fn=<MulBackward0>) True None
# tensor([ 4., 16.], grad_fn=<MulBackward0>) True None
# tensor([ 7., 28.], grad_fn=<AddBackward0>) True None
y_1 = y3
# Detach the leaf nodes from the computation graph.
p_list = [x,x1,y1,y2,y3,y4,y5]
for p in p_list:
    if p.grad is not None:  # at this point only the leaf nodes have a non-None p.grad
        # When training a network we may want to keep part of the parameters fixed and only tune the rest,
        # or train only a branch of the network without letting its gradients affect the main network;
        # in those cases detach() is used to cut off backpropagation through those branches.
        p.grad.detach_()
        p.grad.zero_()
# State of each graph node after detaching:
# Whether or not y5.backward(torch.ones(y5.shape)) has been run, the output below is the same, i.e. grad_fn
# automatically records how each intermediate node was computed -- the graph is built dynamically.
print('detach_and_zero_grad')
print('x.grad:',x,x.grad)
print('x1.grad:',x1,x1.grad)
print('y1.grad:', y1,y1.grad) # these are all composite functions of x, i.e. intermediate (non-leaf) nodes
print('y2.grad:', y2,y2.grad)
print('y3.grad:', y3,y3.grad)
print('y4.grad:', y4,y4.grad)
print('y5.grad:', y5,y5.grad)
# Output:
# detach_and_zero_grad
# x.grad: tensor([1., 2.], requires_grad=True) None
# x1.grad: tensor([3., 4.], requires_grad=True) None
# y1.grad: tensor([1., 4.], grad_fn=<PowBackward0>) None
# y2.grad: tensor([2., 8.], grad_fn=<MulBackward0>) None
# y3.grad: tensor([ 4., 16.], grad_fn=<MulBackward0>) None
# y4.grad: tensor([ 7., 28.], grad_fn=<AddBackward0>) None
# y5.grad: tensor([ 52., 788.], grad_fn=<AddBackward0>) None
# After detaching, the nodes still carry the computation graph; only the gradient values are gone.
# Gradients can still be computed from the stored values and the recorded graph.
# x = torch.tensor([1.0,2.0],requires_grad=True)
# x1 = torch.tensor([3.0,4.0],requires_grad=True) # reusing the leaf nodes of the previous graph directly would clobber its leaves, leaving the leaf grads as None after backward, even though p.grad.detach_() / p.grad.zero_() were called
# x_det = torch.tensor([1.0,2.0],requires_grad=True)
# x1_det = torch.tensor([3.0,4.0],requires_grad=True)
# x = x_det
# x1 = x1_det # reusing the same names likewise leads to x.grad == None
"""Experiment: when the leaf variables get new names while the intermediate names are reused, backward is unaffected."""
# Only then (leaf variables with different names) does y5.backward trace back to the already recorded leaf nodes x and x1.
# x_det = torch.tensor([1.0,2.0],requires_grad=True)
# x1_det = torch.tensor([3.0,4.0],requires_grad=True)
# # computed differently from the previous graph
# y1 = x_det*2
# y2 = y1**2
# y3 = y2*2
# y4 = y1 +y3+y2
# y6 = y4**2 + x1_det*2
#
# y_2 = y3
#
# # Even after the leaf nodes were detached from the graph, backward still works: y5.grad_fn keeps the backward
# # function, the traceable intermediate computations (the intermediate names y1, y2, y3 can be reused by other graphs),
# # and the leaf nodes (x, x1). Resetting a leaf or reusing it in another graph breaks backward -- they share the same
# # storage and conflict; consider clean_image_l.detach().clone()
# y5.backward(torch.ones(y5.shape))
# print('after detach_and_zero, backward on the original result still works')
# print('grad')
# print('x.grad:',x,x.grad) # x.grad: tensor([1., 2.], requires_grad=True) tensor([ 196., 1568.])
# print('x1.grad:',x1,x1.grad) # x1.grad: tensor([3., 4.], requires_grad=True) tensor([1., 1.])
# print('y1.grad:', y1,y1.grad) # these are all composite functions of x, i.e. intermediate (non-leaf) nodes
# print('y2.grad:', y2,y2.grad)
# print('y3.grad:', y3,y3.grad)
# print('y4.grad:', y4,y4.grad)
# From the results, the two runs above and below can be compared.
#
# y6.backward(torch.ones(y5.shape))
# print('grad')
# print('x_det.grad:',x_det,x_det.grad) # x_det.grad: tensor([1., 2.], requires_grad=True) tensor([ 728., 5200.])
# print('x1_det.grad:',x1_det,x1_det.grad) # x1_det.grad: tensor([3., 4.], requires_grad=True) tensor([2., 2.])
# print('y1.grad:', y1,y1.grad) # these are all composite functions of x, i.e. intermediate (non-leaf) nodes
# print('y2.grad:',y2, y2.grad)
# print('y3.grad:', y3,y3.grad)
# print('y4.grad:', y4,y4.grad)
#
# print('y_1 == y_2 ,{}'.format(y_2==y_1))
# print('y3 == y3 ,{}'.format(y3==y3))
# # y6.backward(torch.ones(y3.shape)) would likewise count as backpropagating through an already-used graph (Trying to backward through the graph a second time)
"""使用 clone 操作,替换原 x1,x 等 叶子节点 ,即换一个存储空间。"""
x_ = x.detach().clone() # 默认 required_grad 为 false
x_1 = x1.detach().clone() # 可以通过xxx.requires_grad_()将默认的Flase修改为True
x_.requires_grad_()
x_1.requires_grad_()
y1 = x_*2
y2 = y1**2
y3 = y2*2
y4 = y1 +y3+y2
y6 = y4**2 + x_1*2
y_2 = y3
# Even after the leaf nodes were detached from the graph, backward still works: y5.grad_fn keeps the backward
# function, the traceable intermediate computations (the intermediate names y1, y2, y3 can be reused by other graphs),
# and the leaf nodes (x, x1). Resetting a leaf or reusing it in another graph would break backward -- same storage,
# hence the conflict; this is why clean_image_l.detach().clone() is used.
y5.backward(torch.ones(y5.shape))
print('after detach_and_zero, backward on the original result still works')
print('grad')
print('x.grad:',x,x.grad) # x.grad: tensor([1., 2.], requires_grad=True) tensor([ 196., 1568.])
print('x1.grad:',x1,x1.grad) # x1.grad: tensor([3., 4.], requires_grad=True) tensor([1., 1.])
print('y1.grad:', y1,y1.grad)
print('y2.grad:', y2,y2.grad)
print('y3.grad:', y3,y3.grad)
print('y4.grad:', y4,y4.grad)
y6.backward(torch.ones(y6.shape))
print('after detach_and_zero, backward on the new result also works')
print('grad')
print('x_.grad:',x_,x_.grad) # x_.grad: tensor([1., 2.], requires_grad=True) tensor([ 728., 5200.])
print('x_1.grad:',x_1,x_1.grad) # x_1.grad: tensor([3., 4.], requires_grad=True) tensor([2., 2.])
print('y1.grad:', y1,y1.grad)
print('y2.grad:', y2,y2.grad)
print('y3.grad:', y3,y3.grad)
print('y4.grad:', y4,y4.grad)
"""总结如下 :
1. 当使用 detach 将中间节点从计算图中脱离时,后续的计算中,计算图不在记录其 grad_fn ,即为 None ,也即 loss.backward 时,当追溯到 与其相关的中间计算时,此节点 会自动被忽略。不再计算其梯度。
2. 当建立好一个计算图后,如果,将其叶子节点全部 detach 后,再利用叶子节点重新建立新的计算图时, 只要不改变 叶子节点在 detach 前的 存储位置,如利用 detach().clone() ,前一个计算图依然可以进行 loss.backward。并计算梯度。
3. 每一个计算图的建立,中间节点都是零时创建,尽管使用相同参数名。存储位置不重复利用。这也是虽然创建了几个新的计算图,但是每一个都可以进行自己的 loss.backward 回溯。"""
"""在PyTorch中计算图的特点可总结如下:
1.autograd根据用户对variable的操作构建其计算图。对变量的操作抽象为Function。
2.对于那些不是任何函数(Function)的输出,由用户创建的节点称为叶子节点,叶子节点的grad_fn为None。叶子节点中需要求导的variable,具有AccumulateGrad标识,因其梯度是累加的。
3.variable默认是不需要求导的,即requires_grad属性默认为False,如果某一个节点requires_grad被设置为True,那么所有依赖它的节点requires_grad都为True。(如: a = b +c ,则为 a 依赖 b 和 c , 即计算图中,一个节点requires_grad 为 True,所有建立在其之上的运算,requires_grad 都为 True.)
4.variable的volatile属性默认为False,如果某一个variable的volatile属性被设为True,那么所有依赖它的节点volatile属性都为True。volatile属性为True的节点不会求导,volatile的优先级比requires_grad高。
5.多次反向传播时,梯度是累加的。反向传播的中间缓存会被清空,为进行多次反向传播需指定retain_graph=True来保存这些缓存。
6.非叶子节点的梯度计算完之后即被清空,可以使用autograd.grad或hook技术获取非叶子节点的值。
7.variable的grad与data形状一致,应避免直接修改variable.data,因为对data的直接操作无法利用autograd进行反向传播
8.反向传播函数backward的参数grad_variables可以看成链式求导的中间结果,如果是标量,可以省略,默认为1
9.PyTorch采用动态图设计,可以很方便地查看中间层的输出,动态的设计计算图结构。
"""
""" loss.backward(grad_loss ) 的情况 """
def f(x):
'''计算y'''
y = x**2 * torch.exp(x)
return y
x = torch.randn(3,4, requires_grad = True)
y = f(x)
print(y)
# Result:
# tensor([[1.6681e-01, 2.9650e+00, 9.1634e+00, 4.9143e-01],
# [7.4560e-02, 3.3950e+00, 1.8273e+01, 2.8271e-01],
# [7.8892e+00, 4.2957e-04, 4.1004e-01, 1.2708e-02]], grad_fn=<MulBackward0>)
# y.backward() -- calling backward like this yields no x.grad; the grad_variables argument of backward() is wrong in that case.
# grad_variables should be the derivative of the final result w.r.t. the current value, i.e. dy/dy: all ones, with shape y.size().
y.backward(torch.ones(y.size()),retain_graph=True) # the gradient has the same shape as y
# t.ones(y.size()) plays the role of grad_variables: same shape as the variable; for y.backward(), grad_variables corresponds to dz/dy in the chain rule dz/dx = dz/dy * dy/dx.
# It cannot simply be run twice on its own: RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time.
print(x.grad)
# Alternatively:
z = torch.sum(y) # the plain builtin sum is not enough; torch.sum() is needed so that grad_fn and the rest of the backward machinery are attached.
z.backward() # here grad_variables = dz/dz; since z is a scalar, dz/dz = 1 and can be omitted.
print(x.grad)
""" A study of the computation graph. """
import torch as t
x = t.ones(1)
b = t.rand(1, requires_grad = True)
w = t.rand(1, requires_grad = True)
y = (w*3) * x**2 # equivalent to y = w.mul(x)
# y.requires_grad = False
# RuntimeError: you can only change requires_grad flags of leaf variables.
z = y + b # equivalent to z = y.add(b)
y.backward()
# y.grad -- the intermediate node y has no grad
print(w.grad)
print('x.grad',x.grad)
print(b.grad)
print('x.requires_grad:',x.requires_grad)
print('b.requires_grad:',b.requires_grad)
print('w.requires_grad:',w.requires_grad)
print('y.requires_grad:',y.requires_grad) # y.requires_grad was never set to True explicitly, but computing y needs w, and w.requires_grad=True
""" next_functions保存grad_fn的输入,是一个tuple,tuple的元素也是Function
第一个是y,它是乘法(mul)的输出,所以对应的反向传播函数y.grad_fn是MulBackward
第二个是b,它是叶子节点,由用户创建,grad_fn为None,但是有
"""
print('z.grad_fn.next_functions:',z.grad_fn.next_functions) # the backward DAG corresponding to the computation graph
# a variable's grad_fn corresponds to a Function node in the graph
print('z.grad_fn.next_functions[0][0] == y.grad_fn :',z.grad_fn.next_functions[0][0] == y.grad_fn)
print('z.grad_fn.next_functions[0][0].next_functions:',z.grad_fn.next_functions[0][0].next_functions)
print('y.grad_fn.next_functions:',y.grad_fn.next_functions)
""" 关闭自动求导功能。
有些时候我们可能不希望autograd对tensor求导。认为求导需要缓存许多中间结构,增加额外的内存/显存开销,那么我们可以关闭自动求导。对于不需要反向传播的情景(如inference,即测试推理时),
关闭自动求导可实现一定程度的速度提升,并节省约一半显存,因其不需要分配空间计算梯度。
"""
with t.no_grad():
x = t.ones(1)
w = t.rand(1, requires_grad = True)
y = x * w
# y depends on w and x; although w.requires_grad = True, y's requires_grad is still False
print('x.requires_grad:',x.requires_grad)
print('w.requires_grad:',w.requires_grad)
print('y.requires_grad:',y.requires_grad) # shows y.requires_grad: False
# Alternatively, set this with t.set_grad_enabled(False) and restore it with t.set_grad_enabled(True).
"""As soon as such operations are applied to a leaf tensor that requires grad, an error is raised immediately."""
"""A dynamic graph means that each time a computation graph is built and backpropagation finishes, the whole graph is freed from memory; to use it again it has to be rebuilt from scratch."""
``` |
{
"source": "1159186649/Raspberry-Car",
"score": 3
} |
#### File: 1159186649/Raspberry-Car/Camclose.py
```python
import os
from time import sleep
import time
def Close():
f=os.popen("ps -ef | grep python")
txt=f.readlines()
for line in txt:
colum=line.split()
pid=colum[1]
name=colum[-1]
if name=='Main.py':
cmd = "sudo kill -9 %d" % int(pid)
os.system(cmd)
if __name__ == '__main__':
Close()
sleep(10)
print('hello')
```
#### File: 1159186649/Raspberry-Car/Main.py
```python
import os
from time import sleep
import RPi.GPIO as GPIO
import pigpio
import time
from flask import Flask, render_template, request, Response
from camera_pi import Camera
from picamera import PiCamera
import camera_pi
import threading
import sys
import random
app = Flask(__name__)
global panServoAngle
global tiltServoAngle
global flag
global xmotor
global ymotor
global flag1
global derail
derail=0
xmin=-30
xmax=150
ymin=-20
ymax=90
xmotor=60
ymotor=-20
movestep=10
flag=1
flag1=1
panServoAngle = 90
tiltServoAngle = 90
panPin = 17
tiltPin = 18
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
global flag
"""Video streaming generator function."""
while flag==1:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
def setmotor(servo,angle):
if servo==17:
os.system('sudo python3 angleServoCtrl.py ' + str(servo) + " " + str(angle))
elif servo==18:
os.system('sudo python3 angleServoCtrl.py ' + str(servo) + " " + str(angle))
@app.route("/panup")
def panup():
global ymotor
if ymotor+movestep<=ymax:
ymotor=ymotor+movestep
setmotor(tiltPin,ymotor)
return render_template("index.html")
@app.route("/pandown")
def pandown():
global ymotor
if ymotor-movestep>=ymin:
ymotor=ymotor-movestep
setmotor(tiltPin,ymotor)
return render_template("index.html")
@app.route("/panleft")
def panleft():
global xmotor
if xmotor+movestep<=xmax:
xmotor=xmotor+movestep
setmotor(panPin,xmotor)
return render_template("index.html")
@app.route("/panright")
def panright():
global xmotor
if xmotor-movestep>=xmin:
xmotor=xmotor-movestep
setmotor(panPin,xmotor)
return render_template("index.html")
@app.route("/panpause")
def panpaues():
return render_template("index.html")
def init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
def gobackward(tf=3):
init()
GPIO.output(7,0)
GPIO.output(11,1)
GPIO.output(13,1)
GPIO.output(15,0)
time.sleep(tf)
GPIO.cleanup()
def goforward(tf=3):
init()
GPIO.output(7,1)
GPIO.output(11,0)
GPIO.output(13,0)
GPIO.output(15,1)
time.sleep(tf)
GPIO.cleanup()
def goleft(tf=0.5):
init()
GPIO.output(7,1)
GPIO.output(11,0)
GPIO.output(13,1)
GPIO.output(15,0)
time.sleep(tf)
GPIO.cleanup()
def goright(tf=0.5):
init()
GPIO.output(7,0)
GPIO.output(11,1)
GPIO.output(13,0)
GPIO.output(15,1)
time.sleep(tf)
GPIO.cleanup()
def distance(measure='cm'):
try:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12,GPIO.OUT)
GPIO.setup(16,GPIO.IN)
GPIO.output(12,True)
time.sleep(0.000015)
GPIO.output(12,False)
while GPIO.input(16) == 0:
noisg = time.time()
while GPIO.input(16) == 1:
sig = time.time()
tl=sig - noisg
if measure == 'cm':
distance = tl/0.000058
elif measure == 'in':
distance = tl/0.000148
else:
print("improper choice of measure: in or cm")
distance = None
GPIO.cleanup()
return distance
except:
distance = 100
GPIO.cleanup()
return distance
def check_front():
init()
dist = distance()
if dist < 20:
init()
gobackward(2)
dist = distance()
if dist < 20:
init()
goleft(0.5)
init()
gobackward(2)
dist = distance()
if dist < 20:
sys.exit
@app.route("/backward")
def backward():
gobackward(3)
return render_template("index.html")
@app.route("/forward")
def forward():
goforward(3)
return render_template("index.html")
@app.route("/left")
def left():
goleft(0.5)
return render_template("index.html")
@app.route("/right")
def right():
goright(0.5)
return render_template("index.html")
def autonomy():
global derail
derail=1
while derail == 1:
tf = 0.060
x = random.randrange(0,3)
if x == 0:
for y in range(50):
check_front()
goforward(tf)
elif x == 1:
goleft(0.5)
for y in range(50):
check_front()
goforward(tf)
elif x == 2:
goright(0.5)
for y in range(50):
check_front()
goforward(tf)
time.sleep(0.5)
return render_template("index.html")
@app.route("/shutdownrun")
def shutdownrun():
global derail
derail = 0
return render_template("index.html")
def getimage():
camera = PiCamera()
    camera.resolution = (1024,768)  # capture resolution 1024x768
    camera.start_preview()  # start the camera preview
    time.sleep(1)
    camera.capture('faceimage.jpg')  # take a photo and save it
time.sleep(1)
camera.close()
@app.route("/startre")
def startre():
global flag
global flag1
while True:
time.sleep(40)
flag=0
time.sleep(2)
getimage()
time.sleep(1)
flag=1
os.system("sudo python3 recognition.py")
if flag1==0:
flag1=1
return render_template("index.html")
return render_template("index.html")
@app.route("/stopre")
def stopre():
global flag
global flag1
flag=1
flag1=0
return render_template("index.html")
if __name__ == '__main__':
setmotor(tiltPin,ymotor)
setmotor(panPin,xmotor)
app.run(host='0.0.0.0', port =80, debug=True, threaded=True)
```
#### File: 1159186649/Raspberry-Car/recognition.py
```python
from aip import AipFace
from picamera import PiCamera
import urllib.request
import RPi.GPIO as GPIO
import base64
import time
import Main
# Baidu AI Cloud face recognition app id and keys
APP_ID = '*****'
API_KEY = '**********'
SECRET_KEY ='**********'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)  # create a client for accessing Baidu AI Cloud
# image encoding type
IMAGE_TYPE='BASE64'
camera = PiCamera()  # camera object
# user group
GROUP = 'usr1'
# photo-taking function
# def getimage():
#     camera.resolution = (1024,768)  # capture resolution 1024x768
#     camera.start_preview()  # start the camera preview
#     time.sleep(1)
#     camera.capture('faceimage.jpg')  # take a photo and save it
#     time.sleep(1)
#     camera.close()
# convert the image to the required format
def transimage():
f = open('faceimage.jpg','rb')
img = base64.b64encode(f.read())
return img
# upload to the Baidu API for face detection
def go_api(image):
    result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)  # search the Baidu cloud face library for a matching face
    if result['error_msg'] == 'SUCCESS':  # the request succeeded
        name = result['result']['user_list'][0]['user_id']  # get the name
        score = result['result']['user_list'][0]['score']  # get the similarity score
        if score < 80:  # if the similarity score is below 80
            print("Sorry, I don't recognize you!")
            name = 'Unknown'
            return 0
        current_time = time.asctime(time.localtime(time.time()))  # get the current time
        # record the person's entry/exit in Log.txt
        f = open('Log.txt','a')
        f.write("Person: " + name + " " + "Time:" + str(current_time)+'\n')
        f.close()
        return 1
    if result['error_msg'] == 'pic not has face':
        # print('no face detected')
time.sleep(2)
return 1
else:
        print(result['error_code'], result['error_msg'])
return 0
# main function
if __name__ == '__main__':
    print('Ready')
    # getimage()  # take a photo
    img = transimage()  # convert the photo format
    res = go_api(img)  # upload the converted image to Baidu cloud
    if res == 1:  # the person is in the face library
        print("Normal")
    else:
        print("Stranger detected")
    print('Wait 40s before the next round')
time.sleep(1)
``` |
{
"source": "115GOTYOUR6/battlefield-stats",
"score": 3
} |
#### File: battlefield-stats/battlefield/directories.py
```python
from os import mkdir
from os import rmdir
def create_dir(direct):
"""Create the given directory in the present working directory.
The present working directory is taken as the location of invokation.
parameters:
- direct: A string, the file path to be created
returns:
    - files: A dictionary containing information about the file path created
the structure is as follows:
|path (direct parameter eg. "some/thing")
|0 (The first file in the path. A 0 is stored if the
|1 creation of the file failed, a 1 on success.)
...
raises:
- TypeError: When the given parameter is not of the expected type
"""
if not isinstance(direct, str):
raise TypeError("The given parameter is not a sting")
if direct[-1] == '/':
direct = direct[:-1:]
files = {}
files["path"] = direct
path = ""
for ind, val in enumerate(direct.split('/')):
try:
if val == '':
val = '/'
path = path + val
mkdir(path)
files[ind] = 1
except FileExistsError:
files[ind] = 0
path = path + '/'
return files
def remove_dir(files):
"""Remove the files marked as created by create_dir
parameters:
- files: The dictionary returned by create_dir containing the files
created. The structure is as follows:
|path (direct parameter eg. "some/thing")
|0 (The first file in the path. A 0 is stored if the
|1 creation of the file failed, a 1 on success.)
...
raises:
- TypeError: When the given parameter does not meet the expected type
- ValueError: When the 'path' key of the dictionary, files, is not
present.
"""
if not isinstance(files, dict):
raise TypeError("The given parameter is not a dictionary")
if "path" not in files.keys():
raise ValueError("The directory created is not recorded in the"
" given dictionary")
for i in reversed(range(len(files.keys()) - 1)):
if not isinstance(files[i], int):
raise TypeError("The {i} entry in the given parameter is not"
" an integer".format(i=i))
if files[i] == 1:
try:
rm_file = "/".join(files['path'].split("/")[:i + 1:])
if rm_file == "/":
pass
elif rm_file == "/home":
pass
else:
rmdir(rm_file)
except OSError:
raise OSError("A directory that was marked as created is not"
" empty, {}".format(rm_file))
```
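A minimal usage sketch for the two helpers above, assuming the `battlefield` package is importable and the working directory is writable; the `logs/2024/raw` path is only an illustrative example, not part of the repository:
```python
from battlefield.directories import create_dir, remove_dir

# create_dir returns a dict recording which path components it actually created
created = create_dir("logs/2024/raw")
print(created)  # e.g. {'path': 'logs/2024/raw', 0: 1, 1: 1, 2: 1}

# remove_dir deletes only the components that create_dir marked as created (value 1)
remove_dir(created)
```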
#### File: battlefield-stats/battlefield/weapons.py
```python
from re import sub
from battlefield import time_played
def create(data, prof):
"""The output of the scrub method is converted into a dictionary.
The intended format of the dictionary returned is:
|prof name
-|stat
--|weapon class
---|weapon name
----|value
parameters:
- data: The list obtained from the scrub method
- prof: The name of the profile the stats are for as a string
returns:
- s_c_dict: A dictionary containing the information in the data list
in the stats_class format
raises:
ValueError: Raised when:
- the number of elements in the data list is not a
multiple of the known number of entries there are
for each weapon
- one of the stats entries in the data list is not
of a type that is known to be present.
"""
stats = ["kills", "kpm", "time played", "shots fired", "shots hit",
"accuracy", "headshots"]
# this variable represents the total number of elements in the list that
# belong to a single weapon
ent = len(stats) + 2
s_c_dict = {}
s_c_dict[prof] = {}
for i in stats:
s_c_dict[prof][i] = {}
if len(data) % ent != 0:
raise ValueError("The number of entries in the scrubbed data was not a"
" multiple of {}. This may be the result of the"
" webpage changing its format or an unforeseen entry"
" in a weapon stat field.".format(ent))
# len(data)/ent gives the number of weapons present in the data
for i in range(int(len(data)/ent)):
for stat in stats:
# if weapon class not in the stat part of dict make empty dict
if data[i*ent + 1] not in s_c_dict[prof][stat].keys():
s_c_dict[prof][stat][data[i*ent + 1]] = {}
try:
if stat in ["kpm", "accuracy"]:
s_c_dict[prof][stat][data[i*ent + 1]][data[i*ent]] = (
float(sub(",", "", data[i*ent + 2
+ stats.index(stat)])))
elif (stat
in ["kills", "shots fired", "shots hit", "headshots"]):
s_c_dict[prof][stat][data[i*ent + 1]][data[i*ent]] = (
int(sub(",", "", data[i*ent + 2 + stats.index(stat)])))
else:
s_c_dict[prof][stat][data[i*ent + 1]][data[i*ent]] = (
time_played.hours(data[i*ent + 2 + stats.index(stat)]))
except ValueError:
raise ValueError("One of the stats read in is not of the type"
" expected.")
return s_c_dict
def fillout(s_c_dict):
"""Adds 0 to weapon entries that exist in other profiles.
The method looks at all the weapon entries under a profile and, for any
weapon that is missing from another profile, creates an entry for that
weapon in the other profile with the value 0. This is needed because a
weapon that a player has never used does not appear on the battlefield
tracker website at all.
parameters:
- s_c_dict: the s_c_dict to be filled out
returns:
raises:
- ValueError: When fewer than two profiles are present in the given
s_c_dict
"""
if len(s_c_dict) < 2:
raise ValueError("There are not 2 or more profiles present in the"
" given s_c_dict")
profs = list(s_c_dict.keys())
mast_prof = profs[0]
del profs[0]
for prof in profs:
for stat in s_c_dict[prof]:
if stat not in s_c_dict[mast_prof].keys():
s_c_dict[mast_prof][stat] = {}
for w_class in s_c_dict[prof][stat].keys():
if w_class not in s_c_dict[mast_prof][stat].keys():
s_c_dict[mast_prof][stat][w_class] = {}
for weap in s_c_dict[prof][stat][w_class].keys():
if weap not in s_c_dict[mast_prof][stat][w_class].keys():
s_c_dict[mast_prof][stat][w_class][weap] = 0
for prof in profs:
for stat in s_c_dict[mast_prof].keys():
if stat not in s_c_dict[prof].keys():
s_c_dict[prof][stat] = {}
for w_class in s_c_dict[mast_prof][stat].keys():
if w_class not in s_c_dict[prof][stat].keys():
s_c_dict[prof][stat][w_class] = {}
for weap in s_c_dict[mast_prof][stat][w_class].keys():
if weap not in s_c_dict[prof][stat][w_class].keys():
s_c_dict[prof][stat][w_class][weap] = 0
def add_hpk(stats_dict):
"""Add headshots per kill stat to given s_c dict.
parameters:
-stats_dict: The s_c dict to store the stat calculations to
returns:
raises:
- ValueError: Raised when expected keys are missing from the given
stats_dict, or when headshots and kills do not contain
the same weapon classes or weapons
"""
for prof in stats_dict.keys():
if ("headshots" not in stats_dict[prof].keys()
or "kills" not in stats_dict[prof].keys()):
raise ValueError("The kills or headshots stats are not present")
if len(stats_dict[prof]["headshots"].keys()) == 0:
raise ValueError("There are no weapon classes present in the"
" headshots stat")
if (stats_dict[prof]["headshots"].keys()
!= stats_dict[prof]["kills"].keys()):
raise ValueError("headshots and kills do not have the"
" same weapon classes present in the"
" dictionary")
stats_dict[prof]["hpk"] = {}
for w_class in stats_dict[prof]["headshots"].keys():
if len(stats_dict[prof]["headshots"][w_class].keys()) == 0:
del stats_dict[prof]["hpk"]
raise ValueError("There are no weapons in the {} class for"
" headshots"
.format(w_class))
if (stats_dict[prof]["headshots"][w_class].keys()
!= stats_dict[prof]["kills"][w_class].keys()):
del stats_dict[prof]["hpk"]
raise ValueError("headshots and kills do not have the same"
" weapons present in the dictionary")
stats_dict[prof]["hpk"][w_class] = {}
for weap in stats_dict[prof]["headshots"][w_class].keys():
try:
stats_dict[prof]["hpk"][w_class][weap] = (
stats_dict[prof]["headshots"][w_class][weap]
/stats_dict[prof]["kills"][w_class][weap]*100.0)
except ZeroDivisionError:
stats_dict[prof]["hpk"][w_class][weap] = 0
def stat_limits(stats_dict, prof, stat, up_buff=None, low_buff=None):
"""Returns the highest and lowest values found in the s_c_dict provided.
parameter:
- stats_dict: The s_c_dict the min max values are wanted from
- prof: The profile name the values are to be taken from as a string
- stat: The stat the values are to be taken from as a string
- up_buff: A value indicating the amount of buffer desired as a
fraction of the max value (e.g. 0.1 adds 10% headroom)
- low_buff: A value indicating the amount of buffer desired as a
fraction of the minimum value
returns:
- s_min: The smallest value found for the given profile and stat,
optionally lowered by low_buff
- s_max: The largest value found for the given profile and stat,
optionally raised by up_buff
raises:
"""
s_min = 0
s_max = 0
for w_class in stats_dict[prof][stat].keys():
for weap in stats_dict[prof][stat][w_class].keys():
if stats_dict[prof][stat][w_class][weap] > s_max:
s_max = stats_dict[prof][stat][w_class][weap]
if stats_dict[prof][stat][w_class][weap] < s_min:
s_min = stats_dict[prof][stat][w_class][weap]
if up_buff is not None:
s_max = s_max + abs(s_max*up_buff)
if low_buff is not None:
s_min = s_min - abs(s_min*low_buff)
return s_min, s_max
``` |
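A small, hypothetical usage sketch of `fillout`, `add_hpk`, and `stat_limits` on a hand-built stats dictionary; the profile, class, and weapon names are made up, and a real dictionary would normally come from the scrub/`create` pipeline:
```python
from battlefield import weapons

# minimal s_c_dict with the prof -> stat -> class -> weapon -> value layout
stats = {
    "prof_a": {
        "kills": {"SMG": {"MP40": 120}},
        "headshots": {"SMG": {"MP40": 30}},
    },
    "prof_b": {
        "kills": {"SMG": {"STEN": 80}},
        "headshots": {"SMG": {"STEN": 8}},
    },
}

weapons.fillout(stats)   # every weapon now exists in both profiles (0 where unused)
weapons.add_hpk(stats)   # adds headshots-per-kill as the "hpk" stat
print(weapons.stat_limits(stats, "prof_a", "hpk", up_buff=0.1))  # -> (0, 27.5)
```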
{
"source": "1161678627/bird_sound_recognition",
"score": 3
} |
#### File: 1161678627/bird_sound_recognition/prepare_image_from_wav.py
```python
import os
import librosa
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import librosa.display
import noisereduce as no
import queue
import threading
import tqdm
from PIL import Image
SAMPLE_RATE = 32000
# Parse the data folder into a dict of {sound file path: [category, file name]}
def convert_flord_to_pathdict(train_data_path, train_voice_path_dict):
cat_list = os.listdir(train_data_path)
# there are 100 category folders in total
# print(cat_list)
for cat_dir in cat_list:
cat_fload_path = os.path.join(train_data_path, cat_dir)
sound_name_list = os.listdir(cat_fload_path)
sound_full_path_list = [(os.path.join(cat_fload_path, sound_name), sound_name) for sound_name in sound_name_list]
for sound_full_path, sound_name in sound_full_path_list:
train_voice_path_dict[sound_full_path] = [cat_dir, sound_name]
# Read a wav file and convert it into a high-pass-filtered mel spectrogram image
def convert_wav_mlpng(wav_path, dst_path):
signal, sampling_rate = librosa.load(path=wav_path, duration=duration)  # librosa default sr=22050
sr = sampling_rate
# signal = no.reduce_noise(signal, signal, verbose=False)
# Plot mel-spectrogram with high-pass filter
N_FFT = 1024
HOP_SIZE = 1024
N_MELS = 128
WIN_SIZE = 1024
WINDOW_TYPE = 'hann'
FEATURE = 'mel'
FMIN = 20
S = librosa.feature.melspectrogram(y=signal, sr=sr,
n_fft=N_FFT,
hop_length=HOP_SIZE,
n_mels=N_MELS,
htk=True,
fmin=FMIN,
fmax=sr / 2)
plt.figure(figsize=(8, 8))
# librosa.display.specshow(librosa.power_to_db(S ** 2, ref=np.max), fmin=FMIN, y_axis='linear')
librosa.display.specshow(librosa.power_to_db(S ** 2, ref=np.max), fmin=FMIN)
# plt.colorbar(format='%+2.0f dB')
# plt.title('Mel-scaled spectrogram with high-pass filter - 10 seconds')
# plt.show()
plt.axis('off')
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
plt.margins(0, 0)
plt.savefig(dst_path)
plt.close()
print(dst_path, 'written successfully!')
# Walk the path dict, create a folder per category, and store the converted high-pass mel spectrogram PNGs there
def convert_pathdict_to_mlpng(path_dict):
for wav_path in tqdm.tqdm(path_dict):
label = path_dict[wav_path][0]
# decide which top-level folder the image is stored in
if 'train' in wav_path:
dst_path = 'train_split_img_old_{}s'.format(duration)
elif 'test' in wav_path:
dst_path = 'test_split_img_old_{}s'.format(duration)
else:
dst_path = 'dev_split_img_old_{}s'.format(duration)
# create dst_path ---> e.g. train_split_img_old_5s
if not os.path.exists(dst_path):
os.mkdir(dst_path)
cat_path = os.path.join(dst_path, label)
# create the category folder manually if it does not exist yet
if not os.path.exists(cat_path):
os.mkdir(cat_path)
# write the converted image into the category folder
# the converted image keeps the wav file name with a .png extension
full_dst_path = os.path.join(cat_path, path_dict[wav_path][1].replace('wav', 'png'))
# only write if the file does not exist yet
if not os.path.exists(full_dst_path):
convert_wav_mlpng(wav_path=wav_path, dst_path=full_dst_path)
else:
print(full_dst_path, 'already exists, skipping.')
# variant of the conversion used by the multithreaded path
def convert_tuple_to_mlpng(wav_path, label_):
label = label_[0]
# decide which top-level folder the image is stored in
if 'train' in wav_path:
dst_path = 'train_img'
else:
dst_path = 'dev_img'
cat_path = os.path.join(dst_path, label)
# create the category folder manually if it does not exist yet
if not os.path.exists(cat_path):
os.mkdir(cat_path)
# write the converted image into the category folder
# the converted image keeps the wav file name with a .png extension
full_dst_path = os.path.join(cat_path, label_[1].replace('wav', 'png'))
# only write if the file does not exist yet
if not os.path.exists(full_dst_path):
convert_wav_mlpng(wav_path=wav_path, dst_path=full_dst_path)
else:
print(full_dst_path, 'already exists, skipping.\n')
# flag telling the worker threads to exit
exitFlag = 0
# use multiple threads for reading and writing data
class Mythread(threading.Thread):
# set up the parameters each worker thread needs
def __init__(self, thread_id, que):
threading.Thread.__init__(self)
self.thread_id = thread_id
# a single queue instance is passed in; every thread shares this queue
self.que = que
# the repetitive task each worker thread runs
def run(self) -> None:
print("Starting thread:", self.thread_id)
process_data(self.que, self.thread_id)
print("Exiting thread:", self.thread_id)
# how each thread takes items from the queue and processes them
def process_data(que, id):
while not exitFlag:
queueLock.acquire()
# lock, take one item from the queue for this thread, then release the lock right away
if not workQueue.empty():
data = que.get()
queueLock.release()
print(data)
convert_tuple_to_mlpng(data[0], data[1])
else:
queueLock.release()
queueLock = threading.Lock()
workQueue = queue.Queue(12522)
threads = []
threadNum = 10
if __name__ == '__main__':
for duration in [5, 10, 15, 20, 30]:
# duration = 5
# convert the training set
train_data_path = 'train_split_data_{}s'.format(duration)
train_voice_path_dict = {}
convert_flord_to_pathdict(train_data_path, train_voice_path_dict)
# print(len(list(train_voice_path_dict.items())))
# print(train_voice_path_dict)
convert_pathdict_to_mlpng(train_voice_path_dict)
# convert the validation set
dev_data_path = 'dev_split_data_{}s'.format(duration)
dev_voice_path_dict = {}
convert_flord_to_pathdict(dev_data_path, dev_voice_path_dict)
# print(dev_voice_path_dict)
convert_pathdict_to_mlpng(dev_voice_path_dict)
# convert the test set --- turn every wav in test_split_data into an image
test_data_path = 'test_split_data_{}s'.format(duration)
test_voice_path_dict = {}
convert_flord_to_pathdict(test_data_path, test_voice_path_dict)
# print(test_voice_path_dict)
convert_pathdict_to_mlpng(test_voice_path_dict)
# Multithreading cannot be used here!
# train_voice_path_dict.update(dev_voice_path_dict)
# print(len(list(train_voice_path_dict.items())))
#
# # fill the queue
# queueLock.acquire()
# for wav_path in train_voice_path_dict:
# workQueue.put((wav_path, train_voice_path_dict[wav_path]))
# queueLock.release()
#
# # create new threads
# for index in range(threadNum):
# thread = Mythread(index+1, workQueue)
# thread.start()
# threads.append(thread)
#
# # wait for the queue to empty; the main thread blocks in this loop while the worker threads keep running process_data above and draining the queue until it is exhausted
# while not workQueue.empty():
# pass
#
# # once the workers have consumed all the data, set the flag to tell them to exit --- they leave the process_data loop and continue to the next line in run()
# exitFlag = 1
#
# # wait for every thread to finish the item it is currently processing, i.e. the last line of run(); the worker threads are then cleaned up with join()
# for t in threads:
# t.join()
#
# # print('All data processed, queue is empty, main thread exiting!')
``` |
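A quick single-file sketch for checking the conversion above. `convert_wav_mlpng` reads the module-level `duration` global, which is normally set inside `__main__`, so the sketch sets it explicitly; the wav path is just a placeholder:
```python
import prepare_image_from_wav as prep

# set the global that convert_wav_mlpng reads before calling it
prep.duration = 5
prep.convert_wav_mlpng(wav_path='some_bird_call.wav', dst_path='some_bird_call.png')
```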
{
"source": "1162620106/Pedestrian-warning-system",
"score": 3
} |
#### File: VOCdevkit/VOC2007/make1.py
```python
import xml.sax
import xml.dom.minidom
from xml.dom.minidom import getDOMImplementation
import cv2
import os
'''
For each D2-City video and its matching annotation file:
Step 1: extract the annotations for the person class (mainly the box coordinates and the frame index)
Step 2: save those frames from the video, one image per frame (or every few frames), as jpg files in the JPEGImages folder
Step 3: convert the person annotations into the xml format produced by "labelImg" (saved into the Annotations folder)
'''
xml_src_path = r'C:/AIoT/D2-City/valid_annotation/0008' # D2-City xml folder
xml_save_path = r'./Annotations' # folder to save the xml files
fileName = ""
videos_src_path = r'C:/AIoT/D2-City/valid_video/0008' # video folder
frame_save_path = r'./JPEGImages' # folder to save the extracted frames
class DataHandler(xml.sax.ContentHandler):
def __init__(self):
self.isPerson = False
self.ybr = ""
self.xbr = ""
self.ytl = ""
self.xtl = ""
self.frame = ""
self.dict = {}
# element start event handler
def startElement(self, tag, attributes):
if tag == "track":
if attributes["label"] == "person":
self.isPerson = True
if tag == "box" and self.isPerson:
self.ybr = attributes["ybr"]
self.xbr = attributes["xbr"]
self.ytl = attributes["ytl"]
self.xtl = attributes["xtl"]
self.frame = attributes["frame"]
# element end event handler
def endElement(self, tag):
if tag == "annotations" and len(self.dict) != 0:
dict_keys = self.dict.keys()
cap = cv2.VideoCapture(videos_src_path + "/" + fileName + ".mp4") # read the video frame by frame with OpenCV
frame_count = 1
while True:
success, frame = cap.read()
if success:
if frame_count in dict_keys:
cv2.imwrite(frame_save_path + "/" + fileName + "_%d.jpg" % frame_count, frame)
print("Save frame: " + fileName + "_%d.jpg" % frame_count)
self.createXML(frame_count) # create fileName_frame.xml from [(box tuple), ...]
frame_count += 1
else:
break
cap.release()
self.dict.clear()
if tag == "track" and self.isPerson:
self.isPerson = False
if tag == "box" and self.isPerson:
# every x frames, record into dict: {frame: [(box tuple), ...]}
if int(self.frame) % 10 == 0:
tup = (self.ybr, self.xbr, self.ytl, self.xtl)
self.dict.setdefault(int(self.frame), []).append(tup)
# create fileName_frame.xml from [(box tuple), ...]
def createXML(self, frame_count):
implementation = getDOMImplementation()
document = implementation.createDocument(None, None, None)
annotation = document.createElement("annotation")
document.appendChild(annotation)
folder = document.createElement("folder")
folder.appendChild(document.createTextNode("JPEGImages"))
annotation.appendChild(folder)
filename = document.createElement("filename")
filename.appendChild(document.createTextNode(fileName + "_" + str(frame_count)))
annotation.appendChild(filename)
path = document.createElement("path")
path.appendChild(document.createTextNode("C:/AIoT/Projects/keras-yolo3-master/VOCdevkit/VOC2007/JPEGImages/" + fileName + "_%d.jpg" % frame_count))
annotation.appendChild(path)
source = document.createElement("source")
annotation.appendChild(source)
database = document.createElement("database")
database.appendChild(document.createTextNode("Unknown"))
source.appendChild(database)
size = document.createElement("size")
annotation.appendChild(size)
width = document.createElement("width")
width.appendChild(document.createTextNode("1920"))
size.appendChild(width)
height = document.createElement("height")
height.appendChild(document.createTextNode("1080"))
size.appendChild(height)
depth = document.createElement("depth")
depth.appendChild(document.createTextNode("3"))
size.appendChild(depth)
segmented = document.createElement("segmented")
segmented.appendChild(document.createTextNode("0"))
annotation.appendChild(segmented)
tup_list = self.dict.get(frame_count)
for tup in tup_list:
object = document.createElement("object")
annotation.appendChild(object)
name = document.createElement("name")
name.appendChild(document.createTextNode("person"))
object.appendChild(name)
pose = document.createElement("pose")
pose.appendChild(document.createTextNode("Unspecified"))
object.appendChild(pose)
truncated = document.createElement("truncated")
truncated.appendChild(document.createTextNode("0"))
object.appendChild(truncated)
difficult = document.createElement("difficult")
difficult.appendChild(document.createTextNode("0"))
object.appendChild(difficult)
bndbox = document.createElement("bndbox")
object.appendChild(bndbox)
xmin = document.createElement("xmin")
xmin.appendChild(document.createTextNode(tup[3]))
bndbox.appendChild(xmin)
ymin = document.createElement("ymin")
ymin.appendChild(document.createTextNode(tup[2]))
bndbox.appendChild(ymin)
xmax = document.createElement("xmax")
xmax.appendChild(document.createTextNode(tup[1]))
bndbox.appendChild(xmax)
ymax = document.createElement("ymax")
ymax.appendChild(document.createTextNode(tup[0]))
bndbox.appendChild(ymax)
out = open(xml_save_path + "/" + fileName + "_%d.xml" % frame_count, "w")
document.writexml(out, '', '\t', '\n')
print("Save xml: " + fileName + "_%d.xml" % frame_count)
if (__name__ == "__main__"):
# create an XMLReader
parser = xml.sax.make_parser()
# turn off namepsaces
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the ContentHandler
Handler = DataHandler()
parser.setContentHandler(Handler)
xml_list = os.listdir(xml_src_path) # list the names of the files/folders in the given folder
for each_xml in xml_list:
fileName, _ = each_xml.split('.')
parser.parse(xml_src_path + "/" + each_xml)
``` |
{
"source": "11-626/Attention-Image-Captioning",
"score": 3
} |
#### File: 11-626/Attention-Image-Captioning/DataLoader.py
```python
import torch, torchvision
from collections import Counter
from random import seed, choice, sample
import json, PIL, sys
from random import seed, choice, sample
#-----------------------------------------------------------------------------
# vocabulary
#-----------------------------------------------------------------------------
class Vocabulary:
"""Simple vocabulary wrapper."""
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
self.max_word_length = 0
def add_word(self, word):
if not word in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
self.max_word_length = max(self.max_word_length, len(word))
def __call__(self, word):
if not word in self.word2idx:
return self.word2idx['<unk>']
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
def make(self, dataset, min_word_freq, karpathy_json_path="./Karpathy_splits", max_len=100):
r"""
Builds the word map (vocabulary) from the captions in the Karpathy split file.
:param dataset: name of dataset, one of 'coco', 'flickr8k', 'flickr30k'
:param karpathy_json_path: path of Karpathy JSON file with splits and captions
:param min_word_freq: words occurring less frequently than this threshold are binned as <unk>s
:param max_len: don't sample captions longer than this length
"""
assert dataset in ('coco', 'flickr8k', 'flickr30k')
# Read Karpathy JSON
with open(f"{karpathy_json_path}/dataset_{dataset}.json", 'r') as j:
data = json.load(j)
# Read image paths and captions for each image
train_image_paths = []
train_image_captions = []
val_image_paths = []
val_image_captions = []
test_image_paths = []
test_image_captions = []
word_freq = Counter()
for img in data['images']:
for c in img['sentences']:
# Update word frequency
word_freq.update(c['tokens'])
# Create word map
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
self.add_word("<pad>")
for word in words:
self.add_word(word)
self.add_word("<unk>")
self.add_word("<start>")
self.add_word("<end>")
#-----------------------------------------------------------------------------
# dataset and dataloader
#-----------------------------------------------------------------------------
class CaptionDataset(torch.utils.data.Dataset):
def __init__(self, data_folder, dataset, split, vocab, isLimit, transform=None, device=None):
r"""
:param data_folder: folder where data files are stored
:param dataset: name of dataset, one of 'coco', 'flickr8k', 'flickr30k'
:param split: split, one of 'train', 'val', or 'test'
:param vocab: Vocabulary used to encode the captions
:param isLimit: if True, keep only a small subset of images for quick runs
:param transform: image transform pipeline
:param device: torch device the image and caption tensors are moved to
"""
self.data_folder = data_folder
assert dataset in ('coco', 'flickr8k', 'flickr30k')
self.dataset = dataset
self.split = split
assert self.split in ("train", "val", "test"), f"bad split key word: split={split}"
self.vocab = vocab
self.isLimit = isLimit
self.transform = transform
self.device = device
self.prepare()
#print(self.images.shape, self.captions.shape, self.lengths.shape)
def __len__(self):
return self.captions.size(0)
def __getitem__(self, i):
img = self.images[i//self.captions_per_image,:,:,:]
caption = self.captions[i,:]
capleng = self.lengths[i]
if self.split == "train":
return img, caption, capleng
else:
# For validation or testing, also return all 'captions_per_image' captions to find BLEU-4 score
bias = (i // self.captions_per_image) * self.captions_per_image
all_captions = self.captions[bias:(bias+self.captions_per_image)]
return img, caption, capleng, all_captions
def prepare(self, captions_per_image=5, min_word_freq=5, max_len=98, karpathy_json_path="./Karpathy_splits/"):
self.captions_per_image = captions_per_image
data_folder = self.data_folder
dataset = self.dataset
vocab = self.vocab
image_folder = f"{data_folder}/images/"
#-- Read Karpathy JSON
with open(f"{karpathy_json_path}/dataset_{dataset}.json", 'r') as j:
data = json.load(j)
image_paths = []
image_captions = []
for img in data["images"]:
if img["split"] not in (self.split,):
continue
captions = []
for c in img["sentences"]:
if len(c["tokens"]) <= max_len:
captions.append(c["tokens"])
if len(captions) == 0:
continue
path = f"./{data_folder}/{dataset}/images/{img['filename']}"
image_paths.append(path)
image_captions.append(captions)
# Sanity check
assert len(image_paths) == len(image_captions), "bad lengths."
#-- limit
if self.isLimit:
limit = 100 if len(image_paths) > 2000 else 20
image_paths = image_paths[:limit]
image_captions = image_captions[:limit]
#-- prepare images
images = []
for dir in image_paths:
img = PIL.Image.open(dir)
if self.transform is not None:
img = self.transform(img)
images.append(img.unsqueeze(0))
images = torch.cat(images, dim=0).type( torch.FloatTensor )
#-- prepare captions, caption lengths
encoded_captions = []
caption_lengths = []
for caps in image_captions:
caps = list(caps)
# Sample captions
if len(caps) < captions_per_image:
captions = caps + [choice(caps) for _ in range(captions_per_image - len(caps))]
else:
captions = sample(caps, k=captions_per_image)
# Sanity check
assert len(captions) == captions_per_image
for j, c in enumerate(captions):
enc_c = [vocab('<start>')] + [vocab(word) for word in c] + [vocab('<end>')] + [vocab('<pad>')] * (max_len - len(c))
# Find caption lengths, <start> ... <end>
c_len = len(c) + 2
encoded_captions.append( enc_c )
caption_lengths.append( c_len )
encoded_captions = torch.tensor( encoded_captions ).type( torch.LongTensor )
caption_lengths = torch.tensor( caption_lengths ).type( torch.LongTensor )
#-- to device
if self.device is not None:
images = images.to(self.device)
encoded_captions = encoded_captions.to(self.device)
caption_lengths = caption_lengths.to(self.device)
#-- set attributes
self.files = image_paths
self.image_captions = image_captions
self.images = images
self.captions = encoded_captions
self.lengths = caption_lengths
print(f"Loaded {self.images.size(0)} images and {self.captions.size(0)} captions for {self.split}.")
def get_dataloaders(Ps, vocab):
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize((256,256)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std =[0.229, 0.224, 0.225]),
])
train_loader = torch.utils.data.DataLoader(
CaptionDataset(Ps["data_folder"], dataset=Ps["dataset"], split="train", vocab=vocab, isLimit=Ps["isLimit"], transform=transform, device=Ps["device"]),
batch_size=Ps["batch_size"], shuffle=True, num_workers=Ps["num_workers"], drop_last=True)
valid_loader = torch.utils.data.DataLoader(
CaptionDataset(Ps["data_folder"], dataset=Ps["dataset"], split="val", vocab=vocab, isLimit=Ps["isLimit"], transform=transform, device=Ps["device"]),
batch_size=Ps["batch_size"], shuffle=True, num_workers=Ps["num_workers"], drop_last=True)
return {"train" : train_loader, "valid": valid_loader}
#-----------------------------------------------------------------------------
# debug
#-----------------------------------------------------------------------------
if __name__ == "__main__":
pass
``` |
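A minimal usage sketch of the vocabulary and loaders above; the parameter values are illustrative assumptions, and it presumes the Karpathy split file plus the image folders are in place next to DataLoader.py:
```python
import torch
from DataLoader import Vocabulary, get_dataloaders

# build the word map from the Karpathy split captions
vocab = Vocabulary()
vocab.make(dataset="flickr8k", min_word_freq=5)

# hypothetical hyper-parameter dict with the keys get_dataloaders expects
Ps = {
    "data_folder": "./data",
    "dataset": "flickr8k",
    "isLimit": True,   # keep a small subset for a quick smoke test
    "batch_size": 16,
    "num_workers": 0,
    "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
}

loaders = get_dataloaders(Ps, vocab)
imgs, caps, caplens = next(iter(loaders["train"]))
print(imgs.shape, caps.shape, caplens.shape)
```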
{
"source": "1163710117/HelloWorld",
"score": 3
} |
#### File: 1163710117/HelloWorld/DFA.py
```python
import re
####### TODO: revise
class DFA():
d0_9 = "[0-9]"
d1_9 = "[1-9]"
d0_7 = "[0-7]"
d0_9a_f = "[0-9a-f]"
def __init__(self):
self.conv_table = {
0: {'0': 2, '[1-9]': 1, "[a-zA-Z_]": 6, "[=!<>*%+-]": 12,
"[\(\)\[\];,:{}]": 11, "/": 22, "&": 16, "\|": 14, "\"": 28, "'": 31, "[\s]": 0, ".": 30},
# NUM tokens
1: {'[0-9]': 1, "\.": 27, "[eE]": 19},
2: {'x': 3, '[0-7]': 4, "\.": 27, "[8-9]": 30},
3: {'[0-9a-f]': 5, "[g-z]": 30},
4: {'[0-7]': 4, "[8-9]": 30},
5: {'[0-9a-f]': 5, "[g-z]": 30},
# IDF tokens
6: {"[a-zA-Z_0-9]": 6},
# boundary
11: {},
# operation
12: {"=": 13},
13: {},
14: {"\|": 15},
15: {},
16: {"&": 17},
17: {},
# scientific number
18: {"[0-9]": 18, "[eE]": 19},
19: {"[0-9]": 21, "[\+-]": 20},
20: {"[0-9]": 21},
21: {"[0-9]": 21},
27: {"[0-9]": 18},
# note
22: {"\*": 23, "/": 26, "=":13},
23: {"[^\*]": 23, "\*": 24},
24: {"[^\*/]": 23, "\*": 24, "/": 25},
25: {},
26: {"[^\\n]": 26},
# string const
28: {"\"": 29, "[^\"\\n]": 28},
29: {},
# char const
31: {"[^'\\n]": 32,},
32: {"'": 33},
33: {},
# error handler
30: {},
}
####### TODO: revise
self.dfa_end_status = [1, 2, 4, 5, 6, 12, 13, 14, 15, 16, 17, 18, 21, 22, 25, 26, 29, 30, 33]
####### TODO: revise
def get_dfa_table(self):
states = [str(s) for s in list(self.conv_table.keys())]
chars = set()
for s, v in self.conv_table.items():
for k, vv in v.items():
chars.add(k)
chars = list(chars)
t_content = [['']*len(chars) for i in range(len(states))]
for s, v in self.conv_table.items():
for k, vv in v.items():
x = states.index(str(s))
y = chars.index(k)
if vv in self.dfa_end_status:
vv = str(vv) + '(end state)'
t_content[x][y] = vv
else:
t_content[x][y] = str(vv)
return states, chars, t_content
def move(self, s, c):
if s in self.conv_table :
table = self.conv_table[s]
for p, s_to in table.items():
if re.match(p, c) != None:
return s_to
return 0
def init_table(self, data):
'''
Initialization after reading the DFA transition table from a file.
:param data: the DFA transition table as a string
:return:
'''
pass
```
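A short sketch of driving the DFA above one character at a time, assuming DFA.py is importable; the input string is just an example token:
```python
from DFA import DFA

dfa = DFA()
state = 0
for ch in "0x1f":                 # walk the automaton character by character
    state = dfa.move(state, ch)
# a hex literal should end in an accepting state (state 5)
print(state, state in dfa.dfa_end_status)
```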
#### File: HelloWorld/semantic/Semantic.py
```python
class Semantic():
def __init__(self):
self.indent = " "
self.symbol_table = []
self.symbol_name = []
self.error_table = []
self.offset = 0
def getNextParse(self, index):
if index < self.length:
parse_one = self.parse_array[index]
print
i = 0
for symbol in parse_one:
if not symbol == ' ':
break
i = i + 1
# grade = i/2
print("Line: " + parse_one[i:])
return parse_one[i:]
else:
return ''
def analyze(self, parse_result):
result = ''
self.parse_array = parse_result.split("\n")
self.length = len(self.parse_array)
index = 0
value = self.getNextParse(index)
index = index + 1
if value[0] == 'P':
index = self.P_func(index)
else:
result = 'error'
symbol_result = self.deal_symbol()
error_result = self.deal_error()
return symbol_result + error_result
def deal_symbol(self):
symbol_result = 'Symbol table:\nName\tType\tOffset\n'
for row in self.symbol_table:
symbol_result = symbol_result + row[0] + '\t' + row[1] + '\t' + str(row[2]) + '\n'
return symbol_result
def deal_error(self):
error_result = 'Error:\n'
for row in self.error_table:
error_result = error_result + 'Error at line ' + row[0] + ': ' + row[1] + '\n'
return error_result
def P_func(self, index):
self.offset = 0
index = index + 1
index, d_width = self.D_func(index)
index = index + 1
index = self.S_func(index)
return index
def D_func(self, index):
value = self.getNextParse(index)
if value[0:4] == 'proc':
index = index + 2
index, x_type, x_width = self.X_func(index)
index = index + 1
index, c_type, c_width = self.C_func(index)
id_info = self.getNextParse(index)
id_value = id_info.split('(')[0]
id_value = id_value[4:]
index = index + 4
index, d_width = self.D_func(index)
index = index + 1
index = self.D1_func(index)
if id_value in self.symbol_name:
id_line = id_info.split('(')[1]
id_line = id_line.split(')')[0]
self.error_table.append((id_line, 'duplicate function declaration'))
return index, 0
else:
self.symbol_table.append((id_value, 'proc', self.offset))
self.symbol_name.append(id_value)
self.offset = self.offset + 8
return index, 8
else:
index, t_type, t_width = self.T_func(index)
index = index + 1
id_info = self.getNextParse(index)
id_value = id_info.split('(')[0]
id_value = id_value[4:]
index = index + 1
if id_value in self.symbol_name:
id_line = id_info.split('(')[1]
id_line = id_line.split(')')[0]
self.error_table.append((id_line, 'duplicate variable declaration'))
t_width = 0
else:
self.symbol_table.append((id_value, t_type, self.offset))
self.symbol_name.append(id_value)
self.offset = self.offset + t_width
index = index + 1
index = self.D1_func(index)
return index, t_width
def D1_func(self, index):
'''
When an erroneous symbol is received, return as if deriving the empty string, i.e. treat the erroneous symbol as a terminator.
:param grade:
:param index:
:return:
'''
value = self.getNextParse(index)
if value[0] == 'D':
index = index + 1
index, d_width = self.D_func(index)
index = index + 1
index = self.D1_func(index)
return index
else:
return index
def T_func(self, index):
value = self.getNextParse(index)
if value[0] == 'X':
index = index + 1
index, x_type, x_width = self.X_func(index)
t = x_type
w = x_width
index = index + 1
index, c_type, c_width = self.C_func(index, t, w)
return index, c_type, c_width
else:
index = index + 4
index, d_width = self.D_func(index)
index = index + 1
id_info = self.getNextParse(index)
id_value = id_info.split('(')[0]
id_value = id_value[4:]
index = index + 2
t_type = 'record'
t_width = d_width
if id_value in self.symbol_name:
id_line = id_info.split('(')[1]
id_line = id_line.split(')')[0]
self.error_table.append((id_line, 'duplicate variable declaration'))
t_width = 0
else:
self.symbol_table.append((id_value, t_type, self.offset))
self.symbol_name.append(id_value)
self.offset = self.offset + t_width
return index, t_type, t_width
def X_func(self, index):
value = self.getNextParse(index)
if value[0:4] == 'char':
x_type = 'char'
x_width = 1
elif value[0:3] == 'int':
x_type = 'int'
x_width = 4
elif value[0:5] == 'float':
x_type = 'float'
x_width = 4
elif value[0:7] == 'boolean':
x_type = 'boolean'
x_width = 4
else:
x_type = 'string'
x_width = 8
index = index + 1
return index, x_type, x_width
def C_func(self, index, t, w):
value = self.getNextParse(index)
if value[0] == '[':
index = index + 1
num_val = self.getNextParse(index).split(': ')[1]
num_val = num_val.split('(')[0]
index = index + 3
index, c_type, c_width = self.C_func(index, t, w)
c_type = 'array(' + num_val + ', ' + c_type + ')'
if num_val.isdigit() and not num_val[0] == '-':
c_width = int(num_val)*c_width
else:
id_info = self.getNextParse(index-1)
id_line = id_info.split('(')[1]
id_line = id_line.split(')')[0]
self.error_table.append((id_line, 'array size inside the brackets is not a positive integer'))
c_width = 0
return index, c_type, c_width
else:
c_type = t
c_width = w
return index, c_type, c_width
def S_func(self, index):
'''
When an erroneous symbol is received, return as if deriving the empty string, i.e. treat the erroneous symbol as a terminal.
:param grade:
:param index:
:return:
'''
return index
``` |
{
"source": "116pythonZS/hxl-Twisted",
"score": 3
} |
#### File: hxl-Twisted/basic-twisted/countdown.py
```python
class Countdown(object):
counter = 5
def count(self):
if self.counter == 0:
reactor.stop()
else:
print self.counter, '...'
self.counter -= 1
reactor.callLater(1, self.count)
from twisted.internet import reactor
reactor.callWhenRunning(Countdown().count)
print 'Start!'
reactor.run()
print 'Stop!'
``` |
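For comparison, a minimal alternative sketch of the same countdown using twisted.internet.task.LoopingCall instead of chained callLater calls; it assumes the same Python 2 / Twisted environment as the example above and is not part of the original repository:
```python
from twisted.internet import task, reactor

counter = [5]

def count():
    if counter[0] == 0:
        loop.stop()        # stop the periodic call, then the reactor
        reactor.stop()
    else:
        print counter[0], '...'
        counter[0] -= 1

loop = task.LoopingCall(count)
loop.start(1)              # call count() immediately, then every second
reactor.run()
```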