# File: en685.621/pa2/iris_ml.py
# -----------------------------------------------------------
# iris_ml.py
#
# Jake Sciotto
# EN685.621 Algorithms for Data Science
# Johns Hopkins University
# Summer 2020
# -----------------------------------------------------------
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.mixture import GaussianMixture
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from sklearn import svm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import sys
import util
# -------------------------------------------------------------
# Data Cleansing
# -------------------------------------------------------------
dataset = pd.read_csv("input/iris_6_features_for_cleansing.csv")
dataset.columns = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'feature-5', 'feature-6', 'class']
target_names = ['setosa', 'versicolor', 'virginica']
# check for null values
blanks = util.find_blanks(dataset)
rows, cols = dataset.shape[0], dataset.shape[1]
# filling the blanks with the median for the time being
for i in range(0, cols):
dataset.iloc[:, i].fillna((dataset.iloc[:, i].median()), inplace=True)
# initial visualization
plt.figure(0)
plt.title("Initial visualization")
scatter = plt.scatter(dataset.iloc[:, 0], dataset.iloc[:, 1], c = dataset['class'], cmap = plt.cm.Set1, edgecolor = 'k')
plt.legend(*scatter.legend_elements(), loc="best", title="Class")
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
f = open("output/output.txt", "w")
# -------------------------------------------------------------
# Feature Generation
# -------------------------------------------------------------
# picking top two features [petal-length], [petal-width] (from pa1) to generate from
X = dataset.iloc[:, 2:4]
# separate classes
setosa = X[dataset['class'] == 1]
versi = X[dataset['class'] == 2]
virgi = X[dataset['class'] == 3]
# new lists for stats data
setosa_mean, versi_mean, virgi_mean, setosa_std, versi_std, virgi_std, setosa_cov, versi_cov, virgi_cov = ([] for i in range(9))
# mean
for i in range(0, 2):
setosa_mean.append(round(np.mean(setosa.iloc[:, i]), 4))
versi_mean.append(round(np.mean(versi.iloc[:, i]), 4))
virgi_mean.append(round(np.mean(virgi.iloc[:, i]), 4))
# standard deviation
for i in range(0, 2):
setosa_std.append(round(np.std(setosa.iloc[:, i], ddof=1), 4))
versi_std.append(round(np.std(versi.iloc[:, i], ddof=1), 4))
virgi_std.append(round(np.std(virgi.iloc[:, i], ddof=1), 4))
# covariance
setosa_cov = setosa.iloc[:, 0:2].cov()
versi_cov = versi.iloc[:, 0:2].cov()
virgi_cov = virgi.iloc[:, 0:2].cov()
# generate additional observations
new_setosa = pd.DataFrame(np.random.random_sample((50, 2)))
new_versi = pd.DataFrame(np.random.random_sample((50, 2)))
new_virgi = pd.DataFrame(np.random.random_sample((50, 2)))
# generate new lists for std and mean of new values
new_setosa_mean, new_setosa_std, new_versi_mean, new_versi_std, new_virgi_mean, new_virgi_std = ([] for i in range (6))
# copy
setosa_scaled = new_setosa.copy()
versi_scaled = new_versi.copy()
virgi_scaled = new_virgi.copy()
# find new means
for i in range(0, 2):
new_setosa_mean.append(np.mean(new_setosa.iloc[:, i]))
new_versi_mean.append(np.mean(new_versi.iloc[:, i]))
new_virgi_mean.append(np.mean(new_virgi.iloc[:, i]))
# find new stds
for i in range(0, 2):
new_setosa_std.append(np.std(new_setosa.iloc[:, i], ddof=1))
new_versi_std.append(np.std(new_versi.iloc[:, i], ddof=1))
new_virgi_std.append(np.std(new_virgi.iloc[:, i], ddof=1))
# z-score normalization
for i in range(0, 50):
for j in range(0, 2):
setosa_scaled.iloc[i, j] = (setosa_scaled.iloc[i, j] - new_setosa_mean[j]) / new_setosa_std[j]
versi_scaled.iloc[i, j] = (versi_scaled.iloc[i, j] - new_versi_mean[j]) / new_versi_std[j]
virgi_scaled.iloc[i, j] = (virgi_scaled.iloc[i, j] - new_virgi_mean[j]) / new_virgi_std[j]
# multiply by the covariance
setosa_scaled = setosa_scaled.dot(setosa_cov.values)
versi_scaled = versi_scaled.dot(versi_cov.values)
virgi_scaled = virgi_scaled.dot(virgi_cov.values)
# add back the mean of the original data to scale data correctly
for i in range(0, 50):
for j in range(0, 2):
setosa_scaled.iloc[i, j] = setosa_scaled.iloc[i, j] + setosa_mean[j]
versi_scaled.iloc[i, j] = versi_scaled.iloc[i, j] + versi_mean[j]
virgi_scaled.iloc[i, j] = virgi_scaled.iloc[i, j] + virgi_mean[j]
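
# A vectorized alternative (illustrative only, not part of the original
# pipeline): the loops above z-score the uniform draws, mix them through the
# class covariance and re-centre them on the class means; NumPy can draw
# class-conditional Gaussian samples in a single call instead:
# new_setosa_direct = pd.DataFrame(
#     np.random.multivariate_normal(setosa_mean, setosa_cov.values, 50))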
# look at the generated features
frames = [setosa_scaled, versi_scaled, virgi_scaled]
result = pd.concat(frames).reset_index(drop = True)
plt.figure(1)
plt.title("Newly generated feature")
scatter1 = plt.scatter(versi['petal-length'], versi['petal-width'], c='r')
scatter2 = plt.scatter(versi_scaled.iloc[:, 0], versi_scaled.iloc[:, 1], c='b')
plt.legend(['Old feature', 'New feature'], loc="best", title="Class")
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
# insert new features into dataframe
dataset.insert(6, 'feature-7', result[0])
dataset.insert(7, 'feature-8', result[1])
# -------------------------------------------------------------
# Feature Preprocessing / Outlier Removal
# -------------------------------------------------------------
# visualization with box plot to see outliers
plt.figure(2)
plt.title("Boxplot showing outliers")
dataset.boxplot()
plt.xticks(rotation = 45)
Q1 = dataset.quantile(.25)
Q3 = dataset.quantile(.75)
IQR = Q3 - Q1
"""
This section is commented out, but it was used to explore swapping out the
outliers and seeing where they were located. I found that the only clearly
visible outliers were in the sepal-width feature and I decided to move forward
without removing them. I think they're a principal part of the dataset. There
is even some speculation that they were generated by a different process
before being added to the dataset.
#print((dataset < (Q1 - 1.5 * IQR)) | (dataset > (Q3 + 1.5 * IQR)))
median = dataset['sepal-width'].median()
# replace the outliers with the median
#dataset['sepal-width'] = np.where(dataset['sepal-width'] <= dataset['sepal-width'].quantile(.05), median, dataset['sepal-width'])
#dataset['sepal-width'] = np.where(dataset['sepal-width'] >= dataset['sepal-width'].quantile(.95), median, dataset['sepal-width'])
#print((dataset < (Q1 - 1.5 * IQR)) | (dataset > (Q3 + 1.5 * IQR)))
#plt.subplot(1, 2, 2)
#plt.title("After removing outliers")
#dataset.boxplot()
#plt.xticks(rotation = 45)
"""
# -------------------------------------------------------------
# Feature Ranking
# -------------------------------------------------------------
array = dataset.values
names = dataset.columns[0:8]
features = array.shape[1] - 1
# separate data by features and class label
X = array[:, 0:8]
Y = array[:, 8]
# comparing first two classes
feature_selection = [1, 2]
# distances
bh_dist = [0] * features
# find bhattacharyya distances of features and add result to bh_dist array
for i, name in enumerate(names):
X1 = np.array(X[:, i], dtype = np.float64)[Y == feature_selection[0]]
X2 = np.array(X[:, i], dtype = np.float64)[Y == feature_selection[1]]
bh_dist[i] = util.bhatta_cont(X1, X2)
# show distances
f.write("Feature ranking\n")
f.write(util.LINE)
for n, d in sorted(zip(names, bh_dist), key = lambda x: x[1], reverse = True):
distance = str("Bhattacharyya distance for: ") + str(n) + " " + str(d) + "\n"
f.write(distance)
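
# util.bhatta_cont is project-local; for reference, a common closed form for
# the Bhattacharyya distance between two univariate Gaussians fitted to X1 and
# X2 looks like this (illustrative sketch only; util's version may differ):
# def bhatta_gaussian(x1, x2):
#     m1, m2, v1, v2 = np.mean(x1), np.mean(x2), np.var(x1), np.var(x2)
#     return (0.25 * np.log(0.25 * (v1 / v2 + v2 / v1 + 2))
#             + 0.25 * (m1 - m2) ** 2 / (v1 + v2))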
# -------------------------------------------------------------
# Principal Component Analysis
# -------------------------------------------------------------
x = dataset.loc[:, names].values
# data has to be scaled
x = StandardScaler().fit_transform(x)
pca = PCA(n_components = 2)
principal_components = pca.fit_transform(x)
principal_df = pd.DataFrame(data = principal_components, columns = ['principal-component-1', 'principal-component-2'])
final_df = pd.concat([principal_df, dataset[['class']]], axis = 1)
plt.figure(4)
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.title('2 component PCA')
targets = [1, 2, 3]
colors = ['r', 'g', 'b']
for target, color in zip(targets,colors):
indicesToKeep = final_df['class'] == target
plt.scatter(final_df.loc[indicesToKeep, 'principal-component-1'], final_df.loc[indicesToKeep, 'principal-component-2'], c = color, s = 50)
plt.legend(targets, loc = 'best')
# -------------------------------------------------------------
# Machine Learning Techniques
# -------------------------------------------------------------
f.write("Machine learning techniques:\n")
f.write(util.LINE)
################################################
# Expectation maximization
################################################
gmm = GaussianMixture(n_components = 3)
X = dataset.iloc[:, :2]
gmm.fit(X)
labels = gmm.predict(X)
# split up new dataframe by labels
X['labels'] = labels
d0 = X[X['labels'] == 0]
d1 = X[X['labels'] == 1]
d2 = X[X['labels'] == 2]
preds = pd.concat([d0, d1, d2]).reset_index()
preds['labels'] = preds['labels'] + 1
# count how many correct predictions we have
correct_preds = np.where(dataset['class'] == preds['labels'], True, False)
accuracy = np.count_nonzero(correct_preds) / 150
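
# Caveat: GaussianMixture assigns arbitrary cluster ids, so comparing
# labels + 1 to the class column only matches when clusters happen to come out
# in class order. A label-invariant score could search the permutations
# (illustrative sketch, not in the original script):
# from itertools import permutations
# best_acc = max(np.mean(dataset['class'] == np.array(p)[labels])
#                for p in permutations([1, 2, 3]))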
# plot
plt.figure(5)
plt.title("Expectation maximization")
plt.scatter(d0.iloc[:, 0], d0.iloc[:, 1], edgecolors ='r', facecolors = "none", marker = "o")
plt.scatter(d1.iloc[:, 0], d1.iloc[:, 1], edgecolors ='b', facecolors = "none", marker = "o")
plt.scatter(d2.iloc[:, 0], d2.iloc[:, 1], edgecolors ='g', facecolors = "none", marker = "o")
plt.scatter(X.iloc[0:50, 0], X.iloc[0:50, 1], c = 'r', marker = "x")
plt.scatter(X.iloc[50:100, 0], X.iloc[50:100, 1], c = 'b', marker = "x")
plt.scatter(X.iloc[100:150, 0], X.iloc[100:150, 1], c = 'g', marker = "x")
f.write("\nMeans for EM:\n")
f.write(str(gmm.means_))
f.write("\nLower bound for EM:\n")
f.write(str(gmm.lower_bound_))
f.write("\nIterations to convergence:\n")
f.write(str(gmm.n_iter_) + "\n")
f.write("\nNumber of correct predictions\n")
f.write(str(accuracy))
################################################
# Linear Discriminant Analysis
################################################
X = dataset.iloc[:, 0:7]
y = dataset.iloc[:, 8]
lda = LinearDiscriminantAnalysis(n_components = 2)
X_r = lda.fit(X, y).transform(X)
# plot
colors = ['navy', 'turquoise', 'darkorange']
plt.figure(6)
plt.title('LDA of IRIS dataset')
for color, i, target_name in zip(colors, [1, 2, 3], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], alpha = .8, color = color, label = target_name)
plt.legend(loc = 'best', shadow = False, scatterpoints = 1)
################################################
# MLPClassifier
################################################
# training and testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
# try the perceptron first
per = Perceptron(random_state = 1, max_iter = 30, tol = 0.001)
per.fit(X_train, y_train)
yhat_train_per = per.predict(X_train)
yhat_test_per = per.predict(X_test)
f.write("\nPerceptron prediction\n")
f.write(util.LINE)
f.write(str(accuracy_score(y_train, yhat_train_per)) + "\n")
f.write(str(accuracy_score(y_test, yhat_test_per)) + "\n")
"""
Parameter choices:
- 50 iterations
- Stochastic gradient descent solver with a .1 learning rate, SGD converges well
and we do not want to end up on the other side of the function
- Activation function tanh converges well even on large datasets
Hidden layers not used but can be specified by:
N_h = N_s / (alpha * (N_i + N_o))
N_s = amount of samples in training data
N_i = input layer neurons (features)
N_o = output layer neurons
alpha = scaling constant
"""
mlp = MLPClassifier(max_iter = 50, alpha = 1e-5, solver = 'sgd', verbose = 10, random_state = 1,
learning_rate_init = .1, activation = 'tanh')
mlp.fit(X_train, y_train)
yhat_train_mlp = mlp.predict(X_train)
yhat_test_mlp = mlp.predict(X_test)
f.write("\nMLPClassifier\n")
f.write(util.LINE)
f.write(str(accuracy_score(y_train, yhat_train_mlp)) + "\n")
f.write(str(accuracy_score(y_test, yhat_test_mlp)) + "\n")
################################################
# SVM
################################################
X = dataset.iloc[:, 0:2]
y = dataset.iloc[:, 8]
h = 0.02
# linear kernel
model = svm.SVC(kernel = 'linear', C = 1.0).fit(X, y)
x_min, x_max = X.iloc[:, 0].min() - 1, X.iloc[:, 0].max() + 1
y_min, y_max = X.iloc[:, 1].min() - 1, X.iloc[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# color plot
plt.figure(7)
plt.contour(xx, yy, Z)
plt.scatter(X.iloc[:, 0], X.iloc[:, 1], c = y, edgecolors = 'k')
plt.show()

# File: tersersemestre_vectores/vectores.py
import math
import statistics
def producto_escalar(escalar, vector):
"""
>>> producto_escalar(2, [1, 2, 3])
[2, 4, 6]
>>> producto_escalar(5,[2, 5, 1])
[10, 25, 5]
>>> producto_escalar(2, [2, 1])
[4, 2]
    :param escalar: scalar that multiplies the vector
    :param vector: values to be multiplied
    :return: a new list with every element scaled
"""
res = []
cont = 0
while cont < len(vector):
res.append(escalar * vector[cont])
cont += 1
return res
def producto_escalar(escalar, vector):
"""
(num, vector) -> vector
>>> producto_escalar(2, [2, 1])
[4, 2]
>>> producto_escalar(2, [1, 2, 3])
[2, 4, 6]
>>> producto_escalar(5,[2, 5, 1])
[10, 25, 5]
    :param escalar: scalar that multiplies the vector
    :param vector: values to be multiplied
    :return: a new list with every element scaled
"""
res = []
cont = 0
for i in vector:
res.append(vector[cont] * escalar)
cont += 1
return res
def suma_productos(nvector1, nvector2):
"""
(vector, vector) -> vector
>>> suma_productos([1, 2, 3], [2, 1, 3])
[3, 3, 6]
>>> suma_productos([4, 7, 1], [8, 5, 2])
[12, 12, 3]
>>> suma_productos([2, 4, 2], [3, 2, 8])
[5, 6, 10]
    :param nvector1: first vector to be added
    :param nvector2: second vector
    :return: the element-wise sum of the two vectors
"""
resultado = []
contador = 0
while(contador < len(nvector1)):
resultado.append(nvector1[contador] + nvector2[contador])
contador += 1
return resultado
def producto_puntos (nvector1, nvector2):
"""
    (vector, vector) -> num
>>> producto_puntos([1, 2, 3],[2, 1, 3])
13
>>> producto_puntos([1, 2, -3], [-2, 4, 1])
3
>>> producto_puntos([2, 1, 2], [2, 4, 1])
10
    :param nvector1: first vector to be multiplied
    :param nvector2: second vector to be multiplied
    :return: the sum of the element-wise products (dot product)
"""
resultado = []
contador = 0
while (contador < len(nvector1)):
resultado.append(nvector1[contador] * nvector2[contador])
contador += 1
Suma = 0
for i in resultado:
Suma = Suma + i
return Suma
def elemento_mayor(nvector):
"""
(vector) -> num
>>> elemento_mayor([1, 2, 4, 2, 3])
4
>>> elemento_mayor([8, 5, 2])
8
>>> elemento_mayor([9, 6, 3, 12])
12
    :param nvector: vector to scan
    :return: the largest element of the vector
"""
    cont = nvector[0]  # start from the first element so all-negative vectors work
    for num in nvector:
        if num > cont:
            cont = num
    return cont
def elemento_menor(nvector):
"""
(vector) -> num
>>> elemento_menor([2, 4, 6, 7, 5])
2
>>> elemento_menor([1, 2, 5, 1, 6])
1
>>> elemento_menor([21, 42, 2, 12, 5])
2
    :param nvector: vector to scan
    :return: the smallest element of the vector
"""
return min(nvector)
def prom(nvector):
"""
(vector) -> num
>>> prom([1, 2, 4, 2, 1, 2])
2.0
>>> prom([2, 1, 3, 5, 6, 2])
3.1666666666666665
>>> prom([2, 2, 2, 1, 5, 5, 6])
3.2857142857142856
    :param nvector: vector whose elements are summed
    :return: the arithmetic mean of the elements
"""
resultado = []
contador = 0
while (contador < len(nvector)):
resultado.append(nvector[contador])
contador += 1
Suma = 0
for i in resultado:
Suma = Suma + i
resultado_promedio= Suma/len(resultado)
return resultado_promedio
def desviacion_est(nvector):
"""
(vector) -> num
>>> desviacion_est([2, 34, 3, 2 , 1, 4])
12.940891262454324
>>> desviacion_est([2, 4, 5, 6, 8, 1, 2])
2.516611478423583
>>> desviacion_est([3, 5, 6, 8, 1, 3])
2.503331114069145
    :param nvector: vector of samples
    :return: the sample standard deviation
"""
return statistics.stdev(nvector)
def elemento_igual(nvector):
"""
(vector) -> vector
>>> elemento_igual([2, 5, 6, 7, 8])
    'no repeated elements'
>>> elemento_igual([1, 2, 1, 5, 2, 1])
[1, 2]
>>> elemento_igual([2, 1, 3, 3, 2])
[3, 2]
    :param nvector: input vector
    :return: the list of repeated elements, or a message string if there are none
"""
    repetido = []
    unico = []
    for i in nvector:
        if i not in unico:
            unico.append(i)
        elif i not in repetido:
            repetido.append(i)
    # decide after the full scan so a trailing unique element cannot
    # overwrite a previously found repeat
    if repetido:
        return repetido
    return 'no repeated elements'
def Norma_vec(nvector):
"""
(vector) -> num
>>> Norma_vec([2, 4, 1, 5])
6.782329983125268
>>> Norma_vec([2, 3, 2, 1])
4.242640687119285
>>> Norma_vec([1, -3, 5, 2, 2])
6.557438524302
    :param nvector: vector whose squared elements are summed
    :return: the Euclidean norm of the vector
"""
añadido = []
contador = 0
while (contador < len(nvector)):
añadido.append((nvector[contador])**2 )
contador += 1
Suma = 0
for i in añadido:
Suma = Suma + i
resultado_norma = math.sqrt(Suma)
return resultado_norma
def Moda_vec(nvector):
"""
(vector) -> vector
>>> Moda_vec([1, 2, 5, 2, 1, 3, 1])
[1]
>>> Moda_vec([2, 1, 2, 2, 5, 3, 3, 1])
[2]
>>> Moda_vec([1, 2, 3, 4, 5, 2, 4, 3])
[2, 3, 4]
    :param nvector: vector scanned to count repeated values
    :return: the value(s) that appear most often in the vector
"""
repeticiones = 0
for i in nvector:
cont = nvector.count(i)
if cont > repeticiones:
repeticiones = cont
modas = []
for i in nvector:
cont = nvector.count(i)
if cont == repeticiones and i not in modas:
modas.append(i)
return modas
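
# Note: since Python 3.8, statistics.multimode reproduces Moda_vec, e.g.
# statistics.multimode([1, 2, 3, 4, 5, 2, 4, 3]) == [2, 3, 4]
# (illustrative equivalence; multimode is not used in the original file).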

# File: easy/171.title_to_number.py
import string
from functools import reduce
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
#mine
dict_ = list(string.ascii_uppercase)
sum_ = 0
i = 1
for s_ in s:
sum_ += (dict_.index(s_)+1) * 26 ** (len(s)-i)
i+=1
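        # e.g. 'AAB' -> 1*26**2 + 1*26 + 2 = 704 (the value printed at the bottom of this file)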
return sum_
#easy_1
s = s[::-1]
sum = 0
for exp, char in enumerate(s):
sum += (ord(char) - 65 + 1) * (26 ** exp)
return sum
#easy_2
return reduce(lambda x,y:x*26+y,map(lambda x:ord(x)-ord('A')+1,s))
s = Solution()
a = s.titleToNumber('AAB')
print(a)

# File: tango_with_django_project/tango_with_django_project/settings.py
"""
Django settings for tango_with_django_project project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u4p-@&vd#ux%4+=7fq!jwfx9pvidw!3_tw^k5w@p#$%h9k7+a3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rango',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tango_with_django_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'tango_with_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR, ]
# Media files
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
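
# During development, uploaded media is typically served by appending to the
# project's urls.py -- a minimal sketch (assumed usage, not part of this file):
#   from django.conf import settings
#   from django.conf.urls.static import static
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)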

# File: juego.py
#Okol & Developingo
import mono
from random import choice
lista_palabras = {
'cpu': 'Unidad central de procesamiento',
'sqli':'Es un error que te permite ejecutar consultas a la base de datos',
'php':'lenguaje de programacion para crear webs dinamicas',
'ruby on rails':'framework web de ruby',
'perl':'papa de PHP',
'django':'framework web de python',
'print':'funcion para imprimir en pantalla en casi cualquier lenguaje de scripting'
}
print ("""
Vamos a jugar ahorcado!!
Listo?
""")
raw_input('Enter para comenzar!')
palabra = choice(lista_palabras.keys())
intentos_lista = ['_']*len(palabra)
intentos_malos = 0
primer_juego = True
while True:
s = ''
for i in intentos_lista:
s += i+','
    print s[:-1] # print the list holding the player's progress on the word
print mono.mono[intentos_malos]
if primer_juego:
print lista_palabras[palabra]
primer_juego = False
intento = raw_input('Intenta: ')
if intento == palabra:
print "Felicidades!! ganaste!!"
print 'La palabra es', palabra
break
elif intento in palabra and len(intento)==1:
        for i in range(len(palabra)): # replace the matched letters
if palabra[i] == intento:
intentos_lista[i] = intento
        # check whether the word is now complete (win)
for i in intentos_lista:
if i == '_':
break
else:
print "Felicidades!! ganaste!!"
print 'La palabra es', palabra
break
else:
intentos_malos += 1
if intentos_malos == 6:
print "PERDISTE!!"
print mono.mono[6]
break
print 'Ups te quedan', 6-intentos_malos, 'intentos'
print "FIN"

# File: properties_localizer.py
# -*- coding: utf-8 -*-
import os
import sys
import csv
def start_localize_properties(CURRENT_DIR, BASE_PATH, IN_PATH, OUT_PATH, LANG_KEYS):
base_out_dir = os.path.join(BASE_PATH, OUT_PATH)
# top most
if not os.path.exists(base_out_dir):
os.makedirs(base_out_dir)
# each languages
for lang in LANG_KEYS:
lang_path = os.path.join(base_out_dir, "{0}/".format(lang))
if not os.path.exists(lang_path):
os.makedirs(lang_path)
full_out_paths = [os.path.join(base_out_dir, "{0}/".format(langKey) + "string_{0}.properties".format(langKey)) for langKey in LANG_KEYS]
allwrites = [open(out_path, 'w') for out_path in full_out_paths]
for dirname, dirnames, filenames in os.walk(os.path.join(CURRENT_DIR, IN_PATH)):
for f in filenames:
filename, ext = os.path.splitext(f)
if ext != '.csv':
continue
fullpath = os.path.join(dirname, f)
print 'Localizing: ' + filename + ' ...'
with open(fullpath, 'rb') as csvfile:
[fwrite.write('// {0}\n'.format(filename)) for fwrite in allwrites]
reader = csv.reader(csvfile, delimiter=',')
iterrows = iter(reader);
next(iterrows) # skip first line (it is header).
for row in iterrows:
row_key = row[0]
# comment
if row_key[:2] == '//':
continue
row_values = [row[i+1] for i in range(len(LANG_KEYS))]
# if any row is empty, skip it!
if any([value == "" for value in row_values]):
[fwrite.write('\n') for idx, fwrite in enumerate(allwrites)]
else:
for idx, fwrite in enumerate(allwrites):
current_value = row_values[idx]
[fwrite.write('{key}={lang}\n'.format(key=row_key, lang=current_value))]
[fwrite.close() for fwrite in allwrites]
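
# The reader above expects each CSV laid out as: key, <one value per language
# in LANG_KEYS order>. An illustrative row (made up, not from the project):
#   greeting_hello,Hello,Bonjour
# would emit "greeting_hello=Hello" into string_en.properties and
# "greeting_hello=Bonjour" into string_fr.properties for LANG_KEYS = ['en', 'fr'].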

# File: Web_scraping/GetDownloaded/Scraping/torrentmovies.py
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.response import open_in_browser
from scrapy.http import FormRequest
crawler_settings = {  # renamed from 'dict' to avoid shadowing the builtin
# 'BOT_NAME' : 'hyper_scraping',
# 'SPIDER_MODULES' : ['hyper_scraping.spiders'],
# 'NEWSPIDER_MODULE' : 'hyper_scraping.spiders',
'ROBOTSTXT_OBEY': False,
'DOWNLOADER_MIDDLEWARES': {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'scrapy_user_agents.middlewares.RandomUserAgentMiddleware': 40,
},
}
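
# The override above disables Scrapy's built-in UserAgentMiddleware and lets
# scrapy-user-agents' RandomUserAgentMiddleware rotate a random User-Agent per
# request; priority 40 places it early in the downloader chain.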
class QuotesSpider(scrapy.Spider):
name = 'quotes'
start_urls = [
'https://torrentmovies.co/?s=lootcase&ixsl=1',
]
'''def parse(self, response):
for quote in response.css('.imag'):
yield {
'name': quote.css('.search-live-field::attr("title")').get(),
'image': quote.css('.thumbnail img::attr("src")').get(),
}'''
def parse(self, response):
open_in_browser(response)
def parsjje(self, response):
x = input('Enter the Movie you are searching : ')
        return FormRequest.from_response(response, formdata={'s': x}, callback=self.parse)  # self.temp was undefined; self.parse assumed as the intended callback
'''next_page = response.css('.a-last a::attr("href")').get()
if next_page is not None:
yield response.follow(next_page, self.parse)'''
process = CrawlerProcess(settings=crawler_settings)
process.crawl(QuotesSpider)
process.start()

# File: s3_restore.py  (repo koolhand/s3_glacier_restore, MIT license)
#!/usr/bin/python3
import sys
import time
import boto3
import queue
import logging
import argparse
import datetime
import threading
import multiprocessing
from os import path
from botocore.exceptions import ClientError
AWS_PROFILE = 'default'
PERCENT_QUEUE = queue.Queue()
def setup_logger(logfile):
logger = logging.getLogger(f's3_restore_{logfile}')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(logfile)
logger.addHandler(fh)
return logger
def read_file(fname):
'''read file per line to array'''
lines = []
with open(fname) as f:
for i, l in enumerate(f):
lines.append(l.replace('\n', ''))
return lines
def chunks(lst, n):
'''generator, yield successive n-sized chunks from lst.'''
for i in range(0, len(lst), n):
yield lst[i:i + n]
def diff(first, second):
'''diff between two arrays'''
second = set(second)
return [item for item in first if item not in second]
def refresh_credentials(thread_id=0):
session = boto3.session.Session(profile_name=AWS_PROFILE)
s3 = session.client('s3')
return s3
def request_retrieval(progress_logger, availability_logger, files, bucket_name, retain_days, tier, chunk_index):
'''
reqest object retrieval from supplied 'files' array
'files' array should contain s3 paths eg. 2018/06/10/file.txt
'''
s3_client = refresh_credentials(chunk_index)
counter = 0
for f in files:
try:
response = s3_client.restore_object(
Bucket=bucket_name,
Key=f,
RestoreRequest={
'Days': int(retain_days),
'GlacierJobParameters': {
'Tier': tier
}
}
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print(f'{f} already available to download')
availability_logger.info(f)
elif response['ResponseMetadata']['HTTPStatusCode'] == 202:
progress_logger.info(f)
except ClientError as e:
code = e.response['Error']['Code']
if code == 'NoSuchKey':
print(f'{f} not found, skipping')
elif code == 'RestoreAlreadyInProgress':
print(f'{f} restore already in progress, ignoring')
progress_logger.info(f)
elif code == 'ExpiredToken':
s3_client = refresh_credentials(chunk_index)
else:
print(f'{f}: {e}')
counter += 1
actual_percent = counter / len(files)
PERCENT_QUEUE.put([chunk_index, actual_percent])
def check_files_availability(availability_logger, files, bucket_name, chunk_index):
'''does a HEAD request on files array to check if s3 object restore is already complete or is still in progress'''
s3_client = refresh_credentials(chunk_index)
counter = 0
for f in files:
try:
response = s3_client.head_object(Bucket=bucket_name, Key=f)
if 'x-amz-restore' in response['ResponseMetadata']['HTTPHeaders']:
x_amz_restore = response['ResponseMetadata']['HTTPHeaders']['x-amz-restore']
if 'ongoing-request="false"' in x_amz_restore: # false = restore complete, true = restore still in progress
availability_logger.info(f)
except ClientError as e:
code = e.response['Error']['Code']
if code == 'NoSuchKey':
print(f'{f} not found, skipping')
elif code == 'ExpiredToken':
s3_client = refresh_credentials(chunk_index)
else:
print(f'Exception occured: {e}')
counter += 1
actual_percent = counter / len(files)
PERCENT_QUEUE.put([chunk_index, actual_percent])
def print_percent_queue(percent_dict):
while PERCENT_QUEUE.empty() is False:
data = PERCENT_QUEUE.get(timeout=0.1)
percent_dict[data[0]] = data[1]
out_str = ''
total_percent = 0
for chunk_id, percent in percent_dict.items():
percent *= 100
total_percent += percent
out_str += f' T{chunk_id}: {percent}% '
if(len(percent_dict) > 0):
total_percent /= len(percent_dict)
out_str = f'Total: {total_percent:.2f}% [{out_str}]'
print(out_str)
def main_generate_list(bucket):
'''generates a file list from whole bucket (only files in glacier or deep_archive tier)'''
output_filename = f'{bucket}.objects'
if path.exists(output_filename):
input_overwrite_continue = input(f'File {output_filename} already exists and will be overwritten\nContinue? y/[n]: ')
if input_overwrite_continue != 'y':
return
s3_client = refresh_credentials()
glacier_objects = []
print('Listing objects to file')
try:
paginator = s3_client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=bucket)
last_count = 0
for page in pages:
for obj in page['Contents']:
if obj['StorageClass'] == 'GLACIER' or obj['StorageClass'] == 'DEEP_ARCHIVE':
glacier_objects.append(obj['Key'])
if len(glacier_objects) >= last_count+1000:
last_count = len(glacier_objects)
print(f'Found: {last_count}')
except ClientError as e:
print(e)
print(f'Total count: {len(glacier_objects)} glacier/deep_archive objects saved to {output_filename}')
with open(output_filename, 'w') as output_list:
for obj in glacier_objects:
output_list.write(f'{obj}\n')
def main_request_objects_restore(bucket, retain_for, retrieval_tier, thread_count):
object_list_filename = f'{bucket}.objects'
progress_logfile = f'{bucket}.progress'
availability_logfile = f'{bucket}.available'
progress_logger = setup_logger(progress_logfile)
availability_logger = setup_logger(availability_logfile)
progress_log = []
if path.exists(progress_logfile):
progress_log = read_file(progress_logfile)
availability_log = []
if path.exists(availability_logfile):
availability_log = read_file(availability_logfile)
print('')
lines = read_file(object_list_filename)
if len(progress_log) > 0:
prev_len = len(lines)
lines = diff(lines, progress_log)
print(f'Progress log found. Skipping {prev_len - len(lines)} entries')
if len(availability_log) > 0:
prev_len = len(lines)
lines = diff(lines, availability_log)
print(f'Availability log found. Skipping {prev_len - len(lines)} entries (restore is complete on these files)')
if len(lines) == 0:
print('All objects already requested, nothing to do')
sys.exit(1)
print(f'Will have to process {len(lines)} files')
if len(lines) < int(thread_count):
thread_count = len(lines)
split_by = max(int(len(lines) / int(thread_count)), 1)
est_hours = len(lines)/int(thread_count)/5/60/60 # 5 -> single thread can request approx 5 objects/s
est_hours_format = str(datetime.timedelta(hours=est_hours)).split('.')[0]
print(f'{thread_count} threads, {split_by} files per thread')
if input(f'This will take approximately { est_hours_format }\nContinue? (y/[n]): ') != 'y':
sys.exit(1)
threads = []
timer_start = time.time()
chunk_index = 0
for chunk in chunks(lines, split_by):
t = threading.Thread(target=request_retrieval, args=(progress_logger, availability_logger, chunk, bucket, retain_for, retrieval_tier, chunk_index), daemon=True)
t.start()
threads.append(t)
chunk_index += 1
percent_dict = {}
while any(thread.is_alive() for thread in threads):
print_percent_queue(percent_dict)
time.sleep(1)
print_percent_queue(percent_dict)
exec_time = str((time.time()-timer_start)).split('.')[0]
print(f'Execution took {exec_time}s')
def main_check_restore_status(bucket, thread_count):
object_list_filename = f'{bucket}.objects'
    availability_logfile = f'{bucket}.available'  # same log the request stage writes, so completed files are shared between stages
availability_logger = setup_logger(availability_logfile)
availability_log = []
file_list = []
if not path.exists(object_list_filename):
print(f'{object_list_filename} not found. Cancelling')
print('If you dont have any file with path list, run `Generate file list` option first')
return
print('')
file_list = read_file(object_list_filename)
if path.exists(availability_logfile):
availability_log = read_file(availability_logfile)
if len(availability_log) > 0:
prev_len = len(file_list)
file_list = diff(file_list, availability_log)
print(f'Availability log found. Skipping {prev_len - len(file_list)} entries (these files are ready for download)')
print(f'Will have to process {len(file_list)} files')
split_by = max(int(len(file_list) / int(thread_count)), 1)
    est_hours = len(file_list)/int(thread_count)/14/60/60  # 14 -> single thread can check approx 14 objects/s
est_hours_format = str(datetime.timedelta(hours=est_hours)).split('.')[0]
print(f'{thread_count} threads, {split_by} files per thread')
if input(f'This will take approximately { est_hours_format }\nContinue? (y/[n]): ') != 'y':
sys.exit(1)
threads = []
timer_start = time.time()
chunk_index = 0
for chunk in chunks(file_list, split_by):
t = threading.Thread(target=check_files_availability, args=(availability_logger, chunk, bucket, chunk_index), daemon=True)
t.start()
threads.append(t)
chunk_index += 1
percent_dict = {}
while any(thread.is_alive() for thread in threads):
print_percent_queue(percent_dict)
time.sleep(0.1)
print_percent_queue(percent_dict)
print(f'Execution took {time.time()-timer_start}')
print('')
new_availability_list = read_file(availability_logfile)
new_file_list = read_file(object_list_filename)
print(f'{len(new_availability_list)} files are restored and ready for download')
print(f'{len(new_file_list)-len(new_availability_list)} files is still being restored')
def main():
global AWS_PROFILE
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--aws-profile', default='default')
subparsers = parser.add_subparsers(dest='subcommand', required=True)
subparsers.add_parser('generate-object-list')
request_parser = subparsers.add_parser('request-objects-restore')
request_parser.add_argument('--retain-for', required=True, help='How long to keep objects restored')
request_parser.add_argument('--retrieval-tier', default='Standard', choices=['Standard', 'Bulk', 'Expedited'])
request_parser.add_argument('--thread-count', default=int(multiprocessing.cpu_count()))
check_parser = subparsers.add_parser('check-objects-status')
check_parser.add_argument('--thread-count', default=int(multiprocessing.cpu_count()))
args = parser.parse_args()
AWS_PROFILE = args.aws_profile
if args.subcommand == 'generate-object-list':
print('Command: Generate list of objects to restore from specified S3 bucket')
main_generate_list(args.bucket)
elif args.subcommand == 'request-objects-restore':
print('Command: Request restoration of objects')
main_request_objects_restore(args.bucket, args.retain_for, args.retrieval_tier, args.thread_count)
elif args.subcommand == 'check-objects-status':
print('Command: Check objects status to verify completeness')
main_check_restore_status(args.bucket, args.thread_count)
if __name__ == '__main__':
main()
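
# Example invocations, derived from the argparse definitions above (bucket
# name is illustrative):
#   python3 s3_restore.py --bucket my-bucket generate-object-list
#   python3 s3_restore.py --bucket my-bucket request-objects-restore --retain-for 7 --retrieval-tier Bulk
#   python3 s3_restore.py --bucket my-bucket check-objects-status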

# File: archive/90SubsetsII.py
# coding=utf-8
'''
Created on 2017-05-31
@author: Administrator
'''
class Solution(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
self.ans = []
        def dfs(cur, nums, st):
            self.ans.append(cur)
#             print cur
            for i in range(st + 1, len(nums)):
                # skip equal values at the same depth so duplicate subsets
                # (e.g. two copies of [1, 2] from [1, 2, 2]) are emitted once
                if i > st + 1 and nums[i] == nums[i - 1]:
                    continue
                dfs(cur + [nums[i]], nums, i)
        nums.sort()  # sorting groups duplicates so the adjacent-skip works
        dfs([], nums, -1)
self.ans = map(lambda x:list(x), self.ans)
return self.ans
nums = [1, 2, 2]
print Solution().subsetsWithDup(nums)

# File: model.py
from config import Config
import tensorflow as tf
# import tensorflow.compat.v1 as tf
if Config().import_name == 'electra':
    # electra
from tf_utils.electra.model.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint
from tf_utils.electra.util import training_utils
from tf_utils.electra import configure_finetuning
elif Config().import_name == 'nazhe':
from tf_utils.nezha.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint # Nezha
else:
from tf_utils.bert_modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint # roberta
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.layers.python.layers import initializers
class Model:
def __init__(self, config):
self.config = config
        # placeholders for the data fed into the model
# self.input_x_word = tf.placeholder(tf.int32, [None, None], name="input_x_word")
self.input_x_len = tf.placeholder(tf.int32, name='input_x_len')
self.segment_ids = tf.placeholder(tf.int32, [config.batch_size, config.sequence_length], name="token_ids_type")
# self.input_mask = tf.placeholder(tf.int32, [None, None], name='input_mask')
        # self.input_relation = tf.placeholder(tf.int32, [None, None], name='input_relation') # gold NER labels
self.input_x_word = tf.placeholder(tf.int32, [config.batch_size, config.sequence_length], name="input_x_word")
self.input_mask = tf.placeholder(tf.int32, [config.batch_size, config.sequence_length], name='input_mask')
        self.input_relation = tf.placeholder(tf.int32, [config.batch_size, config.sequence_length], name='input_relation') # gold NER labels
self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
self.is_training = tf.placeholder(tf.bool, None, name='is_training')
# BERT Embedding
self.init_embedding(bert_init=True)
output_layer = self.word_embedding
        # hyperparameter settings
self.relation_num = self.config.relation_num
self.initializer = initializers.xavier_initializer()
self.embed_dense_dim = self.config.embed_dense_dim
self.dropout = self.config.dropout
self.model_type = self.config.model_type
print('Run Model Type:', self.model_type)
        # IDCNN hyperparameters
self.layers = [
{
'dilation': 1
},
{
'dilation': 1
},
{
'dilation': 2
},
]
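        # Dilations [1, 1, 2] repeated self.repeat_times times give the
        # iterated dilated CNN an exponentially growing receptive field while
        # sharing filter weights across repeats (see reuse=tf.AUTO_REUSE in
        # IDCNN_layer).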
self.filter_width = 3
self.num_filter = self.config.lstm_dim
self.embedding_dim = self.embed_dense_dim
self.repeat_times = 4
self.cnn_output_width = 0
        # CRF hyperparameters
used = tf.sign(tf.abs(self.input_x_word))
length = tf.reduce_sum(used, reduction_indices=1)
self.lengths = tf.cast(length, tf.int32)
self.batch_size = tf.shape(self.input_x_word)[0]
self.num_steps = tf.shape(self.input_x_word)[-1]
if self.model_type == 'bilstm':
lstm_inputs = tf.nn.dropout(output_layer, self.config.dropout)
# bi-directional lstm layer
            bilstm_cell_fw = tf.contrib.rnn.LSTMCell(self.config.lstm_dim, name='fw') # tunable
            bilstm_cell_bw = tf.contrib.rnn.LSTMCell(self.config.lstm_dim, name='bw') # tunable
output_layer_1 = tf.nn.bidirectional_dynamic_rnn(cell_fw=bilstm_cell_fw,
cell_bw=bilstm_cell_bw,
inputs=lstm_inputs,
sequence_length=None,
dtype=tf.float32)[0]
model_outputs = tf.concat([output_layer_1[0], output_layer_1[1]], axis=-1)
self.logits = self.project_layer(model_outputs)
elif self.model_type == 'gru':
print(self.model_type)
gru_inputs = tf.nn.dropout(output_layer, config.dropout)
# bi-directional gru layer
            GRU_cell_fw = tf.contrib.rnn.GRUCell(config.gru_num) # tunable
            # backward cell
            GRU_cell_bw = tf.contrib.rnn.GRUCell(config.gru_num) # tunable
output_layer_1 = tf.nn.bidirectional_dynamic_rnn(cell_fw=GRU_cell_fw,
cell_bw=GRU_cell_bw,
inputs=gru_inputs,
sequence_length=None,
dtype=tf.float32)[0]
model_outputs = tf.concat([output_layer_1[0], output_layer_1[1]], axis=-1)
self.logits = self.project_layer(model_outputs)
elif self.model_type == 'idcnn':
model_inputs = tf.nn.dropout(output_layer, self.dropout)
model_outputs = self.IDCNN_layer(model_inputs)
self.logits = self.project_layer_idcnn(model_outputs)
else:
raise KeyError
        # compute the loss
self.loss = self.loss_layer(self.logits, self.lengths)
def project_layer(self, lstm_outputs, name=None):
"""
hidden layer between lstm layer and logits
:param lstm_outputs: [batch_size, num_steps, emb_size]
:return: [batch_size, num_steps, num_tags]
"""
with tf.name_scope("project" if not name else name):
with tf.name_scope("hidden"):
W = tf.get_variable("HW", shape=[self.config.lstm_dim * 2, self.config.lstm_dim],
dtype=tf.float32, initializer=self.initializer)
b = tf.get_variable("Hb", shape=[self.config.lstm_dim], dtype=tf.float32,
initializer=tf.zeros_initializer())
output = tf.reshape(lstm_outputs, shape=[-1, self.config.lstm_dim * 2])
hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
# project to score of tags
with tf.name_scope("logits"):
W = tf.get_variable("LW", shape=[self.config.lstm_dim, self.relation_num],
dtype=tf.float32, initializer=self.initializer)
b = tf.get_variable("Lb", shape=[self.relation_num], dtype=tf.float32,
initializer=tf.zeros_initializer())
pred = tf.nn.xw_plus_b(hidden, W, b)
return tf.reshape(pred, [-1, self.num_steps, self.relation_num], name='pred_logits')
def IDCNN_layer(self, model_inputs, name=None):
"""
:param idcnn_inputs: [batch_size, num_steps, emb_size]
:return: [batch_size, num_steps, cnn_output_width]
"""
model_inputs = tf.expand_dims(model_inputs, 1)
with tf.variable_scope("idcnn" if not name else name):
shape = [1, self.filter_width, self.embedding_dim,
self.num_filter]
print(shape)
filter_weights = tf.get_variable(
"idcnn_filter",
shape=[1, self.filter_width, self.embedding_dim, self.num_filter],
initializer=self.initializer
)
layerInput = tf.nn.conv2d(model_inputs,
filter_weights,
strides=[1, 1, 1, 1],
padding="SAME",
name="init_layer")
finalOutFromLayers = []
totalWidthForLastDim = 0
for j in range(self.repeat_times):
for i in range(len(self.layers)):
dilation = self.layers[i]['dilation']
isLast = True if i == (len(self.layers) - 1) else False
with tf.variable_scope("atrous-conv-layer-%d" % i,
reuse=tf.AUTO_REUSE):
w = tf.get_variable(
"filterW",
shape=[1, self.filter_width, self.num_filter,
self.num_filter],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable("filterB", shape=[self.num_filter])
conv = tf.nn.atrous_conv2d(layerInput,
w,
rate=dilation,
padding="SAME")
conv = tf.nn.bias_add(conv, b)
conv = tf.nn.relu(conv)
if isLast:
finalOutFromLayers.append(conv)
totalWidthForLastDim += self.num_filter
layerInput = conv
finalOut = tf.concat(axis=3, values=finalOutFromLayers)
keepProb = tf.cond(self.is_training, lambda: 0.8, lambda: 1.0)
# keepProb = 1.0 if reuse else 0.5
finalOut = tf.nn.dropout(finalOut, keepProb)
finalOut = tf.squeeze(finalOut, [1])
finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim])
self.cnn_output_width = totalWidthForLastDim
return finalOut
def project_layer_idcnn(self, idcnn_outputs, name=None):
"""
:param lstm_outputs: [batch_size, num_steps, emb_size]
:return: [batch_size, num_steps, num_tags]
"""
with tf.name_scope("project" if not name else name):
# project to score of tags
with tf.name_scope("logits"):
W = tf.get_variable("PLW", shape=[self.cnn_output_width, self.relation_num],
dtype=tf.float32, initializer=self.initializer)
b = tf.get_variable("PLb", initializer=tf.constant(0.001, shape=[self.relation_num]))
pred = tf.nn.xw_plus_b(idcnn_outputs, W, b)
return tf.reshape(pred, [-1, self.num_steps, self.relation_num], name='pred_logits')
def loss_layer(self, project_logits, lengths, name=None):
"""
        Compute the CRF loss
:param project_logits: [1, num_steps, num_tags]
:return: scalar loss
"""
with tf.name_scope("crf_loss" if not name else name):
small = -1000.0
# pad logits for crf loss
start_logits = tf.concat(
[small * tf.ones(shape=[self.batch_size, 1, self.relation_num]), tf.zeros(shape=[self.batch_size, 1, 1])],
axis=-1)
pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
logits = tf.concat([project_logits, pad_logits], axis=-1)
logits = tf.concat([start_logits, logits], axis=1)
targets = tf.concat(
[tf.cast(self.relation_num * tf.ones([self.batch_size, 1]), tf.int32), self.input_relation], axis=-1)
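            # The concats above prepend a synthetic start step: logits gain an
            # extra tag column plus a start-tag row, and targets gain the
            # start-tag id, which is why crf_log_likelihood runs over
            # sequence_lengths = lengths + 1.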
self.trans = tf.get_variable(
name="transitions",
shape=[self.relation_num + 1, self.relation_num + 1], # 1
# shape=[self.relation_num, self.relation_num], # 1
initializer=self.initializer)
log_likelihood, self.trans = crf_log_likelihood(
inputs=logits,
tag_indices=targets,
# tag_indices=self.input_relation,
transition_params=self.trans,
# sequence_lengths=lengths
sequence_lengths=lengths + 1
) # + 1
return tf.reduce_mean(-log_likelihood, name='loss')
def init_embedding(self, bert_init=True):
"""
        Project the BERT embedding to a lower dimension
:param bert_init:
:return:
"""
with tf.name_scope('embedding'):
word_embedding = self.bert_embed(bert_init)
print('self.embed_dense_dim:', self.config.embed_dense_dim)
word_embedding = tf.layers.dense(word_embedding, self.config.embed_dense_dim, activation=tf.nn.relu)
hidden_size = word_embedding.shape[-1].value
self.word_embedding = word_embedding
print(word_embedding.shape)
self.output_layer_hidden_size = hidden_size
def bert_embed(self, bert_init=True):
"""
        Load the pretrained BERT TF checkpoint
:param bert_init:
:return:
"""
bert_config_file = self.config.bert_config_file
bert_config = BertConfig.from_json_file(bert_config_file)
# batch_size, max_seq_length = get_shape_list(self.input_x_word)
        # bert_mask = tf.pad(self.input_mask, [[0, 0], [2, 0]], constant_values=1) # pad two columns on the left of the tensor
if self.config.import_name == 'electra':
bert_config = training_utils.get_bert_config(configure_finetuning.FinetuningConfig(
model_name=self.config.bert_file, data_dir=self.config.vocab_file
))
model = BertModel(
bert_config=bert_config, # electra
                is_training=self.is_training, # fine-tune
input_ids=self.input_x_word,
input_mask=self.input_mask,
token_type_ids=self.segment_ids,
use_one_hot_embeddings=False)
else:
model = BertModel(
config=bert_config, # nezha and roberta
                is_training=self.is_training, # fine-tune
input_ids=self.input_x_word,
input_mask=self.input_mask,
token_type_ids=self.segment_ids,
use_one_hot_embeddings=False)
layer_logits = []
print(model.get_all_encoder_layers())
if self.config.import_name == 'electra':
layer_logits = tf.layers.dense(
model.get_all_encoder_layers(), 1,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="all_layer_logit"
)
print('-' * 100)
layer_dist = tf.nn.softmax(layer_logits)
print(layer_dist.shape)
            layer_dist = tf.transpose(layer_dist, [1, 2, 3, 0]) # reorder axes
pooled_output = tf.matmul(layer_dist, tf.transpose(model.get_all_encoder_layers(), [1, 2, 0, 3]))
print(pooled_output.shape)
pooled_output = tf.squeeze(pooled_output, axis=2)
pooled_layer = pooled_output
print(pooled_layer.shape)
char_bert_outputs = pooled_layer
else:
for i, layer in enumerate(model.all_encoder_layers): # nezha and roberta
layer_logits.append(
tf.layers.dense(
layer, 1,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="layer_logit%d" % i
)
)
            layer_logits = tf.concat(layer_logits, axis=2) # concatenate along the third dimension
layer_dist = tf.nn.softmax(layer_logits)
seq_out = tf.concat([tf.expand_dims(x, axis=2) for x in model.all_encoder_layers], axis=2)
pooled_output = tf.matmul(tf.expand_dims(layer_dist, axis=2), seq_out)
pooled_output = tf.squeeze(pooled_output, axis=2)
pooled_layer = pooled_output
            # char_bert_outputs = pooled_layer[:, 1: max_seq_length - 1, :] # [batch_size, seq_length, embedding_size]
char_bert_outputs = pooled_layer
if self.config.use_origin_bert:
            final_hidden_states = model.get_sequence_output() # plain top-layer BERT output
self.config.embed_dense_dim = 768
else:
            final_hidden_states = char_bert_outputs # multi-layer fused BERT output
self.config.embed_dense_dim = 512
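        # The multi-layer branch learns one scalar logit per encoder layer,
        # softmaxes them into layer weights, and mixes the hidden states as
        # sum_l softmax(w)_l * H_l -- a learned alternative to using only the
        # top BERT layer.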
tvars = tf.trainable_variables()
init_checkpoint = self.config.bert_file # './chinese_L-12_H-768_A-12/bert_model.ckpt'
assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if bert_init:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
print(" name = {}, shape = {}{}".format(var.name, var.shape, init_string))
print('init bert from checkpoint: {}'.format(init_checkpoint))
        return final_hidden_states

# File: Server.py
__author__ = 'Aidan McRitchie, [email protected], Onyen = mcaim'
import re
import sys
import string
#import _curses.ascii
from socket import *
special_characters = '"<>()[]\\.,;:@"'
space = ' '
printset = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ "
syntax_501 = '501 Syntax error in parameters or arguments'
syntax_500 = '500 Syntax error: command unrecognized'
syntax_503 = '503 Bad sequence of commands'
ok = '250 ok'
data_ok = '354 Start mail input; end with <CRLF>.<CRLF>'
def mailfromcmd(line_list, line):
if line_list[0] != 'M':
return False
elif line_list[1] != 'A':
return False
elif line_list[2] != 'I':
return False
elif line_list[3] != 'L':
return False
if (whitespace(line_list[4]) == False):
return
i = 4
while whitespace(line_list[i]):
i = i+1
if line_list[i] != 'F':
return
else:
i = i+1
if line_list[i] != 'R':
return
else:
i = i+1
if line_list[i] != 'O':
return
else:
i = i+1
if line_list[i] != 'M':
return
else:
i = i+1
if line_list[i] != ':':
return
else:
i = i+1
while nullspace(line_list[i]):
i = i+1
# returns false or the index if true
current_index = rvspath(line_list, i)
if not current_index:
return
if line_list[current_index] == '\n':
return line[i+1:current_index - 1]
nullindex = current_index
#finds any nullspace after
while nullspace(line_list[nullindex]):
nullindex = nullindex + 1
if line_list[nullindex] != '\n':
#print_error('CLRF')
return False
#print('250 ok')
return line[i+1:current_index-1]
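# Illustrative trace (address made up): for
#   line = 'MAIL FROM:<[email protected]>\n'
# the reverse-path scan succeeds and the function returns 'user@host.edu';
# malformed input falls through one of the early returns and yields a falsy
# value instead.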
def rcpttocmd(line_list, line):
line_list = list(line)
if line_list[0] != 'R':
return False
elif line_list[1] != 'C':
return False
elif line_list[2] != 'P':
return False
elif line_list[3] != 'T':
return False
if (whitespace(line_list[4]) == False):
return False
i = 4
while whitespace(line_list[i]):
i = i + 1
if line_list[i] != 'T':
return False
else:
i = i + 1
if line_list[i] != 'O':
return False
else:
i = i + 1
if line_list[i] != ':':
return False
else:
i = i+1
while nullspace(line_list[i]):
i = i+1
# returns false or the index if true
current_index = rvspath(line_list, i)
if not current_index:
return
if line_list[current_index] == '\n':
return line[i + 1:current_index - 1]
nullindex = current_index
# finds any nullspace after
while nullspace(line_list[nullindex]):
nullindex = nullindex + 1
if line_list[nullindex] != '\n':
# print_error('CLRF')
return False
# print('250 ok')
return line[i + 1:current_index - 1]
def datacmd(line_list,line):
line_list = list(line)
if line_list[0] != 'D':
return False
elif line_list[1] != 'A':
return False
elif line_list[2] != 'T':
return False
elif line_list[3] != 'A':
return False
i = 4
if nullspace(line_list[4]):
while nullspace(line_list[i]):
i = i + 1
elif line_list[4] != '\n':
print('500 Syntax error: command unrecognized')
return False
#while nullspace(line_list[i]):
# i = i+1
if line_list[i] == '\n':
return True
else:
print('501 Syntax error in parameters or arguments')
return False
def whitespace(token):
if sp(token):
return True
else:
return False
def sp(token):
if token == ' ' or token == '\t':
return True
else:
return False
def nullspace(token):
if null(token) or whitespace(token):
return True
else:
return False
def null(token):
if token == '':
return True
else:
return False
def rvspath(token,index):
# if path true, return index, else return false
return path(token,index)
#can't have error here
def path(token,index):
current = index
if token[index] != '<':
#print_error('path')
return False
#if mailbox is valid returns current index, else returns false
current = mailbox(token,current+1)
if not current:
return False
# checks for last part of path
if token[current] != '>':
#print_error('path')
return False
return current + 1
def mailbox(token,index):
current = index
#check for local part has at least 1 char
if not localpart(token[current]):
#print_error('char')
return False
# looks for more valid chars in local part
while localpart(token[current]):
current = current + 1
#once the current index is not a valid char, checks to see if next index is @
if token[current] != '@':
#print_error('mailbox')
return False
current += 1
# now looking at domain
current = domain(token,current)
# if some part of domain was false, return false, else return current index
if not current:
return False
return current
def localpart(char):
specials = special_characters + space
if char in specials:
return False
if len(char) != len(char.encode()):
return False
printables = string.printable
if not (char in printables):
return False
if char == '\n':
return False
if char == '\t':
return False
# apparently doesn't work in python 2
'''if not char.isprintable():
return False'''
return True
def domain(token,index):
current = index
#check current index = letter
if not letter(token[current]):
#print_error('letter')
return False
# check next = letter/digit
current += 1
while letter(token[current]) or digit(token[current]):
current += 1
# check for ., if so, call domain again
if token[current] == '.':
current = domain(token,current+1)
# if no ., return index
return current
def letter(token):
pattern = re.compile('^[A-Za-z]')
if str(re.match(pattern,token)) == 'None':
return False
else:
return True
def digit(token):
pattern = re.compile('^[0-9]')
if str(re.match(pattern,token)) == 'None':
return False
else:
return True
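# Illustrative sketch (not called anywhere): how the recursive-descent helpers
# above compose to validate a reverse-path such as "<[email protected]>".  The
# address used here is a made-up example, not one from the assignment.
def _example_parse_path():
    line = 'MAIL FROM: <[email protected]>\n'
    tokens = list(line)
    start = line.index('<')
    # path() expects the index of '<' and returns the index just past '>'
    end = path(tokens, start)
    # on success, the bare mailbox sits between the angle brackets
    return line[start + 1:end - 1] if end else None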
def print_error(error):
print('ERROR -- '+error)
def parse():
for line in sys.stdin:
if line =='':
break
line_list = list(line)
if '\n' in line:
sys.stdout.write(line)
mailfromcmd(line_list,line)
else:
break
def mailcommand(line):
line_list = list(line)
if line_list[0] != 'M':
return False
elif line_list[1] != 'A':
return False
elif line_list[2] != 'I':
return False
elif line_list[3] != 'L':
return False
if (whitespace(line_list[4]) == False):
return False
i = 4
while whitespace(line_list[i]):
i = i+1
if line_list[i] != 'F':
return False
else:
i = i+1
if line_list[i] != 'R':
return False
else:
i = i+1
if line_list[i] != 'O':
return False
else:
i = i+1
if line_list[i] != 'M':
return False
else:
i = i+1
if line_list[i] != ':':
return False
else:
return True
def rcptcommand(line):
line_list = list(line)
if line_list[0] != 'R':
return False
elif line_list[1] != 'C':
return False
elif line_list[2] != 'P':
return False
elif line_list[3] != 'T':
return False
if (whitespace(line_list[4]) == False):
return False
i = 4
while whitespace(line_list[i]):
i = i + 1
if line_list[i] != 'T':
return False
else:
i = i + 1
if line_list[i] != 'O':
return False
else:
i = i + 1
if line_list[i] != ':':
return False
else:
return True
def datacommand(line):
line_list = list(line)
if line_list[0] != 'D':
return False
elif line_list[1] != 'A':
return False
elif line_list[2] != 'T':
return False
elif line_list[3] != 'A':
return False
else:
return True
def testrcpt():
line = read()
line_list = list(line)
sys.stdout.write(line)
print(rcptcommand(line))
# waiting for valid mail command
def waitformail(connectionSocket):
# state starts in mail from
state = 'mail from'
while True:
line = connectionSocket.recv(1024).decode() + '\n'
if line == 'QUIT\n':
quit_resp = '221 classroom.cs.unc.edu'
connectionSocket.send(quit_resp.encode())
connectionSocket.close()
#start_restart(get_serverSocket())
line_list = list(line)
#print(line_list)
#sys.stdout.write(line)
if not mailcommand(line):
if rcptcommand(line):
connectionSocket.send(syntax_503.encode())
continue
elif datacommand(line):
connectionSocket.send(syntax_503.encode())
continue
else:
connectionSocket.send(syntax_500.encode())
continue
if not mailfromcmd(line_list,line):
connectionSocket.send(syntax_501.encode())
else:
connectionSocket.send(ok.encode())
break
return True,mailfromcmd(line_list,line)
def waitforrcpt(connectionSocket):
while True:
line = connectionSocket.recv(1024).decode() + '\n'
if line == 'QUIT\n':
quit_resp = '221 classroom.cs.unc.edu'
connectionSocket.send(quit_resp.encode())
connectionSocket.close()
#start_restart(get_serverSocket())
line_list = list(line)
if not rcptcommand(line):
if mailcommand(line):
connectionSocket.send(syntax_503.encode())
continue
elif datacommand(line):
connectionSocket.send(syntax_503.encode())
continue
else:
connectionSocket.send(syntax_500.encode())
continue
if not rcpttocmd(line_list,line):
connectionSocket.send(syntax_501.encode())
else:
connectionSocket.send(ok.encode())
break
return True,rcpttocmd(line_list,line)
def socket_keeper():
return socket
def waitforrcptordata(connectionSocket):
data_or_rcpt = ''
while True:
line = connectionSocket.recv(1024).decode() + '\n'
if line == 'QUIT\n':
quit_resp = '221 classroom.cs.unc.edu'
connectionSocket.send(quit_resp.encode())
connectionSocket.close()
#start_restart(get_serverSocket())
line_list = list(line)
if not rcptcommand(line):
if mailcommand(line):
connectionSocket.send(syntax_503.encode())
continue
elif datacommand(line):
if not datacmd(line_list, line):
#print('500 Syntax error: command unrecognized')
continue
else:
#print('250 OK')
data_or_rcpt = "DATA"
break
else:
connectionSocket.send(syntax_500.encode())
continue
if not rcpttocmd(line_list,line):
connectionSocket.send(syntax_501.encode())
else:
data_or_rcpt = "RCPT"
break
return data_or_rcpt,rcpttocmd(line_list,line)
def waitforDATA(connectionSocket):
while True:
line = connectionSocket.recv(1024).decode()
line_list = list(line)
if not datacommand(line):
if rcptcommand(line):
print('503 Bad sequence of commands')
continue
elif mailcommand(line):
print('503 Bad sequence of commands')
continue
else:
print('500 Syntax error: command unrecognized')
continue
        if not datacmd(line_list, line):
print('501 Syntax error in parameters or arguments')
else:
break
return True
##### SUPER IMPORTANT FUNCTION ####
# reads one line of stdin at a time
# checks for EOF and kills program when found
def read():
line = sys.stdin.readline()
if line == '':
sys.exit(0)
return line
# starts State Machine.... writes file then calls itself if valid email sequence reached
def start(connectionSocket,serverSocket):
#goal: pull out data from socket communication with client
mailfrom = ''
recipients = []
lines = ''
# initialize mail from state
bool, mailfrom = waitformail(connectionSocket)
#keep checking until valid mail from
while not bool:
bool, mailfrom = waitformail(connectionSocket)
bool, mailto = waitforrcpt(connectionSocket)
# keep checking until valid rcpt to
while not bool:
bool, mailto = waitforrcpt(connectionSocket)
# add rcpt to list of recipients
recipients.append(mailto)
# keep checking for either valid rcpt or valid data
# if another rcpt keep checking until data
while True:
rcpt_or_data,mailto = waitforrcptordata(connectionSocket)
# keep looking for valid rcpt or data command
while rcpt_or_data != 'DATA' and rcpt_or_data != 'RCPT':
            rcpt_or_data, mailto = waitforrcptordata(connectionSocket)
# if line == rcpt
if rcpt_or_data == 'RCPT':
recipients.append(mailto)
connectionSocket.send(ok.encode())
# don't break cause could be more rcpt's
# if line == data
if rcpt_or_data == 'DATA':
connectionSocket.send(data_ok.encode())
break
# look for data and append to list
data = connectionSocket.recv(1024).decode()
lines = data.split('\n')
connectionSocket.send(ok.encode())
# file stuff
for recipient in recipients:
split = recipient.split('@')[1]
# make new file string for each recipient, writes string (file) to output at end
file = ''
# open new file...if already created, append new data to same file
output = open("./forward/{split}".format(split=split), "a+")
for line in lines:
if line == '.':
break
file += line + '\n'
output.write(file)
output.close()
#line = connectionSocket.recv(1024).decode() + '\n'
quit_resp = '221 classroom.cs.unc.edu'
connectionSocket.send(quit_resp.encode())
connectionSocket.close()
start_restart(serverSocket)
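# Overall command sequence enforced by start()/start_restart() above:
#   220 greeting -> HELO -> MAIL FROM -> RCPT TO -> {more RCPT TO | DATA}
#   -> message lines terminated by "." -> 221 reply on QUIT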
def data(connectionSocket):
line = connectionSocket.recv(1024).decode() + '\n'
print(line)
if line == 'QUIT\n':
quit_resp = '221 classroom.cs.unc.edu'
connectionSocket.send(quit_resp.encode())
connectionSocket.close()
server_start(get_port())
if line == '.\n':
print('here')
connectionSocket.send(ok.encode())
return True,''
return False,line
def parse_helo(msg,serverSocket):
if msg[0:5] != 'HELO ':
serverSocket.close()
start_restart(serverSocket)
if domain(msg, 5) == False:
serverSocket.close()
start_restart(serverSocket)
if msg[-1] != '\n':
serverSocket.close()
start_restart(serverSocket)
serverSocket.send((msg.strip('\n') + 'pleased to meet you\n').encode())
# loops server
def start_restart(serverSocket):
connectionSocket, addr = serverSocket.accept()
greeting = '220 classroom.cs.unc.edu'
# sends greeting to client
connectionSocket.send(greeting.encode())
# client send helo
helo = connectionSocket.recv(1024).decode()
parse_helo(helo,connectionSocket)
start(connectionSocket,serverSocket)
connectionSocket.close()
start_restart(serverSocket)
port = 8623
def set_port(port):
port = port
def get_port():
return port
def server_start(port):
serverPort = port
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
#print('Server is ready')
start_restart(serverSocket)
def main():
port = int(sys.argv[1])
set_port(port)
    # server_start never returns: it loops accepting connections and runs
    # the SMTP state machine via start_restart()/start().
    server_start(port)
main()
#print(False == False)
'''
MAIL FROM:<K0~ @N2X5R2>
MAIL FROM: <[email protected]>
MAIL FROM: <4j@e5.F7.Dq>
MAIL FROM: <g@c1x>
MAIL FROM:</ s@T6t>
MAIL FROM: <#@b3>
MAIL FROM:<X@K5dJ>
MAIL FROM: <[email protected]>
MAIL FROM:<D@qx>
MAIL FROM:<@S08>
MAIL FROM:<[email protected]>
'''
| [
"[email protected]"
] | |
6dfbf07b6dea937b2bc5553d2aa83cbd9c13c8b0 | 9dfbe905aae1478318ca9a581a7e97e69b749d7d | /python/day04.py | 48a3611dc7724429c24444617fac970a3bb42ce8 | [] | no_license | chriscummings100/aoc2020 | 41c738b15731a0cd930a249586584cb1a943604e | 592084ccf967f80c3a46f89bae7b7b0fe9fc1ff9 | refs/heads/main | 2023-01-29T05:23:33.917324 | 2020-12-12T17:17:26 | 2020-12-12T17:17:26 | 320,051,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | import re
curr_entry = {}
all_entries = []
with open("day04input.txt") as f:
while True:
line = f.readline()
if not line:
break
line = line.strip()
if len(line) == 0:
all_entries.append(curr_entry)
curr_entry = {}
else:
line_entries = line.split(" ")
for x in line_entries:
matches = re.match(r"(\w\w\w)\:(.*)", x)
curr_entry[matches.group(1)] = matches.group(2)
all_entries.append(curr_entry)
valid = 0
#byr (Birth Year) - four digits; at least 1920 and at most 2002.
#iyr (Issue Year) - four digits; at least 2010 and at most 2020.
#eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
#hgt (Height) - a number followed by either cm or in:
#If cm, the number must be at least 150 and at most 193.
#If in, the number must be at least 59 and at most 76.
#hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
#ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
#pid (Passport ID) - a nine-digit number, including leading zeroes.
#cid (Country ID) - ignored, missing or not.
def checkyear(val, min, max):
if not val:
return False
if not re.match(r"^\d\d\d\d$",val):
print(val)
return False
ival = int(val)
if ival < min or ival > max:
return False
return True
def checkheight(val):
if not val:
return False
match = re.match(r"^(\d+)([a-z]{2})$", val)
if not match:
return False
if match.group(2) == "cm":
val = int(match.group(1))
return val >= 150 and val <= 193
elif match.group(2) == "in":
val = int(match.group(1))
return val >= 59 and val <= 76
else:
return False
def checkhair(val):
if not val:
return False
return re.match(r"^\#[0-9a-f]{6}$",val)
def checkeye(val):
if not val:
return False
return val in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
def checkid(val):
if not val:
return False
return re.match(r"^\d{9}$", val)
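# Quick sanity checks for the validators above (illustrative values only):
#   checkyear("2002", 1920, 2002)  -> True
#   checkheight("59in")            -> True
#   checkhair("#123abc")           -> truthy match object
#   checkid("000000001")           -> truthy match object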
for entry in all_entries:
if not checkyear(entry.get("byr"), 1920, 2002):
continue
if not checkyear(entry.get("iyr"), 2010, 2020):
continue
if not checkyear(entry.get("eyr"), 2020, 2030):
continue
if not checkheight(entry.get("hgt")):
continue
if not checkhair(entry.get("hcl")):
continue
if not checkeye(entry.get("ecl")):
continue
if not checkid(entry.get("pid")):
continue
valid += 1
print(valid)
#print(all_entries) | [
"[email protected]"
] | |
b296def624467a03808f1a77ad01caba6e298e77 | 2e335f7db34b0b80e114d02a3ae02ee485aa2560 | /cozy/structures/arrays.py | c87f5ab3b8e1e0901cd7d5022477adb032b8ab3a | [
"Apache-2.0"
] | permissive | MostAwesomeDude/cozy | 6a3d60d4a7da9bc95bcc4f5f20645ac3e0a8d725 | e7b0ace2915c54b1176fc4d3eed289ede109a058 | refs/heads/master | 2020-03-24T11:17:51.860989 | 2018-07-26T22:33:37 | 2018-07-26T22:33:37 | 142,681,384 | 0 | 0 | Apache-2.0 | 2018-07-28T13:56:19 | 2018-07-28T13:56:19 | null | UTF-8 | Python | false | false | 564 | py | from cozy.common import declare_case
from cozy.syntax import Type, Exp, Stm
TArray = declare_case(Type, "TArray", ["t"])
EArrayCapacity = declare_case(Exp, "EArrayCapacity", ["e"])
EArrayLen = declare_case(Exp, "EArrayLen", ["e"])
EArrayGet = declare_case(Exp, "EArrayGet", ["a", "i"])
EArrayIndexOf = declare_case(Exp, "EArrayIndexOf", ["a", "x"])
SArrayAlloc = declare_case(Stm, "SArrayAlloc", ["a", "capacity"])
SArrayReAlloc = declare_case(Stm, "SArrayReAlloc", ["a", "new_capacity"])
SEnsureCapacity = declare_case(Stm, "SEnsureCapacity", ["a", "capacity"])
| [
"[email protected]"
] | |
755cb6cacb99a472555a45b426dd7575ffa1159f | 395ab72edfc78710334b7c1a44550890ae474de9 | /fitmodel.py | 9dff86a557818cded390f213a1ea8d665b325450 | [
"MIT"
] | permissive | deapplegate/wtgpipeline | 20862a34c08a27dc3e09fde8f9185c590dceac43 | 9693e8562022cc97bf5a96427e22965e1a5e8497 | refs/heads/master | 2023-07-01T04:06:05.340473 | 2021-07-27T18:44:34 | 2021-07-27T18:44:34 | 100,309,025 | 1 | 2 | null | 2017-10-17T17:43:07 | 2017-08-14T20:59:18 | Python | UTF-8 | Python | false | false | 8,782 | py | ###################################
# Utilities for fitting models
#
# Based on solution by abeardmore found on http://code.google.com/p/pyminuit/issues/detail?id=6
#
# Modified and extended by Douglas Applegate
###################################
import numpy
import minuit
import math, inspect
import scipy.stats as stats
###############################
__cvs_id__ = "$Id: fitmodel.py,v 1.2 2010-07-02 23:08:47 dapple Exp $"
###############################
###############################
# Statistical Distribution Look-up functions
###############################
def chisq_exceeds_prob(chisq, dof):
'''
Probability that chisq exceeds value, given degrees of freedom dof
'''
return stats.chi2.sf(chisq, dof)
###
def f_test_exceeds_prob(chisq_old, dof_old, chisq_new, dof_new):
'''
Probability that the improvement in a fit by adding extra parameters is random
'''
deltaDOF = dof_old - dof_new
F = (chisq_old - chisq_new)/(deltaDOF*chisq_new/dof_new)
return stats.f.sf(F, deltaDOF, dof_new)
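# Minimal usage sketch (numbers are assumed, not from a real fit): compare a
# linear fit (chisq=12.3, dof=8) against a quadratic fit (chisq=5.1, dof=7).
# A small probability suggests the extra parameter is a genuine improvement.
#
#   p = f_test_exceeds_prob(12.3, 8, 5.1, 7)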
###############################
# Common Models
###############################
def ConstantModel(x, a0):
return a0
#######
def LinearModel(x, a0, a1):
return a0 + a1*x
########
def QuadraticModel(x, a0, a1, a2):
return a0 + a1*x + a2*x**2
########
class PolynomialModel(object):
"""
Creates a polynomial model of the form
a0 + a1*x + a2*x**2 + ...
where the order parameter controls which orders are included
"""
def __init__(self, order):
'''
order is a list of positive integers specifying polynomial order to include
0: constant, 1: linear, 2: quadratic, etc.
Does not include lower order terms implicitly (ie specify [0,1,2], etc
'''
self.order = order
self.basis = {}
for o in order:
param = 'a%d' % o
def base(x, a, order=o):
return a*(x**order)
self.basis[param] = base
self.params=self.basis.keys()
def __call__(self, x, *params, **keyword_params):
for key, val in zip(self.params, params):
keyword_params[key] = val
sum = 0.
for key, val in keyword_params.iteritems():
sum += self.basis[key](x, val)
return sum
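# Usage sketch: a polynomial with constant and quadratic terms only.  Keyword
# arguments avoid relying on the (arbitrary) ordering of self.params.
#
#   model = PolynomialModel([0, 2])
#   model(2.0, a0=1.0, a2=3.0)   # 1 + 3*2**2 = 13.0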
###########
def PowerLawModel(x, alpha, beta):
return alpha*x**beta
###########
def GaussianModel(x, A, mu, sigma):
z = (x - mu) / sigma
return A*numpy.exp(-0.5*z**2)
###############################
# Statistical Fuctions for Minimization
###############################
def ChiSqStat(ydata, yerr, ymodel):
"""
Returns the chi-square given arrays of ydata, yerr, and ymodel values.
"""
chisquared = ((ydata - ymodel)/yerr)**2
stat = chisquared.sum()
return stat
####################
def CStat(ydata, yerr, ymodel):
"""
Returns the cstat a la xspec given arrays of data and model values.
This is a -2.0 log likelihood statistic.
"""
lmodel = numpy.zeros(ymodel.size)
lmodel[ymodel <= 0.0] = -32.
lmodel[ymodel > 0.0] = numpy.log(ymodel[ymodel > 0.0])
ldata = numpy.zeros(ydata.size)
ldata[ydata <= 0.0] = -32.0
ldata[ydata > 0.0] = numpy.log(ydata[ydata > 0.0])
# fitstat = ymodel - ydata + ydata * (ldata - lmodel)
fitstat = ymodel + ydata * ((ldata - lmodel) - 1.0)
stat = 2.0* fitstat.sum()
return stat
###############################
# Fitting Class -- Use to perform minimizations
###############################
class FitModel:
"""
Fits a generic model (provided by the class Model to data (numpy arrays
xdata and ydata), with a fit statistic provided by StatFunc.
"""
def __init__(self, xdata, ydata, yerr, model,
statfunc = ChiSqStat, guess = []):
self.xdata = numpy.array(xdata, dtype=numpy.float64)
self.ydata = numpy.array(ydata, dtype=numpy.float64)
self.yerr = numpy.array(yerr, dtype=numpy.float64)
self.model = model
self.statfunc = statfunc
self.guess = guess
self.fcn = FCN(self.xdata, self.ydata, self.yerr, model, statfunc)
self.m = minuit.Minuit( self.fcn )
self.params = self.m.parameters
if self.guess == []:
self.guess = numpy.ones(len(self.params))
for param, value in zip(self.params, self.guess):
self.m.values[param] = value
self.m.errors[param] = math.fabs(value) * 0.05
self.m.strategy = 1
self.m.tol = 1.0
self.have_fit = False
def fixed(self, fparams):
"""
Fix or unfix the parameters specified in the dictionary fparams, which
contain True or False values.
"""
for key in fparams.keys():
self.m.fixed[key] = fparams[key]
def limits(self, lparams):
"""
Set limits given by the parameters in the dictionary lparams.
"""
for key in lparams.keys():
self.m.limits[key] = lparams[key]
def fit(self, printmode = 0):
"""
Call migrad to fit the model to the data.
Set printmode = 1 to monitor the progress of the fitting.
"""
self.m.printMode = printmode
self.par_vals = {}
self.ymodel = None
try :
self.m.migrad()
print "fval = %g, nfcn %d" % (self.m.fval, self.m.ncalls)
self.m.migrad()
print "fval = %g, nfcn %d" % (self.m.fval, self.m.ncalls)
print "Fit parameters : "
print self.m.values
self.par_vals = self.m.values
# calculate the best fit model
self.ymodel = self.model( self.xdata, **self.m.values )
self.statval = self.m.fval
self.have_fit = True
except minuit.MinuitError :
# reset have_fit if migrad fails
self.have_fit = False
def uncert(self, nsigma = 1.0):
"""
Calculate the parameter uncertainties at the nsigma**2
confidence level. E.g. for one parameter of interest
nsigma = 1.0 for 68%
1.645 for 90%
2.0 for 95.45%
3.0 for 99.73%
"""
if not(self.have_fit) :
print "Warning: uncert requires a valid fit."
return
# in case minos fails
self.m.hesse()
print "Hesse errors : "
print self.m.errors
self.par_err = {}
for key in self.m.values.keys():
if (self.m.fixed[key] == True):
continue
try:
self.m.minos(key, -nsigma)
self.m.minos(key, nsigma)
error = (self.m.merrors[key, -nsigma],
self.m.merrors[key, nsigma])
except minuit.MinuitError :
print "Caught MinuitError: Minos failed. using Hesse error."
print "Only really valid for a well behaved fitting FCN !"
error = self.m.errors[key] * nsigma
self.par_err[key] = error
print "Parameter errors :"
print self.par_err
def corr_matrix(self):
"""
Display the fit parameter correlation matrix."
"""
if not(self.have_fit) :
print "Warning: uncert requires a valid fit."
return
print "Correlation matrix :"
print numpy.array(self.m.matrix(correlation=True))
#####################################
# Utilities
####################################
def FCN(x,y,yerr, model, statfunc):
"""
Calculates the fitting FCN for pyMinuit(2) given the data (xdata & ydata)
and model (class Model, with a tuple of initial parameters, params),
using the class StatFunc to calculate the statistic.
"""
#assumes model is a function with first arg being X values
if inspect.isfunction(model):
params = inspect.getargspec(model)[0][1:]
elif hasattr(model, '__call__'):
args = inspect.getargspec(model.__call__)[0]
if len(args) < 3:
paramAttr = inspect.getargspec(model.__call__)[1]
params = getattr(model, paramAttr)
else:
params = args[2:]
paramstring = ','.join(params)
class_template = '''class fitclass(object):
def __init__(self, x, y, yerr, model, statfunc):
self.x = x
self.y = y
self.yerr = yerr
self.model = model
self.statfunc = statfunc
def __call__(self, %s):
        return self.statfunc(self.y, self.yerr, self.model(self.x, %s))
''' % (paramstring, paramstring)
exec class_template
return fitclass(x,y,yerr,model,statfunc)
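# End-to-end sketch with synthetic data (values assumed).  minuit here is the
# pyminuit package imported at the top of this module.
#
#   x = numpy.linspace(0.0, 10.0, 20)
#   y = 2.0 + 0.5 * x
#   fitter = FitModel(x, y, numpy.ones_like(x), LinearModel, guess=[1.0, 1.0])
#   fitter.fit()
#   fitter.uncert(nsigma=1.0)   # 68% minos confidence intervals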
| [
"[email protected]"
] | |
f657b49a94c741116b559b970566f1865e4e630f | efec061962750decbb293c382871f71832f2c07a | /src/plonetheme/markiezenhof/setuphandlers.py | 35c024fd67b68d24d72900301ec6ca67088be97b | [] | no_license | plone-ve/plonetheme.markiezenhof | 8a2670b0ce5fd3c819e74f9db6934c64d3379b5b | b1cf9f6f452b423d1dcae26c7460cd380577a4d8 | refs/heads/master | 2023-08-28T00:43:17.625799 | 2016-06-10T12:29:49 | 2016-06-10T12:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
from Products.CMFPlone.interfaces import INonInstallable
from zope.interface import implementer
@implementer(INonInstallable)
class HiddenProfiles(object):
def getNonInstallableProfiles(self):
"""Hide uninstall profile from site-creation and quickinstaller"""
return [
'plonetheme.markiezenhof:uninstall',
]
def post_install(context):
"""Post install script"""
# Do something at the end of the installation of this package.
def uninstall(context):
"""Uninstall script"""
# Do something at the end of the uninstallation of this package.
| [
"[email protected]"
] | |
4c4b8aaaada1da35461d159d800be8a304d45f8b | 8a0d2c985ead725a209812a4dd2935373e1c05e7 | /vt-subdomains.py | 3ed074bb37465af3ab08df4710544621c6034213 | [] | no_license | smed79/vt-subd-scraper | 77fe7e7d34b2356c764a9c9db9cae3ec67919061 | 8ae04dc19a94845d7096833437b201a411dd40e0 | refs/heads/master | 2023-03-17T16:26:08.748594 | 2018-10-07T18:57:20 | 2018-10-07T18:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | #!/usr/bin/env python3
import requests
from os import environ
import os
import sys
import json
def main(domain, apikey):
url = 'https://www.virustotal.com/vtapi/v2/domain/report'
params = {'apikey':apikey,'domain':domain}
try:
response = requests.get(url, params=params)
jdata = response.json()
domains = sorted(jdata['subdomains'])
except(KeyError):
print("No domains found for %s" % domain)
exit(0)
except(requests.ConnectionError):
print("Could not connect to www.virtustotal.com", file=sys.stderr)
exit(1)
for domain in domains:
print(domain)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python3 vt-subdomains.py domain.com", file=sys.stderr)
sys.exit(1)
domain = sys.argv[1]
if environ.get('VTAPIKEY'):
apikey = os.environ['VTAPIKEY']
else:
print("VTAPIKEY environment variable not set. Quitting.", file=sys.stderr)
sys.exit(1)
main(domain, apikey)
| [
"[email protected]"
] | |
2f82445e68f7fd36002f38d4b614262803b8f378 | de9af73c37ba970fcbda9594243ccd4dfa3ba66e | /torchtools/tt/__init__.py | 9dab24e285b500dd5cc4c6cd713f668ce67609cc | [
"MIT"
] | permissive | TianyuanYu/BGNN-AAAI | 8ae04e162e54dfc5c8ec20ef826524b906b1e01c | 16bd260b93009be27932415e74ce1b3128215d92 | refs/heads/master | 2022-10-29T07:08:28.175190 | 2020-06-18T09:26:49 | 2020-06-18T09:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from torchtools.tt.arg import _parse_opts
from torchtools.tt.utils import *
from torchtools.tt.layer import *
from torchtools.tt.logger import *
from torchtools.tt.stat import *
from torchtools.tt.trainer import *
# global command line arguments
arg = _parse_opts()
| [
"[email protected]"
] | |
6f755efdd77859e41445a0f5e96e81a88f8644f3 | 5246838f884449a95aadd8fed71d1b1fc29f333c | /two/2.1.py | 3456c268a40c75ee9f1506f3c9fade20caf50a9f | [] | no_license | chenjb04/PythonCookbook | 046396915f614c5090442f100adf8b0696e5de3e | 9e906aab39f7799d6d53b768aac01616badd830f | refs/heads/master | 2020-05-01T14:21:50.781637 | 2019-03-31T10:22:52 | 2019-03-31T10:22:52 | 177,517,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # -*- coding:utf-8 -*-
__author__ = 'ChenJiaBao'
__date__ = '2019/3/29 15:31'
"""
对任意多的分隔符拆分字符串
re.split()
"""
import re
line = 'absd fjkd; afed, fjrel,asdf, foo'
ret = re.split(r'[;,\s]\s*', line)
print(ret) | [
"[email protected]"
] | |
6e9c807ed2d9091213791471b2b26664ff2558fe | ee3b17d703903909628f23bbba862ec396ffd9e5 | /create_dataset.py | bae188b263ca60324231776b83532ddd0af3e6e2 | [] | no_license | Kanagaraj-NN/fault-localization | 84b9e492f4ad382efee3601b4074e9b5b799ce05 | cad86c74bb9c995438c74a2e534e9cd99c5f9e07 | refs/heads/master | 2020-03-13T15:35:29.650619 | 2018-10-29T13:52:44 | 2018-10-29T13:52:44 | 131,179,956 | 0 | 0 | null | 2018-04-26T16:03:16 | 2018-04-26T16:03:15 | null | UTF-8 | Python | false | false | 5,327 | py | import argparse
import csv
import os
PROJECTS = ['Closure', 'Lang', 'Chart', 'Math', 'Mockito', 'Time']
PROJECT_BUGS = [
[str(x) for x in range(1, 134)],
[str(x) for x in range(1, 66)],
[str(x) for x in range(1, 27)],
[str(x) for x in range(1, 107)],
[str(x) for x in range(1, 39)],
[str(x) for x in range(1, 28)]
]
FORMULA = {'barinel', 'dstar2', 'jaccard', 'muse', 'ochiai', 'opt2', 'tarantula'}
class Dataset(object):
def __init__(self, formula, num_lines):
self.base_formula = formula
self.rows = {}
self.num_lines = num_lines
for project in PROJECTS:
self.rows[project] = {}
def to_csv(self, output_csv):
"""
Write dataset to output csv file
Parameters
----------
output_csv : str
Output file to write the dataset to
"""
output = []
columns = 'project,bug,'
columns += ','.join(['line_%s' % (i+1) for i in range(self.num_lines)])
for formula in FORMULA:
for i in range(self.num_lines):
columns += ',line_%s_%s' % (i+1, formula)
output.append(columns)
for project in PROJECTS:
for bug in self.rows[project]:
output.append(self.rows[project][bug].to_csv())
with open(output_csv, 'w') as fwriter:
fwriter.write('\n'.join(output))
def __len__(self):
return self.num_lines
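# Resulting CSV layout (for num_lines = 2):
#   project,bug,line_1,line_2,line_1_<f>,line_2_<f>,...
# with one block of line_i_<formula> columns per formula in FORMULA.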
class Row(object):
"""
The row class holds information about a specific row. Mainly, it holds information about the project, bug_id,
lines affected, suspiciousness scores for each line given by that formula
"""
def __init__(self, project, bug_id):
self.project = project
self.bug_id = bug_id
self.lines = []
self.data = {}
for formula in FORMULA:
self.data[formula] = []
def to_csv(self):
lines_output = ','.join(self.lines)
suspiciousness = []
for formula in FORMULA:
susp = ','.join([str(x) for x in self.data[formula]])
suspiciousness.append(susp)
return '%s,%s,%s,' % (self.project, self.bug_id, lines_output) + ','.join(suspiciousness)
def add_rows_for_formula(dataset, data_dir, formula):
"""
Add rows for a particular formula to the dataset
Parameters
----------
dataset : Dataset
an object of the type dataset with dataset.lines already populated
data_dir : str
the data directory for the suspiciousness files
formula : str
        formula whose suspiciousness values should be appended to each row
Returns
-------
None
"""
for project, bugs in zip(PROJECTS, PROJECT_BUGS):
for bug in bugs:
data = {}
input_file = os.path.join(data_dir, '%s-%s-%s-sorted-susp' % (project, bug, formula))
with open(input_file) as freader:
csvreader = csv.DictReader(freader)
for line in csvreader:
data[line['Line']] = float(line['Suspiciousness'])
for line in dataset.rows[project][bug].lines:
if line in data:
dataset.rows[project][bug].data[formula].append(data[line])
else:
dataset.rows[project][bug].data[formula].append(0.0)
def create_dataset(data_dir, formula, num_lines):
"""
Create a dataset object and add rows from the sorted suspiciousness value file to it
Parameters
----------
data_dir : str
the data directory for the suspiciousness files
formula : str
formula to use as the base
    num_lines : int
number of lines to read from the csv
Returns
-------
Dataset
a dataset object
"""
dataset = Dataset(formula, num_lines)
for project, bugs in zip(PROJECTS, PROJECT_BUGS):
for bug in bugs:
row = Row(project=project, bug_id=bug)
input_file = os.path.join(data_dir, '%s-%s-%s-sorted-susp' % (project, bug, formula))
with open(input_file) as freader:
csvreader = csv.DictReader(freader)
for line in csvreader:
row.lines.append(line['Line'])
row.data[formula].append(float(line['Suspiciousness']))
if len(row.lines) == num_lines:
break
dataset.rows[project][bug] = row
return dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--formula', required=True, choices=FORMULA,
help='Base formula to use while creating the dataset')
parser.add_argument('-d', '--data-dir', required=True, help='Data directory with all sorted suspiciousness values')
parser.add_argument('-n', '--num-lines', required=True, type=int, help='Number of lines to consider')
parser.add_argument('-o', '--output-dir', required=True, help='Output directory to write dataset to')
args = parser.parse_args()
dataset = create_dataset(args.data_dir, args.formula, args.num_lines)
for formula in FORMULA:
if formula != args.formula:
add_rows_for_formula(dataset, args.data_dir, formula)
dataset.to_csv(os.path.join(args.output_dir, 'dataset-%s-%s.csv' % (args.formula, args.num_lines)))
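# Example invocation (paths are placeholders):
#   python create_dataset.py -f ochiai -d data/suspiciousness -n 10 -o out/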
| [
"[email protected]"
] | |
9bb4f05412faa3982c82de55fedc200a7790cc48 | c06efd90533c51c2b29b7e92cd13723388de25ee | /actions/listRbacAuthorizationV1alpha1RoleForAllNamespaces.py | 19afa194e46fdb73582c2cf0fc1d052e06ecc134 | [] | no_license | ajohnstone/stackstorm-kubernetes | 490e4a73daad3713d7c5b5b639d5f30ff1ab3e58 | 99ffad27f5947583a2ab1b56e80c06003d014c47 | refs/heads/master | 2021-01-11T23:29:49.642435 | 2016-12-07T13:20:34 | 2016-12-07T13:20:34 | 78,588,572 | 0 | 0 | null | 2017-01-11T00:48:59 | 2017-01-11T00:48:59 | null | UTF-8 | Python | false | false | 1,017 | py | from lib import k8s
from st2actions.runners.pythonrunner import Action
class listRbacAuthorizationV1alpha1RoleForAllNamespaces(Action):
def run(self,config_override=None,fieldSelector=None,labelSelector=None,pretty=None,resourceVersion=None,timeoutSeconds=None,watch=None):
myk8s = k8s.K8sClient(self.config)
args = {}
if config_override is not None:
args['config_override'] = config_override
if fieldSelector is not None:
args['fieldSelector'] = fieldSelector
if labelSelector is not None:
args['labelSelector'] = labelSelector
if pretty is not None:
args['pretty'] = pretty
if resourceVersion is not None:
args['resourceVersion'] = resourceVersion
if timeoutSeconds is not None:
args['timeoutSeconds'] = timeoutSeconds
if watch is not None:
args['watch'] = watch
return (True, myk8s.runAction('listRbacAuthorizationV1alpha1RoleForAllNamespaces', **args))
| [
"[email protected]"
] | |
12d0188fb6fd2916853aa544d5ca05a6481a4047 | 9eed44fb62d895b083b648a01e27f4aa6fae2880 | /cnn/mnist/mnist_main.py | 4d2ad0ca4557ac5b566745810982950400545f48 | [] | no_license | koibiki/tf_estimator_learn | 6ac839fc4ce3a3d0f2cde8e97f0cfd73949d8490 | d79d3a0d5b19d0c92bce24a656c1197b94c35b66 | refs/heads/master | 2020-05-07T14:27:14.721816 | 2019-04-10T14:09:29 | 2019-04-10T14:41:20 | 180,595,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,892 | py | import tensorflow as tf
from cnn.mnist.net.cnn_net import MnistCnn
import numpy as np
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
mnist_cnn = MnistCnn()
logits = mnist_cnn(features)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1, name="class_tensor"),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=logits,
export_outputs={
"translate": tf.estimator.export.PredictOutput(logits)
})
elif mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images # Returns np.array
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images # Returns np.array
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
# Set up logging for predictions
# Log the values in the "Softmax" tensor with label "probabilities"
tensors_to_log = {"classes": "class_tensor"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=512,
num_epochs=None,
shuffle=True)
mnist_classifier.train(input_fn=train_input_fn, steps=20000, hooks=[logging_hook])
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data}, y=eval_labels, num_epochs=1, shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
| [
"[email protected]"
] | |
ae51d1913b4b2065ba5070e60c6b33337f9e56d2 | fc186ff017d25ba9391f4a26e9e707d74fb16093 | /recursion/N-th-Fibonacci-Number/solution3.py | 976806fa77467a843abe4200a408b02fc9ffaa84 | [] | no_license | SivaAkhil/DataStructures-and-Algorithms | 9178fbdb89eda717ea0df7a8693179decbb98c57 | 5b6e2b0b46ea59ee6e0f7c91f2b7d129780fd0d2 | refs/heads/main | 2023-06-29T00:22:21.633970 | 2021-08-03T09:46:48 | 2021-08-03T09:46:48 | 387,405,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # this is iterative solution
# time O(N)
# space O(1)
def ntfib(n):
lastTwo = [0, 1]
counter = 3
while counter <= n:
nextfib = lastTwo[0] + lastTwo[1]
lastTwo[0] = lastTwo[1]
lastTwo[1] = nextfib
counter += 1
    if n > 1:
        return lastTwo[1]
    else:
        return lastTwo[0]
print(ntfib(50))
| [
"[email protected]"
] | |
9f1846dba8512da311219e14da9510b5ba99eba1 | fe8360d9284d8156cd557d3a757645c11849cdd9 | /models/coverage_tests.py | 4e1b6442279a580d20a09444f28840390ef1316d | [] | no_license | hvanreenen/fhir-rest-server | 5a1a5bcb9a3477d9f9d133c263f61ba202db5741 | 36ae55706aba0fdfcf084dbb24bd8c73929b3e0f | refs/heads/master | 2021-01-10T23:45:06.793874 | 2016-10-20T09:57:04 | 2016-10-20T09:57:04 | 70,390,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,403 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-10-07.
# 2016, SMART Health IT.
import os
import io
import unittest
import json
from . import coverage
from .fhirdate import FHIRDate
class CoverageTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Coverage", js["resourceType"])
return coverage.Coverage(js)
def testCoverage1(self):
inst = self.instantiate_from("coverage-example-2.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage1(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage1(inst2)
def implCoverage1(self, inst):
self.assertEqual(inst.dependent, 1)
self.assertEqual(inst.id, "7546D")
self.assertEqual(inst.identifier[0].system, "http://xyz.com/codes/identifier")
self.assertEqual(inst.identifier[0].value, "AB9876")
self.assertEqual(inst.period.end.date, FHIRDate("2012-03-17").date)
self.assertEqual(inst.period.end.as_json(), "2012-03-17")
self.assertEqual(inst.period.start.date, FHIRDate("2011-03-17").date)
self.assertEqual(inst.period.start.as_json(), "2011-03-17")
self.assertEqual(inst.plan, "11024")
self.assertEqual(inst.subPlan, "D15C9")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the coverage</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "EHCPOL")
self.assertEqual(inst.type.display, "extended healthcare")
self.assertEqual(inst.type.system, "http://hl7.org/fhir/v3/ActCode")
def testCoverage2(self):
inst = self.instantiate_from("coverage-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage2(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage2(inst2)
def implCoverage2(self, inst):
self.assertEqual(inst.dependent, 1)
self.assertEqual(inst.id, "9876B1")
self.assertEqual(inst.identifier[0].system, "http://benefitsinc.com/certificate")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.period.end.date, FHIRDate("2012-05-23").date)
self.assertEqual(inst.period.end.as_json(), "2012-05-23")
self.assertEqual(inst.period.start.date, FHIRDate("2011-05-23").date)
self.assertEqual(inst.period.start.as_json(), "2011-05-23")
self.assertEqual(inst.plan, "CBI35")
self.assertEqual(inst.sequence, 1)
self.assertEqual(inst.subPlan, "123")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the coverage</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "EHCPOL")
self.assertEqual(inst.type.display, "extended healthcare")
self.assertEqual(inst.type.system, "http://hl7.org/fhir/v3/ActCode")
| [
"[email protected]"
] | |
c8f014085dc60b59132c67403fa5d7c84661e430 | 3152274ae39760dc1962504e2b4b9b39e885b338 | /circle_area.py | 754b8174d44d3a89cd89b15a7c912941b7fd8f16 | [] | no_license | JoeltonLP/circlo_area | a8ce919bb2dea71be0aa694dfa0e96555fcc1d35 | 7b3ff55d505c7b06e9d3089cc05cf08d27f039aa | refs/heads/main | 2023-04-09T12:54:59.556766 | 2021-04-13T19:39:31 | 2021-04-13T19:39:31 | 355,948,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!/usr/bin/python3
import sys
from math import pi
def area_circle(raio):
raio = pi * float(raio) ** 2
return raio
def help():
print('needs an argument\n')
    print('syntax:')
print('use: {} <raio>\n' .format(sys.argv[0]))
if __name__ == '__main__':
if len(sys.argv) < 2:
help()
else:
area = area_circle(sys.argv[1])
print('Area Circle: {:.2f}' .format(area))
| [
"[email protected]"
] | |
d511d554edfff742d4b47863aab36c2676babf88 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/network/azure-mgmt-network/generated_samples/network_manager_get.py | 2dcc0bfecc4e973785a2db207ad947fb4a3f5f17 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,561 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python network_manager_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.network_managers.get(
resource_group_name="rg1",
network_manager_name="testNetworkManager",
)
print(response)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-09-01/examples/NetworkManagerGet.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7760fe3621617197d17783aec54d43f5126e894e | eea07e52aa5304b4a0613c625dc79f49c8356cc7 | /deploy.py | dd29d1cfb5e3cc587531ea94e0d95daf7439105a | [
"MIT"
] | permissive | sanedragon/dot | 3abf717288bbbf809e6a6122d8235b3a28f20c61 | fb165af44a2dbd0813c3053710917cd7a0509193 | refs/heads/master | 2021-01-23T11:59:14.897176 | 2013-02-03T19:19:32 | 2013-02-03T19:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Intelligently and interactively installs dot files from the same directory using
symbolic links from the home directory.
'''
import os
import sys
import shutil
files_to_ignore = (
'deploy.py',
'LICENSE',
'README.md'
)
possible_ssh_private_key_files = (
'id_rsa',
'id_dsa',
'hypodermia.pem'
)
def main():
home_dir = os.path.expanduser('~')
repo_dir = os.path.dirname(os.path.realpath(__file__))
print 'Home directory: %s' % home_dir
print 'Repo directory: %s' % repo_dir
dot_files = os.listdir(repo_dir)
dot_files.sort()
replace_all = False
for dot_file in dot_files:
if dot_file.startswith('.') or dot_file in files_to_ignore:
continue
# git shouldn't have a private key (correctly so)
# so it needs to be moved aside and restored after
if dot_file == '.ssh':
for key_file in possible_ssh_private_key_files:
full_key_file = os.path.join(home_dir, '.ssh', key_file)
temp_key_file = os.path.join(home_dir, key_file)
if os.path.exists(full_key_file):
os.rename(full_key_file, temp_key_file)
full_dot_file = os.path.join(repo_dir, dot_file)
proposed_link_file = os.path.join(home_dir, '.' + dot_file)
try:
if os.path.exists(proposed_link_file):
if os.path.islink(proposed_link_file):
if os.readlink(proposed_link_file) == full_dot_file:
print 'Skipping already deployed dot file: %s' % proposed_link_file
continue
if replace_all:
answer = 'y'
else:
answer = raw_input('Overwrite? %s [ynaq]: ' % proposed_link_file).strip()
if answer == 'q':
print 'Quitting without overwriting %s' % proposed_link_file
sys.exit()
elif answer == 'n':
print 'Skipping %s' % proposed_link_file
continue
elif answer in ('a', 'y'):
if answer == 'a':
replace_all = True
print full_dot_file + ' => ' + proposed_link_file
try:
os.remove(proposed_link_file)
except OSError:
shutil.rmtree(proposed_link_file)
os.symlink(full_dot_file, proposed_link_file)
else:
print 'Did not understand input. Quitting.'
sys.exit()
else:
print full_dot_file + ' => ' + proposed_link_file
os.symlink(full_dot_file, proposed_link_file)
finally:
if dot_file == '.ssh':
for key_file in possible_ssh_private_key_files:
full_key_file = os.path.join(home_dir, '.ssh', key_file)
temp_key_file = os.path.join(home_dir, key_file)
if os.path.exists(temp_key_file):
os.rename(temp_key_file, full_key_file)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
108a2a5962b56c5693ee9fcf5027dd1c4c1a5272 | f225c95cf67c0b9c1278ba2376197ab747c91ae1 | /app_config/__init__.py | bea8bfcf231d0c4c783713ca61cd889f37025b98 | [] | no_license | pkrasnyuk/python-RESTfull-WebService | fafb91394af93616596b7e826100000704f8dbf5 | 6789ddd646fcf79f10811ec6ab68d445490e41a9 | refs/heads/master | 2023-08-03T23:16:21.366715 | 2023-07-15T01:21:42 | 2023-07-25T08:47:02 | 189,635,904 | 2 | 1 | null | 2023-08-02T02:27:38 | 2019-05-31T17:49:24 | Python | UTF-8 | Python | false | false | 2,494 | py | import json
import os
from app_config.config import Config
def load_configuration(config_file_path):
if config_file_path and os.path.isfile(config_file_path):
with open(config_file_path, "r") as config_file:
try:
load_result = json.dumps(json.load(config_file))
return __obj_creator(json.loads(load_result))
except Exception as e:
print(e)
return __obj_creator(None)
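# Expected config.json shape, inferred from __obj_creator below; all values
# here are placeholders:
#
# {
#   "app":      {"host": "0.0.0.0", "port": 5000},
#   "db":       {"connectionString": "mongodb://localhost:27017", "dbName": "appdb"},
#   "security": {"privateKey": "<secret>", "tokenExpiry": 3600},
#   "logging":  {"loggingName": "app", "loggingFile": "app.log"}
# }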
def __obj_creator(json_object):
config_host = None
config_port = 0
config_connection_string = None
config_db_name = None
config_security_private_key = None
config_token_expiry = 0
config_logging_name = None
config_logging_file = None
if json_object is not None:
if 'app' in json_object and 'host' in json_object['app']:
config_host = json_object['app']['host']
if 'app' in json_object and 'port' in json_object['app']:
config_port = int(json_object['app']['port'])
if 'db' in json_object and 'connectionString' in json_object['db']:
config_connection_string = json_object['db']['connectionString']
if 'db' in json_object and 'dbName' in json_object['db']:
config_db_name = json_object['db']['dbName']
if 'security' in json_object and 'privateKey' in json_object['security']:
config_security_private_key = json_object['security']['privateKey']
if 'security' in json_object and 'tokenExpiry' in json_object['security']:
config_token_expiry = json_object['security']['tokenExpiry']
if 'logging' in json_object and 'loggingName' in json_object['logging']:
config_logging_name = json_object['logging']['loggingName']
if 'logging' in json_object and 'loggingFile' in json_object['logging']:
config_logging_file = json_object['logging']['loggingFile']
return Config(
host=config_host,
port=config_port,
connection_string=config_connection_string,
db_name=config_db_name,
private_key=config_security_private_key,
token_expiry=config_token_expiry,
logging_name=config_logging_name,
logging_file=config_logging_file)
def __main():
config_file_path = "../config.json"
print(load_configuration(config_file_path))
if __name__ == '__main__':
__main()
| [
"[email protected]"
] | |
fb0fa69a4a05906e134ce9a2a9602ffc95ad2807 | b9ff5e3bdc9be013590ef8438cc5b6c143e757f0 | /code/products/migrations/0008_auto_20180620_2013.py | 3e0a502566c25cb03b32c4078592be97e8be2e3e | [] | no_license | lethisa/django2ecom | b8792789337d6e727dda65b4307bc58a02cb43db | 65373d1b057cabfac23b881c1f86f3eb4a59b570 | refs/heads/master | 2020-03-21T00:25:18.335192 | 2018-07-06T03:20:54 | 2018-07-06T03:20:54 | 137,892,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # Generated by Django 2.0.6 on 2018-06-20 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0007_product_slug'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug',
field=models.SlugField(),
),
]
| [
"[email protected]"
] | |
f0fed322463f67b3e7e37bc823ba6f1b32059ddc | 7cb3274199793f9c5f193d43c92d832e20668f89 | /trainmodel.py | f441223bfbe3e8bea8a0cef9b82c40d9c3e88a8b | [] | no_license | anuj2110/FaceRecognition | 51956c71ee197edbc48a4ddc2d668098add91c5e | fe419ebe79d2deb259bab29fb867d001b3af6cd3 | refs/heads/master | 2021-05-21T04:07:11.919720 | 2020-04-13T12:42:02 | 2020-04-13T12:42:02 | 252,535,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 13:21:54 2020
@author: Anuj
"""
from tensorflow.keras import layers as l
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.applications.vgg16 import VGG16
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
from glob import glob
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
train_path = "./Images/train"
test_path = "./Images/test"
folders = os.listdir(train_path)
labels = len(folders)  # number of target classes
target_size = [224,224]
train_generator = ImageDataGenerator(rescale=1/255,
horizontal_flip = True,
shear_range = 0.2,
zoom_range = 0.2)
test_generator = ImageDataGenerator(rescale=1/255)
vgg = VGG16(input_shape=target_size+[3], weights = 'imagenet', include_top=False)
for layer in vgg.layers:
layer.trainable = False
x = l.Flatten()(vgg.output)
prediction = l.Dense(labels,activation = 'softmax')(x)
model = Model(inputs = vgg.input,outputs = prediction)
model.summary()
# tell the model what cost and optimization method to use
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
training_set = train_generator.flow_from_directory(train_path,
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
test_set = test_generator.flow_from_directory(test_path,
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
r = model.fit_generator(
training_set,
validation_data=test_set,
epochs=5,
steps_per_epoch=len(training_set),
validation_steps=len(test_set)
)
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss.png')
plt.show()
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc.png')
plt.show()
model.save('facefeatures_new_model.h5') | [
"[email protected]"
] | |
6878cac300bf18e2e450cd1656b0b26693b23a7b | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Vulkan/BufferCreateInfo.py | 5d76890125f871cb5c2cb76be92e0dd89aa0a4f7 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,209 | py | # encoding: utf-8
# module gi.repository.Vulkan
# from /usr/lib64/girepository-1.0/Vulkan-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
class BufferCreateInfo(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(BufferCreateInfo), '__module__': 'gi.repository.Vulkan', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'BufferCreateInfo' objects>, '__weakref__': <attribute '__weakref__' of 'BufferCreateInfo' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(BufferCreateInfo)
| [
"[email protected]"
] | |
8cf6db9dd4b6aa7154a6d86c2408d2b5eaa07ed3 | 0db19410e9751790af8ce4a0a9332293e379c02f | /mmpose/datasets/transforms/__init__.py | 7ccbf7dac2822a8b8d093366c2632ee81c9d88f9 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 971 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .bottomup_transforms import (BottomupGetHeatmapMask, BottomupRandomAffine,
BottomupResize)
from .common_transforms import (Albumentation, GenerateTarget,
GetBBoxCenterScale, PhotometricDistortion,
RandomBBoxTransform, RandomFlip,
RandomHalfBody)
from .converting import KeypointConverter
from .formatting import PackPoseInputs
from .loading import LoadImage
from .pose3d_transforms import RandomFlipAroundRoot
from .topdown_transforms import TopdownAffine
__all__ = [
'GetBBoxCenterScale', 'RandomBBoxTransform', 'RandomFlip',
'RandomHalfBody', 'TopdownAffine', 'Albumentation',
'PhotometricDistortion', 'PackPoseInputs', 'LoadImage',
'BottomupGetHeatmapMask', 'BottomupRandomAffine', 'BottomupResize',
'GenerateTarget', 'KeypointConverter', 'RandomFlipAroundRoot'
]
| [
"[email protected]"
] | |
e4103af9c334ebda338051406002d470f160085f | e8bc319b26f4ca69e363b81194da3692fc9900b9 | /120.三角形最小路径和.py | b799f63d281ad9d47b8119434723fd39a785e344 | [] | no_license | chxii/leetcode | 4b5c2d5acc4c10d93a3c1e2d9773d38590b5408f | a1c54a867ffdf0690e26e73999f8efc518fef442 | refs/heads/master | 2022-04-11T15:56:37.123797 | 2020-04-07T08:37:55 | 2020-04-07T08:37:55 | 238,866,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | #
# @lc app=leetcode.cn id=120 lang=python3
#
# [120] Triangle (minimum path sum)
#
# @lc code=start
from typing import List
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        if not triangle:
            return 0
        m = len(triangle)
        n = len(triangle[-1])
        # dp[i][j]: minimum path sum from the apex down to cell (i, j)
        dp = [[float('inf')] * n for _ in range(m)]
        dp[0][0] = triangle[0][0]
        for i in range(1, m):
            for j in range(i + 1):
                # a cell is reachable only from the one or two cells above it
                if j - 1 >= 0:
                    dp[i][j] = triangle[i][j] + min(dp[i-1][j-1], dp[i-1][j])
                else:
                    dp[i][j] = triangle[i][j] + dp[i-1][j]
        return min(dp[-1])
# @lc code=end
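# Example: Solution().minimumTotal([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]) == 11
# (path 2 -> 3 -> 5 -> 1)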
| [
"[email protected]"
] | |
9f437daeae8ef5eed4fff4417f18ad21366df054 | 3ee0c019a7b10a7a78dfc07d61da5d2b3cf3ad27 | /191113/swep_2117_홈방범서비스.py | 8a97c9aad3e9acbaff3dc1f0a96086db81eb8033 | [] | no_license | JiminLee411/algorithms | a32ebc9bb2ba4f68e7f80400a7bc26fd1c3a39c7 | 235834d1a50d5054f064bc248a066cb51c0835f5 | refs/heads/master | 2020-06-27T01:37:55.390510 | 2019-11-14T08:57:16 | 2019-11-14T08:57:16 | 199,811,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | import sys
sys.stdin = open('swep_2117.txt', 'r')
from collections import deque
delta = ((1, 0), (-1, 0), (0, 1), (0, -1))
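# SWEA 2117 (home security service): from every cell, grow a diamond-shaped
# service area with BFS; an area of size k covers k*k + (k-1)*(k-1) cells,
# which is also its operating cost, paid for by M per covered home.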
def find(x, y):
global homeCnt
visited = [[0 for _ in range(N + 1)] for _ in range(N + 1)]
visited[x][y] = 1
cnt = city[x][y]
Q = deque()
    Q.append([x, y])
k = 1
while Q:
x, y = Q.popleft()
if visited[x][y] == N + 1:
break
for dx, dy in delta:
nx, ny = x + dx, y + dy
if nx < 0 or ny < 0 or nx >= N or ny >= N:
continue
if not visited[nx][ny]:
visited[nx][ny] = visited[x][y] + 1
if visited[nx][ny] > k:
k = visited[nx][ny]
Q.append([nx, ny])
if city[nx][ny]:
cnt += 1
        # keep this area if the homes' total payment covers the operating cost
        if cnt*M - (k*k + (k-1)*(k-1)) >= 0 and homeCnt < cnt:
            homeCnt = cnt
T = int(input())
for tc in range(1, T + 1):
N, M = map(int, input().split())
city = [list(map(int, input().split())) for _ in range(N)]
    homeCnt = 1
for r in range(N):
for c in range(N):
find(r, c)
    print('#{} {}'.format(tc, homeCnt)) | [
"[email protected]"
] | |
8db5bfa3d73b5f07725a2a441166c5d20a4219dc | 15781159d59e07209382c0c560ec75497186bd27 | /project2/plot.py | 3253df8877ebf22f41f058e6227082c6035ea1c9 | [] | no_license | dougshidong/mech516 | cfad3569ef6fd50b0054bf913315d3cdfca0a62d | 8358f5429881441dd9809170afeaf3db93c72e7b | refs/heads/master | 2021-01-10T23:03:21.914212 | 2016-12-02T21:33:59 | 2016-12-02T21:33:59 | 70,628,442 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | #!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fname = ['Godunov','HLLC','MacCormack']
inname = 'exact.dat'
xe, re, ue, pe = np.loadtxt(inname, dtype = np.float64, skiprows=1, unpack=True)
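# the exact Riemann solution provides the reference curves in every panel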
for caseid in range(3):
inname = fname[caseid] + '.dat'
outname = fname[caseid] + '.pdf'
x, r, u, p = np.loadtxt(inname, dtype = np.float64, skiprows=1, unpack=True)
pp = PdfPages(outname)
fig, axarr = plt.subplots(3, sharex=True)
axarr[0].set_title(fname[caseid])
axarr[0].plot(xe, re, '-k')
axarr[0].plot(x, r, 'or',mec='r',mfc='none',ms=5)
axarr[0].set_ylabel(r'\rho')
axarr[1].plot(xe, ue, '-k')
axarr[1].plot(x, u, 'og',mec='g',mfc='none',ms=5)
axarr[1].set_ylabel(r'u')
axarr[2].plot(xe, pe, '-k')
axarr[2].plot(x, p, 'ob',mec='b',mfc='none',ms=5)
axarr[2].set_ylabel(r'p')
axarr[2].set_xlabel(r'x')
axarr[0].set_ylim([0.75, 1.6])
axarr[1].set_ylim([-0.02, 0.40])
axarr[2].set_ylim([0.9, 2.1])
for i in range(3):
axarr[i].axvline(x= 8.655,color='k',ls='-', label='Contact Surface')
axarr[i].axvline(x= -41.75,color='k',ls='--',label='Left Rarefaction Head')
axarr[i].axvline(x= -31.35,color='k',ls='-.',label='Left Rarefaction Tail')
axarr[i].axvline(x= 35.35,color='k',ls=':', label='Right Shockwave')
axarr[0].legend(loc=2, fontsize = 'xx-small')
plt.tight_layout()
    pp.savefig(bbox_inches='tight')
pp.close()
| [
"[email protected]"
] | |
90472afe2ddab6686099aefea75222231352131e | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /特殊的函数/venv/Lib/site-packages/tensorflow/contrib/data/python/ops/interleave_ops.py | 2485c0d22098deb12eed5864ddec8218fe5687a9 | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,052 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import readers
from tensorflow.python.util import deprecation
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
@{tf.data.Dataset.interleave}, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.contrib.data.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset, map_func, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements)
return _apply_fn
@deprecation.deprecated(
None, "Use `tf.contrib.data.parallel_interleave(..., sloppy=True)`.")
def sloppy_interleave(map_func, cycle_length, block_length=1):
"""A non-deterministic version of the `Dataset.interleave()` transformation.
`sloppy_interleave()` maps `map_func` across `dataset`, and
non-deterministically interleaves the results.
The resulting dataset is almost identical to `interleave`. The key
difference is that if retrieving a value from a given output iterator would
cause `get_next` to block, that iterator will be skipped, and consumed
when next available. If consuming from all iterators would cause the
`get_next` call to block, the `get_next` call blocks until the first value is
available.
If the underlying datasets produce elements as fast as they are consumed, the
`sloppy_interleave` transformation behaves identically to `interleave`.
However, if an underlying dataset would block the consumer,
`sloppy_interleave` can violate the round-robin order (that `interleave`
strictly obeys), producing an element from a different underlying
dataset instead.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.contrib.data.sloppy_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: The order of elements in the resulting dataset is not
deterministic. Use `Dataset.interleave()` if you want the elements to have a
deterministic order.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`. Note:
`sloppy_interleave` will skip the remainder of elements in the
`block_length` in order to avoid blocking.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset,
map_func,
cycle_length,
block_length,
sloppy=True,
buffer_output_elements=None,
prefetch_input_elements=None)
return _apply_fn
| [
"[email protected]"
] | |
4f44ff65f94163b3fd4e6aed042fb3f16285a7c3 | ae92e7e1a3e66059e81da62dca274ea664fa3568 | /eight-queens/main_demo3.py | 2367d71070d386ecdcadece789ef2d77ad5050a6 | [] | no_license | nasihs/omelette | a90b6f80e4025be8bbfd608953773f5e1e862836 | 135e199c12d53d36ec05e3a1b615a478aab71611 | refs/heads/master | 2021-09-20T12:15:56.344038 | 2018-08-09T13:53:27 | 2018-08-09T13:53:27 | 141,831,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,620 | py | # -*- coding:utf-8 -*-
"""
Auto-advance demo:
- added inter-process communication
- added automatic step counting
"""
import cv2
import numpy as np
import sys
import subprocess
import serial
import identify_demo as id
from multiprocessing import Process, Queue
def count_blocks():
pass
def move_forward(temp, fwd, q):
    # move 'temp' grid cells; fwd selects forward / backward
    flag = 1
    while True:
        coor = q.get(True)
        if temp == 0:
            # stop the car
            ser.write([0x5A]) # header frame
            ser.write([0x01]) # direction
            ser.write([0x00]) # speed
            break
        # translate the car
        if fwd:
            # move forward
            ser.write([0x5A]) # header frame
            ser.write([0x0])
            pass
        else:
            # move backward
            # ser.write()
            # ser.write()
            pass
        # check whether the next grid cell has been reached
        if flag == 1 and (coor[1] == center[1]):
            flag = 0
            temp -= 1
            #print (temp)
        if coor[1] != center[1]:
            flag = 1
        else:
            flag = 0
def move_right(temp, rht, q):
    # move 'temp' grid cells sideways; rht selects right / left
    flag = 1
    while True:
        coor = q.get(True)
        if temp == 0:
            # stop the car
            # ser.write()
            # ser.write()
            break
        # translate the car
        if rht:
            # move right
            # ser.write()
            # ser.write()
            pass
        else:
            # move left
            # ser.write()
            # ser.write()
            pass
        # check whether the next grid cell has been reached
        if flag == 1 and (coor[1] == center[1]):
            flag = 0
            temp -= 1
            #print (temp)
        if coor[1] != center[1]:
            flag = 1
        else:
            flag = 0
def recognize(q):
print ("camera initializing...")
cap0 = cv2.VideoCapture(0)
cap0.set(3,320)
cap0.set(4,240)
if cap0.isOpened:
print ("cameras are opened")
else:
print ("cameras are not opened")
print ("program exiting...")
sys.exit()
print ("FPS0:", cap0.get(5))
print ("cameras successfully initialized")
while True:
try:
ret0, frame0 = cap0.read()
except:
print ("cap0.read failed")
cap0.release()
sys.exit()
#ret, frame = cap0.read()
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
cv2.imshow("camera0", frame0)
except:
print ("capture failed")
cap0.release()
print ("exiting...")
sys.exit()
try:
diff1, diff2 = (id.identify_mid(frame0))
except:
print("identify_mid failed")
cap0.release()
sys.exit()
if diff1 > 255:
diff1 = 255
d1 = int(diff1)
d2 = int(diff2)
#coor = (d1, d2)
#print (coor)
q.put((d1, d2))
if cv2.waitKey(5) & 0xFF == ord("q"):
cap0.release()
cv2.destroyAllWindows()
break
def auto_move(q, steps):
    for i in steps:
        # coor = q.get(True)
        forward = i[0] < 0
        temp_x = abs(i[0])
        right = i[1] > 0
        temp_y = abs(i[1])
        move_forward(temp_x, forward, q)
        move_right(temp_y, right, q)
    '''
    print (i)
    while True:
        # positive dx / dy: translate left or down
        if i[0] != 0:
            temp = abs(i[0])
            for k in range(temp):
                # move left
                # ser.write (0xA5)
    '''
if __name__ == "__main__":
    # set the camera center coordinates
center = (160, 120)
ser = serial.Serial('/dev/ttyAMA0', 115200, timeout = 3)
"""
    # initialize the camera and the serial port
cap1 = cv2.VideoCapture(0)
cap1.set(3,300)
cap1.set(4,300)
#ser = serial.Serial("dev/ttyS0", 115200, timeout = 3)
'''
    # identify the starting cell
ret, frame = cap1.read()
origin = [id.identify_num(frame)]
'''
"""
    # origin is the entry position on the board
    origin = [57]
    # input route
    #route = [41, 55, 39, 22]
    route1 = [42, 26, 32, 8]
    # full route
    route = origin + route1
    print ("full route:", route)
    # board coordinate system: cell number -> (x, y)
map = {"1": (1, 1), "2": (2, 1), "3": (3, 1), "4": (4, 1),
"5": (5, 1), "6": (6, 1), "7": (7, 1), "8": (8, 1),
"9": (1, 2), "10": (2, 2), "11": (3, 2), "12": (4, 2),
"13": (5, 2), "14": (6 ,2), "15": (7, 2), "16": (8, 2),
"17": (1, 3), "18": (2, 3), "19": (3, 3), "20": (4, 3),
"21": (5, 3), "22": (6, 3), "23": (7, 3), "24": (8, 3),
"25": (1, 4), "26": (2, 4), "27": (3, 4), "28": (4, 4),
"29": (5, 4), "30": (6, 4), "31": (7, 4), "32": (8, 4),
"33": (1, 5), "34": (2, 5), "35": (3, 5), "36": (4, 5),
"37": (5, 5), "38": (6, 5), "39": (7, 5), "40": (8, 5),
"41": (1, 6), "42": (2, 6), "43": (3, 6), "44": (4, 6),
"45": (5, 6), "46": (6, 6), "47": (7, 6), "48": (8, 6),
"49": (1, 7), "50": (2, 7), "51": (3, 7), "52": (4, 7),
"53": (5, 7), "54": (6, 7), "55": (7, 7), "56": (8, 7),
"57": (1, 8), "58": (2, 8), "59": (3, 8), "60": (4, 8),
"61": (5, 8), "62": (6, 8), "63": (7, 8), "64": (8, 8)}
    # moving from one route cell to the next counts as one step
steps = []
for i in range(1, len(route)):
x2 = map[str(route[i])][0]
x1 = map[str(route[i - 1])][0]
dx = x2 - x1
y2 = map[str(route[i])][1]
y1 = map[str(route[i - 1])][1]
dy = y2 - y1
steps.append((dx, dy))
    # dx, dy: number of cells to move along the x / y axis for each step
    print ("steps:", steps)
q = Queue()
p_rec = Process(target = recognize, args = (q,))
p_mov = Process(target = auto_move, args = (q, steps))
p_rec.start()
p_mov.start()
p_mov.join()
p_rec.terminate()
print ("done")
sys.exit()
| [
"[email protected]"
] | |
81a97264c5de5a15b96cb8468452246dc403a2ef | 1157e03573c8a1310e7145a3d6426ab79bdc4681 | /utils/label_check.py | 35e9db3dc3f2acbca3d68d2ff1345fb33da87d0a | [] | no_license | Interesting6/SegySegUNet | 3a5d2568398e72032cbed03c89ff60737c9dca4b | 91dc6475dd0c94d4e727a4dc2a7802a2d559832c | refs/heads/main | 2023-02-23T10:41:27.232883 | 2021-01-17T04:30:20 | 2021-01-17T04:30:20 | 330,310,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import matplotlib.pyplot as plt
import numpy as np
import segyio
import os
data_dir = "/home/cym/Datasets/StData-12/F3_block/"
data_path = os.path.join(data_dir, "F3Seis_IL190_490_Amplitude.segy")
label_path = os.path.join(data_dir, "F3Seis_IL190_490_Label.segy")
data_cube = np.transpose(segyio.cube(data_path), (0,2,1))
label_cube = np.transpose(segyio.cube(label_path), (0,2,1))
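# assuming segyio.cube returns (ilines, xlines, samples), the transpose makes
# each inline slice a (samples, xlines) depth section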
label2 = label_cube[1]
print(label2[:50, :10])
print("-------")
print(label2[:50, -10:])
| [
"[email protected]"
] | |
39bc582e2ac16c9c823548cbeff5e7f1e3705111 | 2893460027b0f109f8d0e2a6aaf3b2f03befd642 | /media.py | df39668871b60ce68997834c86305c3a006cbf48 | [] | no_license | ldsz6524/Programs | f7f4efba9fe2b2f428a4cdce65bbd861c5c153f8 | ea6310a1fe52642e629875ba7d2f215c5a4c2b12 | refs/heads/master | 2022-04-11T17:59:57.119771 | 2020-03-12T05:17:29 | 2020-03-12T05:17:29 | 113,326,209 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import webbrowser
class Movie():
'''This class provides a way to store movie related information'''
def __init__(self, movie_title, poster_image, trailer_youtube):
        '''This method is used to initialise the object'''
        # The following assignments define the instance variables
self.title = movie_title
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
def show_trailer(self):
'''Show the trailer of the movie'''
webbrowser.open(self.trailer_youtube_url)
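# Example usage (with a hypothetical poster path and trailer URL):
#   toy_story = Movie("Toy Story", "poster.jpg",
#                     "https://www.youtube.com/watch?v=...")
#   toy_story.show_trailer()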
| [
"[email protected]"
] | |
c84a441156c8b3291af36939bd3f090075b2f96c | afa85dbb2e496b6357ffe4e163163cea76095e35 | /113mon.py | 8c3432a3c6bde889f77d36084b139c3e4f950b00 | [] | no_license | RaghulHari/pythonprogramming | f276d0669fe40d5c7bc0bff27f7192e3547a2375 | 29bdff7c6c038494ce396503acec832ac7bf888a | refs/heads/master | 2020-05-30T06:28:39.459731 | 2019-10-03T07:33:08 | 2019-10-03T07:33:08 | 189,580,304 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | l=input()
l=l.split("/")
a=l[0]
b=l[1]
c=l[1]
e=len(a)
f=len(b)
g=len(c)
if e==2 and f==2 and g==4 and int(l[0])>0 and int(l[0])<32 and int(l[1])>=1 and int(l[1])<13:
print("yes")
else:
print("no")
| [
"[email protected]"
] | |
8212533adce5b873f82bb889796cd14c74185f25 | fd18d9737b8a48c452272cc525397757e3b05bd3 | /wirfi_app/migrations/0022_presetfilter.py | fd6a4d698ad24dfe8cd1e47cd182e762ebe7da62 | [] | no_license | rameshdhungana/wirfi-backend | 7551081bf1ccac4d6b8977b6f778001f5f96e9b2 | a1afcb3543dfbbe582383532174e3f84a30712b6 | refs/heads/main | 2023-01-02T13:23:44.041954 | 2019-01-31T10:17:05 | 2019-01-31T10:17:05 | 305,905,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # Generated by Django 2.0.7 on 2018-09-07 08:42
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wirfi_app', '0021_remove_device_priority'),
]
operations = [
migrations.CreateModel(
name='PresetFilter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('filter_type', models.PositiveIntegerField()),
('filter_keys', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
('sort_type', models.PositiveIntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
8d1500fc9d9743384c3cf312d133450010f67d6f | 8afae7ea1b1129edbcba6558cc0e1197fcf9d868 | /data/acs_pums/analyze_energy.py | 98f1a3cf52cd7af1f8eb309dec83f39c915a7376 | [] | no_license | mjstevens777/energy-portal | bb3f89d4fcb29f778df2940d394995fe274b59c4 | 4dbd59972091058320da00818b2256e4a16826aa | refs/heads/master | 2021-01-21T04:44:33.205235 | 2016-06-08T22:40:35 | 2016-06-08T22:40:35 | 55,736,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | import csv
from collections import defaultdict
vars = ['ELEP', 'GASP', 'FULP']
data = {}
for var in vars:
data[var] = defaultdict(float)
data[var + '_weight'] = defaultdict(float)
puma_ids = set()
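# data[var] accumulates weight * value per PUMA and data[var + '_weight'] the
# total weight, so dividing the two later yields a weighted mean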
with open('acs_pums/data/energy_usage_individual.csv') as f:
for i, row in enumerate(csv.DictReader(f)):
if i % 10000 == 0:
print('.', end='', flush=True)
puma_id = row['puma_id']
puma_ids.add(puma_id)
for var in ['ELEP', 'GASP', 'FULP']:
value = row[var]
if value.isdigit() and int(value) >= 4:
weight = float(row['WGTP'])
data[var][puma_id] += float(value) * weight
data[var + '_weight'][puma_id] += weight
with open('acs_pums/energy_by_puma.csv', 'w') as f:
writer = csv.DictWriter(f, ['puma_id'] + vars)
writer.writeheader()
for puma_id in puma_ids:
row = {'puma_id': puma_id}
for var in vars:
if puma_id in data[var]:
row[var] = data[var][puma_id] / data[var + '_weight'][puma_id]
else:
row[var] = None
writer.writerow(row)
| [
"[email protected]"
] | |
2670bf4dde4523bdc076aa6d44c2a8acca432221 | 91d101db50816f2cda918998fd1b70396f27a143 | /mllib_trial_pca.py | 5a1866eacaa8c8d3eb96b253c080fd33c5b7d80e | [] | no_license | shah-deven/Reducing-Inequalities | b6d93068b4125b58362fe095cb5001d0b0d3b236 | d67b24e8ecd4f5547f06e622351130c644efd126 | refs/heads/master | 2020-03-10T17:12:44.198588 | 2018-04-14T07:52:30 | 2018-04-14T07:52:30 | 129,494,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | from pyspark import SparkContext, SQLContext
from pyspark.mllib.regression import LabeledPoint, RidgeRegressionWithSGD
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.linalg import Vector
import numpy as np
def parsePoint(line):
    # parse one CSV line into a LabeledPoint: the first numeric value is the
    # label, the remaining values are the features
    values = []
    # if line.split(",")[1] != 'income':
    for x in line.replace(",", " ").split(" "):
        try:
            values.append(float(x))
        except:
            pass
    if len(values) != 0:
        return LabeledPoint(values[0], values[1:])
sc = SparkContext()
sql = SQLContext(sc)
#train_data = sc.textFile("./reduced-inequalities/flattened_with_years.csv")
train_data = (sql.read.format("csv").option("header","true").load('./flattened_with_years.csv'))
# first column is the target, the remaining columns are the features
train_data_y = train_data.select(train_data.columns[0]).rdd.map(lambda r: float(r[0]))
train_data = train_data[train_data.columns[1:]]
vector_data = train_data.rdd.map(lambda x: np.array(x, dtype=float))
matrix = RowMatrix(vector_data)
pc = matrix.computePrincipalComponents(7)
# project the training features onto the top 7 principal components
projected_data_train = matrix.multiply(pc).rows
# zip assumes both RDDs keep the same element order and partitioning
df_train = train_data_y.zip(projected_data_train).map(lambda x: LabeledPoint(x[0], x[1]))
model = RidgeRegressionWithSGD.train(df_train)
print(model.weights)
print(df_train.take(3))
test_data = sc.textFile("./reduced-inequalities/flattened_2016_with_year.csv")
parsed_data_test = test_data.map(parsePoint).filter(lambda p: p is not None)
# project the test features with the same principal components as training
test_matrix = RowMatrix(parsed_data_test.map(lambda p: p.features))
projected_data_test = test_matrix.multiply(pc).rows
valuesAndPreds = parsed_data_test.map(lambda p: p.label).zip(projected_data_test).map(
    lambda vp: (vp[0], model.predict(vp[1])))
print(valuesAndPreds.take(3))
mse = valuesAndPreds.map(lambda vp: (vp[0] - vp[1]) ** 2).reduce(lambda x, y: x + y) / valuesAndPreds.count()
print("Mean Squared Error: ", mse)
| [
"[email protected]"
] | |
6191ce97580edf97f30469eba4fddaa91ca01a42 | 6a46949273edb67a5962f88a85c93612c0cb4810 | /constants.py | abea9333fe550eb4920a795a163c2698779e608a | [] | no_license | Jonasori/Outdated-Disk-Modeling | 1c3e22bfca4b6b4e6882a8887b3fb9fac1d7529f | 1964a2f543870cb112421eb32ed3a725e0acf842 | refs/heads/master | 2020-03-11T01:16:22.500038 | 2018-09-27T03:36:02 | 2018-09-27T03:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | """A place to put all the junky equations I don't want elsewhere.
FOR GRID SEARCH.
"""
from astropy.constants import c
from astropy.io import fits
import numpy as np
import datetime
c = c.to('km/s').value
mol = 'hco'
# These frequencies come from Splatalogue and are different than those
# embedded in, for example, the uvf file imported as hdr below
# which gives restfreq(hco) = 356.72278845870005
lines = {'hco': {'restfreq': 356.73422300,
'jnum': 3,
'rms': 1,
'chanstep_freq': 1 * 0.000488281,
'baseline_cutoff': 0,
'chan0_freq': 355.791034,
'spwID': 1},
'hcn': {'restfreq': 354.50547590,
'jnum': 3,
'rms': 1,
'chanstep_freq': 1 * 0.000488281,
'baseline_cutoff': 0,
'chan0_freq': 354.2837,
'spwID': 0},
'co': {'restfreq': 345.79598990,
'jnum': 2,
'rms': 1,
'chanstep_freq': -1 * 0.000488281,
'baseline_cutoff': 35,
'chan0_freq': 346.114523,
'spwID': 2},
'cs': {'restfreq': 342.88285030,
'jnum': 6,
'rms': 1,
'chanstep_freq': -1 * 0.000488281,
'baseline_cutoff': 30,
'chan0_freq': 344.237292,
'spwID': 3}
}
"""
headers = {'hco': {'im': fits.getheader('./data/hco/hco.fits'),
'vis': fits.getheader('./data/hco/hco.uvf')},
'hcn': {'im': fits.getheader('./data/hcn/hcn.fits'),
'vis': fits.getheader('./data/hco/hcn.uvf')},
'co': {'im': fits.getheader('./data/co/co.fits'),
'vis': fits.getheader('./data/co/co.uvf')},
'cs': {'im': fits.getheader('./data//cs.fits'),
'vis': fits.getheader('./data/cs/cs.uvf')}
}
"""
# DATA FILE NAME
def get_data_path(mol, short_vis_only=False):
"""Get the path to the data files for a given line."""
dataPath = './data/' + mol + '/' + mol
if short_vis_only is True:
dataPath += '-short' + str(lines[mol]['baseline_cutoff'])
return dataPath
dataPath = get_data_path(mol, short_vis_only=False)
# What day is it? Used to ID files.
months = ['jan', 'feb', 'march', 'april', 'may', 'june',
'july', 'aug', 'sep', 'oct', 'nov', 'dec']
td = datetime.datetime.now()
today = months[td.month - 1] + str(td.day)
# DEFAULT VALUES
# Column density [low, high]
col_dens = [1.3e21/(1.59e21), 1e30/(1.59e21)]
# Freeze out temp (K)
Tfo = 19
# Midplane temperature (K)
Tmid = 15
# Atmospheric temperature (K)
Tatm = 100
# Temp structure power law index ( T(r) ~ r^qq )
Tqq = -0.5
# Stellar mass, in solar masses [a,b]
m_star = [3.5, 0.4]
# Disk mass, in solar masses [a,b]
m_disk = [0.078, 0.028]
# Inner disk radius, in AU
r_in = [1., 1.]
# Outer disk radius, in AU
r_out = [500, 300]
# Handedness of rotation
rotHand = [-1, -1]
# Offsets (from center), in arcseconds
# centering_for_olay.cgdisp is the file that actually makes the green crosses!
# Williams values: offsets = [[-0.0298, 0.072], [-1.0456, -0.1879]]
# Fit values:
offsets = [[0.0002, 0.032], [-1.006, -0.318]]
# Williams vals: vsys = [10.55, 10.85]
vsys = [9.95, 10.75]
other_params = [col_dens, Tfo, Tmid, m_star, m_disk, r_in, rotHand, offsets]
def obs_stuff(mol):
"""Get freqs, restfreq, obsv, chanstep, both n_chans, and both chanmins.
Just putting this stuff in a function because it's ugly and line-dependent.
"""
jnum = lines[mol]['jnum']
# Dig some observational params out of the data file.
hdr = fits.getheader(dataPath + '.uvf')
restfreq = lines[mol]['restfreq']
# restfreq = hdr['CRVAL4'] * 1e-9
# Get the frequencies and velocities of each step
# {[arange(nchans) + 1 - chanNum] * chanStepFreq) + ChanNumFreq} * Hz2GHz
# [-25,...,25] * d_nu + ref_chan_freq
freqs = ( (np.arange(hdr['naxis4']) + 1 - hdr['crpix4']) * hdr['cdelt4'] + hdr['crval4']) * 1e-9
obsv = c * (restfreq-freqs)/restfreq
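    # radio Doppler convention: v = c * (nu_rest - nu) / nu_rest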
chan_dir = lines[mol]['chanstep_freq']/np.abs(lines[mol]['chanstep_freq'])
chanstep = -1 * chan_dir * np.abs(obsv[1]-obsv[0])
# chanstep = c * (lines[mol]['chanstep_freq']/lines[mol]['restfreq'])
# Find the largest distance between a point on the velocity grid and sysv
# Double it to cover both directions, convert from velocity to chans
# The raytracing code will interpolate this (larger) grid onto the smaller
# grid defined by nchans automatically.
nchans_a = int(2*np.ceil(np.abs(obsv-vsys[0]).max()/np.abs(chanstep))+1)
nchans_b = int(2*np.ceil(np.abs(obsv-vsys[1]).max()/np.abs(chanstep))+1)
chanmin_a = -(nchans_a/2.-.5) * chanstep
chanmin_b = -(nchans_b/2.-.5) * chanstep
n_chans, chanmins = [nchans_a, nchans_b], [chanmin_a, chanmin_b]
return [vsys, restfreq, freqs, obsv, chanstep, n_chans, chanmins, jnum]
# The end
| [
"[email protected]"
] | |
ad0a0089f5111ace12566437673580d3d18754df | 851b465959f5afbdae433714c2cc0000b8cb2b09 | /MINI2/member/models.py | 981439578adc5fc95b6dcf39652d3821718661cb | [] | no_license | haeinyy/webproject_communitysite | 944cf7b09b9a746297c8f53f4fb837641246ffc0 | 47ebbcb47fe348592d66e9574044d058a72c030e | refs/heads/main | 2023-06-20T05:03:44.311740 | 2021-07-19T08:58:41 | 2021-07-19T08:58:41 | 379,854,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | from django.db import models
# Create your models here.
# pk = user_phone
class Member(models.Model):
    user_name = models.CharField(max_length=15,
                               verbose_name="user name")
    user_pw = models.CharField(max_length=15,
                               verbose_name="password")
    user_phone = models.CharField(max_length=15,
                               verbose_name="phone number",
                               primary_key=True)
    c_date = models.DateTimeField() # date - filled in automatically
    def __str__(self):
        return self.user_phone
    # used when you want to specify the table name explicitly
    class Meta:
        db_table = 'member_member' # table name as shown in SQLite
### added section ###
# profile model
class Profile(models.Model):
    user_name = models.OneToOneField(Member, on_delete=models.CASCADE)
    # links User and Profile one-to-one ;; should this key on phone instead?
    description = models.TextField(blank=True)
    nickname = models.CharField(max_length=40, blank=True)
    image = models.ImageField(blank=True) #,upload_to="profile/%Y/%m"
    # to use ImageField, install the pillow package: pip install pillow
    # blank=True: the field may be left empty | [
"[email protected]"
] | |
f408b88581308159c47ed8e101bf9fac61db48b9 | 29beed260f7292a65b1a2ad9cbe710255029005f | /blog/migrations/0001_initial.py | 67a643c0cbc1a9d830c2f3ac2665822521917154 | [] | no_license | petervargaofficial/prvniblog | 11b2f98c3c378a858f2ca29bdfde5c4f78526fb2 | 9c16ab08166df04c1b1e521ee9776ee390cc0198 | refs/heads/master | 2020-06-10T03:06:29.053911 | 2016-12-10T15:27:26 | 2016-12-10T15:27:26 | 76,111,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-10 13:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
f1910d4e4c8d87f5aeb016310db8387103e97414 | 19d6b5b3692bd8bd62f3c1638c725a68c35f5469 | /blog.py | d33c62fa373353475f2d526a6c5b77c3b897478c | [] | no_license | jinkangcheng/Python | 1345999e69288569377025c0dfbb49809151ceb8 | 211ea738d7e9cd0c95432e42b8baabcc1a88db00 | refs/heads/master | 2021-01-10T09:06:44.063828 | 2016-01-05T15:02:33 | 2016-01-05T15:02:33 | 48,080,197 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,423 | py | #!/usr/bin/python
#coding:utf8
from flask import Flask, render_template, url_for, request,redirect,make_response,session
import os,MySQLdb
app = Flask(__name__)
app.secret_key='afjlsjfowflajflkajfkjfkaljf'
user_list = ['admin','anonymous','py']
imagepath = os.path.join(os.getcwd(),"static/images")
@app.route('/')
def index():
username = request.cookies.get('username')
if not username:
        username = u'Please log in first'
islogin = session.get('islogin')
    nav_list = [u'Home', u'Economy', u'Culture', u'Technology', u'Entertainment']
blog = {'title':'welcome to my blog','content':'hello, welcome to my blog.'}
blogtag = {'javascript':10,"python":20,"shell":5}
img = url_for('static', filename="images/cat.jpg")
return render_template('index.html', nav_list=nav_list, username=username, blog = blog, blogtag = blogtag, img=img, islogin=islogin)
@app.route('/reg', methods=['GET','POST'])
def regist():
if request.method == 'POST':
username = request.form['username']
conn = MySQLdb.connect(user='root',passwd='admin',host='127.0.0.1')
conn.select_db('blog')
curr = conn.cursor()
sql = 'insert into `user` (`id`,`username`) values (%d,"%s")' % (1,username)
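        # safer alternative (parameterized query, avoids SQL injection):
        # curr.execute('insert into `user` (`id`,`username`) values (%s, %s)', (1, username))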
curr.execute(sql)
conn.commit()
curr.close()
conn.close()
return "user %s regist ok!" % request.form['username']
else:
#request.args['username']
return render_template('regist.html')
@app.route('/upload', methods=['GET','POST'])
def upload():
if request.method == 'POST':
username = request.form['username']
file = request.files['img']
filename = file.filename
file.save(os.path.join(imagepath,filename))
return "<img src='static/images/%s' alt=''/>" % filename
else:
return render_template('upload.html')
@app.route('/login/', methods=['GET','POST'])
def login():
if request.method == 'POST':
username = request.form.get('username')
if username in user_list:
response = make_response(redirect('/'))
response.set_cookie('username', value=username, max_age=300)
session['islogin'] = '1'
return response
else:
session['islogin'] = '0'
return redirect('/login/')
else:
return render_template('login.html')
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0',port=5000) | [
"[email protected]"
] | |
d65aabe6a818c51ba0ee19a2b996ca3bf098aaea | f13ef014a60930f7571faa4e817ba393605e1d15 | /sistema/sistema/urls.py | 82ed756e7fc089a0531c445caafae3c00606dfe0 | [] | no_license | Robert321/Sistema2Sprint2 | b977e7947eb07ff406b9df4347a23b4dbaa7c3c1 | 663dac6ecc967fb6b675f5525fe16c0b253bd2ad | refs/heads/master | 2021-01-19T00:24:23.649139 | 2014-07-09T13:27:54 | 2014-07-09T13:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from django.views.generic import RedirectView
admin.autodiscover()
from sistema.apps.registro.views import *
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.MEDIA_ROOT,}),
)
urlpatterns += patterns(
'sistema.apps.registro.views',
# Examples:
# url(r'^$', 'sistema.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$','index_view',name='vista_principal'),
#url(r'^contacto/$','contacto_view',name='vista_contacto'),
url(r'^vercontacto/$','VerContacto',name='ver_contacto'),
url(r'^agregarestudiante/','addEstudiante',name='vista_estudiante'),
url(r'^menuestudiante/(?P<id>\d+)/$','MenuEstudiante',name='vista_menuestudiante'),
url(r'^menudocente/(?P<id>\d+)/$','MenuDocente',name='vista_menudocente'),
url(r'^agregardocente/','addDocente',name='vista_docente'),
url(r'^loginEstudiante/$','login_view_Estudiante',name='vista_login_estudiante'),
url(r'^logoutEstudiante/$','logout_view_Estudiante',name='vista_logout_estudiante'),
url(r'^loginDocente/$','login_view_Docente',name='vista_login_docente'),
url(r'^logoutDocente/$','logout_view_Docente',name='vista_logout_docente'),
url(r'^editarestudiante/(?P<id>\d+)/$',editar_estudiante),
url(r'^editardocente/(?P<id>\d+)/$',editar_docente),
url(r'^vermaterias/(?P<id>\d+)/$',VerMaterias),
url(r'^agregarrelcarest/(?P<id>\d+)/$',addRelCarEst),
url(r'^agregarrelestmat/(?P<id>\d+)/$',addRelEstMat),
url(r'^agregarrelestmat/(?P<id>\d+)/$',addRelEstMat),
url(r'^programacion/(?P<id>\d+)/$',programacion),
url(r'^carrera/$','addCarrera',name='vista_carrera'),
url(r'^agregarmateria/$',addMateria),
url(r'^agregarnotas/$',addNotas),
url(r'^agregarrelcarmat/$',addRelCarMat),
url(r'^registro/exito/$',exito),
url(r'^ver/carreras/$','VerCarrera',name='vista_carrera2'),
url(r'^autor/$',"addAutor",name='Autor'),
url(r'^login/$','login_view',name='vista_login'),
url(r'^logout/$','logout_view',name='vista_logout'),
url(r'^menudirector/$',"MenuDirector",name="vista_menu_director"),
url(r'^agregarrelcardoc/$',AsignacionDocente),
url(r'^agregarreldocmat/$',AsignacionDocenteMateria),
url(r'^nota/$',notas),
#url(r'^mapas/$',mapas),
#url(r'^uploads/$','upload_file',name="uploads"),
#url(r'^list/$','upload_file',name="uploads"),
)
urlpatterns += patterns('sistema.apps.registro.views',
url(r'^list/$', 'list', name='list'),
url(r'^libros/$', 'libros', name='libros'),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
7b4edb1a9aaa2df00d07914dfe7aad3f4f5d8a26 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02407/s010066877.py | e8955e076ce82566c91dd3f2b32b6e8cc5421855 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | n = input()
a = list(map(int, input().split()))
for i, a_ in enumerate(reversed(a)):
if i == 0:
print("%d" % a_, end="")
else:
print(" %d" % a_, end="")
print() | [
"[email protected]"
] | |
14079e30fa26cf1e221d4368be6681f4a6455963 | 2cc638b403001e9418ffdd9aeaa367a81e11faa8 | /code/DeepGPs/models/GP/GPlib.py | be4815bb680d4427711bb89a3dd843c139b2dc69 | [] | no_license | mhavasi/MPhil_Project | f0a51d5efcaa141a2b839c13502a2fae79f5dc6c | 312122922d0b364ab34350015aab61e71a9812ee | refs/heads/master | 2021-03-27T19:44:38.783555 | 2017-10-06T14:23:55 | 2017-10-06T14:23:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | from ..AbstractModel import AbstractModel
import GPy
import time
import tensorflow as tf
import numpy as np
class GPlib(AbstractModel):
# The training_targets are the Y's which are real numbers
def __init__(self,
training_data,
training_targets,
modelParams):
self.training_data = training_data
self.training_targets = training_targets
self.n_points = training_data.shape[0]
self.input_d = training_data.shape[1]
self.output_d = training_targets.shape[1]
self.kern = 'rbf'
if ('kern' in modelParams):
self.kern = modelParams['kern']
self.reset()
def reset(self):
self.models = []
for i in range(0, self.output_d):
if (self.kern == 'matern'):
kernel = GPy.kern.Matern52(input_dim=self.input_d, ARD=True)
else:
kernel = GPy.kern.RBF(input_dim=self.input_d, ARD=True)
model = GPy.models.GPRegression(self.training_data, self.training_targets[:, i:i+1],kernel)
model.optimize_restarts(num_restarts = 10)
model.optimize(messages=False)
#print(kernel)
self.models.append(model)
def addPoint(self, x, y):
self.training_data = np.vstack((x, self.training_data))
self.training_targets = np.vstack((y, self.training_targets))
self.reset()
def predictBatch(self, test_data):
means = np.array([[]]*test_data.shape[0])
vars = np.array([[]]*test_data.shape[0])
for model in self.models:
mean, var = model.predict(test_data, full_cov=False)
means = np.concatenate((means, mean), axis=1)
vars = np.concatenate((vars, var.reshape((-1, 1))), axis=1)
return means, vars
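# Rough usage sketch (assumed shapes: X is (n, d), Y is (n, k)):
#   model = GPlib(X, Y, {'kern': 'matern'})
#   means, variances = model.predictBatch(X_test)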
| [
"[email protected]"
] | |
b9717f3483c03ebfdc8246efed5f98a7a5d0adfc | 523ddad20a1a541be5bb0fa55dc4df21e85e644a | /neuralNet.py | 8b867f9327607bd6b6c244ae0f2f1a97296d60dd | [] | no_license | alv16106/NeuralNet | b1d353e738fe82a0982598be76b96c82a3aae6c4 | 45257fa3b4453e26cdac19b92e8ba9d1dd8fb008 | refs/heads/master | 2020-05-17T05:17:22.434786 | 2019-05-10T03:28:00 | 2019-05-10T03:28:00 | 183,530,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | import numpy as np
import utils
import data
import pickle
class Network(object):
def __init__(self, shape):
        # how many layers we want
self.num_layers = len(shape)
self.shape = shape
        # random weight initialization
self.weights = [np.random.randn(y, x+1)
for x, y in zip(shape[:-1], shape[1:])]
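    # each weight matrix has shape (units_out, units_in + 1); the extra column
    # multiplies the bias that feedForward prepends to the activations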
    def GD(self, x, y, alpha, max_iter = 10000, threshold = 0.001):
        iterations = 0
        current_cost = 100000
        # run until we hit the maximum number of iterations or the gradient
        # norm drops below the threshold
        while((iterations < max_iter) and (current_cost > threshold)):
            current_cost, deltas = self.backProp(x, y, 10)
            # update the weights
            self.weights[0] = self.weights[0] - (alpha * deltas[0])
            self.weights[1] = self.weights[1] - (alpha * deltas[1])
            # convergence is measured on the norm of the flattened gradients
            s = np.concatenate((np.ravel(deltas[0]), np.ravel(deltas[1])))
            current_cost = np.linalg.norm(s)
            iterations += 1
            print('Iteration ' + str(iterations))
        return self.weights
def Cost(self, h, y, lmbda):
m = len(y)
        # cross-entropy cost: -y*log(h) - (1-y)*log(1-h), averaged over m
J = (np.multiply(-y, np.log(h)) - np.multiply((1 - y), np.log(1 - h))).sum() / m
        # add the regularization term lmbda/(2m) * sum(theta^2), bias column excluded
J += (float(lmbda) / (2 * m)) * (np.sum(np.power(self.weights[0][:, 1:], 2)) + np.sum(np.power(self.weights[1][:, 1:], 2)))
return J
def feedForward(self, X):
m = X.shape[0]
        # bias column
ones = np.ones((m,1))
        # prepend the bias
a1 = np.hstack((ones, X))
z2 = a1 @ self.weights[0].T
a2 = np.hstack((ones, utils.sigmoid(z2)))
z3 = a2 @ self.weights[1].T
        # compute the hypothesis
h = utils.sigmoid(z3)
return a1, z2, a2, z3, h
def predict(self, X):
h = self.feedForward(X)[4]
return h
def loadWeights(self, new_weights):
self.weights = new_weights
def backProp(self, X, y, lmbda):
ones = np.ones(1)
a1, z2, a2, z3, h = self.feedForward(X)
J = self.Cost(h,y,lmbda)
m = X.shape[0]
delta1 = np.zeros(self.weights[0].shape) # (3, 6)
delta2 = np.zeros(self.weights[1].shape) # (3, 4)
ones = np.ones((m,1))
diff = h - y
z2 = np.hstack((ones, z2)) # (5,4)
d2 = np.multiply(np.dot(self.weights[1].T, diff.T).T, utils.sigmoid_prime(z2)) # (5000, 26)
delta1 += np.dot((d2[:, 1:]).T, a1)
delta2 += np.dot(diff.T, a2)
delta1 = delta1 / m
delta2 = delta2 / m
        # add regularization, but not to the bias column
delta1[:, 1:] = delta1[:, 1:] + (self.weights[0][:, 1:] * lmbda) / m
delta2[:, 1:] = delta2[:, 1:] + (self.weights[1][:, 1:] * lmbda) / m
return J, [delta1, delta2]
net = Network([784, 100, 10])
x, y, test, y_t, cv, y_cv = data.load_data(2000, 200)
print(test.shape)
y_d = utils.vectorized_result(y, 10)
weights = net.GD(x, y_d, 1, 20, 0.39)
print('Test accuracy')
print(utils.get_accuracy(net.predict(test), y_t))
print('CV accuracy')
print(utils.get_accuracy(net.predict(cv), y_cv))
pickle.dump(weights, open('weights2.npy', "wb"))
""" x, y, test, y_t, cv, y_cv = data.load_data(2000, 200)
net = Network([784, 25, 10])
weights = np.load('weights4.npy', allow_pickle=True)
net.loadWeights(weights)
y_d = utils.vectorized_result(y, 10)
net.GD(x, y_d, 1, 300, 0.1)
print('Test accuracy')
print(utils.get_accuracy(net.predict(test), y_t))
print('CV accuracy')
print(utils.get_accuracy(net.predict(cv), y_cv))
pickle.dump(weights, open('weights5.npy', "wb")) """ | [
"[email protected]"
] | |
804cea13ed938a7fde7a5ed8b23008394fe60ae2 | d1633816e7ab93b4a00e37946dce2f93eddd66ae | /ps1/ps1.py | 54cb6a7c74672788c960134ada6232c24f2767a1 | [] | no_license | RitterGT/ComputerVision | d15affc7b25df9f646ea4c3cb958c8013c09e6a2 | ad27ac548385365f3302314d4a78b1397b7eb072 | refs/heads/master | 2021-01-22T01:58:05.347050 | 2015-11-08T22:31:27 | 2015-11-08T22:31:27 | 42,410,753 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | import cv2
import numpy as np
def part1():
cv2.imwrite("output/ps1-1-a-1.png", cv2.imread("input/Image1.png"))
cv2.imwrite("output/ps1-1-a-2.png", cv2.imread("input/Image2.png"))
return
def part2():
image1 = cv2.imread("output/ps1-1-a-1.png")
b,g,r = cv2.split(image1)
#Swap red and blue
cv2.imwrite("output/ps1-2-a-1.png", cv2.merge((r,g,b)))
#store green channel
cv2.imwrite("output/ps1-2-b-1.png", g)
#store red channel
cv2.imwrite("output/ps1-2-c-1.png", r)
return
def part3():
image1 = cv2.imread("output/ps1-1-a-1.png")
image2 = cv2.imread("output/ps1-1-a-2.png")
img1_blue, img1_green, img1_red = cv2.split(image1)
img2_blue, img2_green, img2_red = cv2.split(image2)
#get the center of each green channel
    img1_centerHeight = img1_green.shape[0] // 2
    img1_centerWidth = img1_green.shape[1] // 2
    img2_centerHeight = img2_green.shape[0] // 2
    img2_centerWidth = img2_green.shape[1] // 2
centerSquare = img1_green[img1_centerHeight - 50 : img1_centerHeight + 50 : 1, img1_centerWidth-50 : img1_centerWidth+50 : 1]
img2_green[img2_centerHeight - 50 : img2_centerHeight + 50 : 1, img2_centerWidth-50 : img2_centerWidth+50 : 1] = centerSquare
cv2.imwrite("output/ps1-3-a-1.png", img2_green)
return
def part4():
image1 = cv2.imread("output/ps1-1-a-1.png")
img1_blue, img1_green, img1_red = cv2.split(image1)
min = np.min(img1_green)
max = np.max(img1_green)
mean = np.mean(img1_green)
std = np.std(img1_green)
#Subtract the mean from all pixels,
# then divide by standard deviation,
# then multiply by 10 (if your image is 0 to 255)
# or by 0.05 (if your image ranges from 0.0 to 1.0). Now add the mean back in.
img1_green = img1_green.astype(np.float64)
out = ((((img1_green - mean)/std) * 10) + mean)
out = out.clip(0, 255).astype(np.uint8)
cv2.imwrite("output/ps1-4-b-1.png", out)
shift = np.copy(img1_green)
shift = np.roll(shift, -2, axis=1)
shift[:, -2] = 0
shift[:, -1] = 0
cv2.imwrite("output/ps1-4-c-1.png", shift)
# Subtract the shifted version of img1_green from the original img1_green, and save the difference image.
diff = np.clip((img1_green - shift), 0, 255).astype(np.uint8)
cv2.imwrite("output/ps1-4-d-1.png", diff)
def part5():
image1 = cv2.imread("output/ps1-1-a-1.png")
img1_blue, img1_green, img1_red = cv2.split(image1)
sigma = 16
rand_num = np.random.randn(img1_green.shape[0], img1_green.shape[1]) * sigma
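    # np.random.randn draws N(0, 1); scaling by sigma gives N(0, sigma^2) noise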
img1_green_alter = np.copy(img1_green)
img1_green_alter = img1_green_alter.astype(np.float64)
img1_green_alter += rand_num
img1_green_alter = img1_green_alter.clip(0, 255).astype(np.uint8)
cv2.imwrite("output/ps1-5-a-1.png",cv2.merge((img1_blue, img1_green_alter, img1_red)))
img1_blue_alter = np.copy(img1_blue)
img1_blue_alter = img1_blue_alter.astype(np.float64)
img1_blue_alter += rand_num
img1_blue_alter = img1_blue_alter.clip(0,255).astype(np.uint8)
cv2.imwrite("output/ps1-5-b-1.png",cv2.merge((img1_blue_alter, img1_green, img1_red)))
return
part1()
part2()
part3()
part4()
part5() | [
"[email protected]"
] | |
d9e35c58ef81da07d57d505c560402b03b7a66eb | 85b7431db3b2f90ec7749a3f7d7755bd9f09092d | /SeleniumEssentialTraining/04_02/Radiobuttons.py | 0348d096fecfa768c2b52f0776eef06b29e9c0b6 | [] | no_license | tcd1558/ScriptingForTesters | db4f8260123459a9406fbbbe4306c76817d83589 | 2a6aa6aa6dedc18cb74c7dd0987d170854f27588 | refs/heads/main | 2023-04-07T05:22:22.808921 | 2021-04-14T22:45:24 | 2021-04-14T22:45:24 | 342,373,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | # This script opens a webdriver (Chrome) to access
# the test website https://formy-project.herokuapp.com
# and clicks its radio buttons and checkboxes in turn
# Then it sleeps for 60 seconds and closes the browser
# webdriver is needed to start the browser driver e.g. chromedriver
from selenium import webdriver
# Add the sleep function
from time import sleep
# Add special characters, e.g. RETURN to control web pages
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Set a variable where to find the chromedriver executable.
ChromeDriver='/Users/marco/PycharmProjects/ScriptingForTesters_TM/chromedriver'
HTML='https://formy-project.herokuapp.com/radiobutton'
# Create a new instance of ChromeDriver
driver = webdriver.Chrome(executable_path=ChromeDriver)
# And now use the driver to open the test page
driver.get(HTML)
# Locate the three radio buttons by id, CSS selector, and XPath
Radiobutton1 = driver.find_element_by_id('radio-button-1')
Radiobutton2 = driver.find_element_by_css_selector("input[value='option2']")
Radiobutton3 = driver.find_element_by_xpath('/html/body/div/div[3]/input')
sleep(5)
Radiobutton2.click()
sleep(5)
Radiobutton3.click()
sleep(5)
Radiobutton1.click()
sleep(5)
HTML='https://formy-project.herokuapp.com/checkbox'
driver.get(HTML)
checkbox1 = driver.find_element_by_id('checkbox-1')
checkbox2 = driver.find_element_by_css_selector("input[value='checkbox-2']")
checkbox3 = driver.find_element_by_xpath("//*[@id=\"checkbox-3\"]")
sleep(5)
checkbox1.click()
sleep(5)
checkbox2.click()
sleep(5)
checkbox3.click()
sleep(5)
checkbox1.click()
sleep(5)
checkbox2.click()
sleep(5)
checkbox3.click()
sleep(5)
# sleep 60 seconds to observe the result
print("Sleeping for visual insprection")
sleep(60)
driver.quit()
| [
"[email protected]"
] | |
369c85f97223d4a424164aea2c749e27cfaff3c0 | 32fc5324804cb67fe93102121cef95160d95e997 | /main/form/tree/wrapper.py | 061dcb6e49f23d0d0e8ef0732a4912a0d17ffe34 | [] | no_license | fu2re/jsondb | 69b982db395f184ba24811cfb6d29b40ba899e16 | bd870a8df97a37c96df1a00c605dae6cf4a3636e | refs/heads/master | 2021-01-19T22:01:45.389053 | 2012-08-27T08:31:23 | 2012-08-27T08:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui, Qt
from project import QProjectItem
from table import QProjectTableItem
from data.projects_manager import projectManager
class QProjectTree(QtGui.QTreeWidget):
"""
    Left-hand panel: a tree of the form
    project -> table
"""
def __init__(self, root, parent):
QtGui.QTreeWidget.__init__(self, parent)
self.root = root
        self.setHeaderLabel(u'Projects')
def _currentItem(self):
item = self.currentItem()
if not item:
item = self.topLevelItem(0)
elif item.parent():
item = item.parent()
return item
def contextMenuEvent(self, event):
"""
Context menu of project item, shows with right click
"""
item = self.itemAt(event.pos())
if item:
menu = item._build_menu(self)
self._caction = menu.exec_(self.mapToGlobal(event.pos()))
menu._do(self._caction)
def _update(self):
"""
        Refreshes the data about projects and their tables
        and rebuilds their display
"""
self.clear()
for project in projectManager.projects:
try:
name = '%s [%s]' % (project.name, project.data.summary()[0])
except:
name = project.name
item = QProjectItem(self.root, self, [name])
item._project = project
if not hasattr(project, 'data'):
item._disable()
elif not project.data:
item._disable()
else:
project.data.project_name = self.root.stg.value(
"proj/%s/name" % project.name, project.name
).toPyObject()
for k, v in project.data.table.items():
table = QProjectTableItem(self.root, self, [str(k)])
table._table = v
table._project = project
item.addChild(table)
self.addTopLevelItem(item)
self.show_warnings(project)
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
def show_warnings(self, project):
for table_name, table_errors in project.data.errors.items():
for doc_id, doc_errors in table_errors.items():
for error in doc_errors:
self.root._message('WARNING: Document %s.%s has error in field %s' % (table_name, doc_id, error)) | [
"[email protected]"
] | |
22718770c683b7f8769e1f964ec84b760f44aa20 | 0420c3de754604feac524cda55d23fa0c1fa1306 | /Sublime Text 2/Backup/20130223154609/sublemacspro/sbp_mark.py | 08f2049cae66e67bfb73486c3b90aa157d1416ff | [
"BSD-3-Clause"
] | permissive | jgeller819/dotfiles | 93341ea4040d8a942b6391b2c92f5299c6950261 | c78153a57b0ab6ad95bcb0f23a522a0135e8b7a8 | refs/heads/master | 2021-01-25T03:27:30.841815 | 2014-06-24T03:55:32 | 2014-06-24T03:55:32 | 20,668,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | import sublime
import sublime_plugin
# Remove any existing marks
#
class SbpCancelMarkCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
m = self.view.get_regions("mark")
# Get current selection:
currentSel = self.view.sel()[0]
if m:
self.view.erase_regions("mark")
self.view.sel().clear()
self.view.sel().add(sublime.Region(currentSel.b, currentSel.b))
class SbpSetMarkCommand(sublime_plugin.TextCommand):
def run(self, edit):
m = self.view.get_regions("mark")
self.view.run_command("sbp_cancel_mark")
mark = [s for s in self.view.sel()]
if m != mark:
self.view.add_regions("mark", mark, "mark", "dot",
sublime.HIDDEN | sublime.PERSISTENT)
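            # HIDDEN keeps the mark regions invisible; PERSISTENT makes them
            # survive across editing sessions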
class SbpSwapWithMarkCommand(sublime_plugin.TextCommand):
def run(self, edit):
old_mark = self.view.get_regions("mark")
mark = [s for s in self.view.sel()]
self.view.add_regions("mark", mark, "mark", "dot",
sublime.HIDDEN | sublime.PERSISTENT)
if len(old_mark):
self.view.sel().clear()
for r in old_mark:
self.view.sel().add(r)
class SbpSelectToMarkCommand(sublime_plugin.TextCommand):
def run(self, edit):
mark = self.view.get_regions("mark")
num = min(len(mark), len(self.view.sel()))
regions = []
for i in xrange(num):
regions.append(self.view.sel()[i].cover(mark[i]))
for i in xrange(num, len(self.view.sel())):
regions.append(self.view.sel()[i])
self.view.sel().clear()
for r in regions:
self.view.sel().add(r)
class SbpDeleteToMark(sublime_plugin.TextCommand):
def run(self, edit):
self.view.run_command("copy")
self.view.run_command("sbp_add_to_kill_ring", {"forward": False})
#self.view.run_command("sbp_select_to_mark")
self.view.run_command("left_delete")
self.view.run_command("sbp_cancel_mark")
#
# If a mark has been set, color the region between the mark and the point
#
class SbpEmacsMarkDetector(sublime_plugin.EventListener):
def __init__(self, *args, **kwargs):
sublime_plugin.EventListener.__init__(self, *args, **kwargs)
# When text is modified, we cancel the mark.
def on_modified(self, view):
#view.erase_regions("mark")
pass
def on_selection_modified(self, view):
mark = view.get_regions("mark")
num = min(len(mark), len(view.sel()))
regions = []
for i in xrange(num):
regions.append(view.sel()[i].cover(mark[i]))
for i in xrange(num, len(view.sel())):
regions.append(view.sel()[i])
view.sel().clear()
for r in regions:
view.sel().add(r)
def on_query_context(self, view, key, operator, operand, match_all):
if key == "sbp_emacs_has_mark":
if operator == sublime.OP_EQUAL:
return len(view.get_regions("mark")) > 0
| [
"[email protected]"
] | |
3dd1b32431110ef79c1c370c4226d35bac027777 | 082f4fc478b554d2257440edb1a31a17bb805c72 | /Video-Person-ReID/data_util/create_metadata_files.py | da1002a9970c0e823708a02852a4d91496d9cc36 | [
"MIT"
] | permissive | anurag3/2019-CVPR-AIC-Track-2-UWIPL | 0d637c9fe707609bec29dbe3b36704caa50b4f36 | 61ee2c96611e10fe51a52033b1cd0e2804d544ca | refs/heads/master | 2021-03-04T20:42:47.246524 | 2020-04-04T00:06:20 | 2020-04-04T00:06:20 | 246,063,049 | 0 | 0 | MIT | 2020-04-04T00:06:22 | 2020-03-09T14:49:53 | null | UTF-8 | Python | false | false | 3,103 | py | from os import listdir, mkdir
from os.path import join, split, isfile, isdir
image_sets = [
'query',
'test',
]
dummys = [
'',
#'_dummy',
]
models = [
'v2m100',
]
aic_track2_dir = '/path_to_aic19-track2-reid/'
for model in models:
for image_set in image_sets:
for dummy in dummys:
print((model, image_set, dummy))
# parse metadata probability from file
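            # assumed file layout: every 4 lines describe one image; the first
            # line is a header, the next three hold bracketed probability vectors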
metadatas = []
with open(aic_track2_dir + 'prob_%s_%s.txt'%(model, image_set), 'r') as f:
for i, line in enumerate(f):
line = line.strip()
if i % 4 == 0:
metadatas.append([])
else:
l = line.rfind('[')
r = line.find(']')
if l == -1 and r == -1:
metadatas[-1].append(line.strip())
elif l < r:
metadatas[-1].append(line[l+1:r].strip())
else:
print('invalid line: ' + line)
if len(metadatas[-1]) == 0:
metadatas = metadatas[:-1]
print('images in metadatas: %d' % len(metadatas))
# read image filenames from file
img_orders = {}
with open(aic_track2_dir + 'imglist_%s_%s.txt'%(model, image_set), 'r') as f:
for i, line in enumerate(f):
pos = line.find('.jpg')
imgid = line[pos-6:pos]
#print(imgid)
if imgid in img_orders:
print('duplicate images: '+imgid)
img_orders[imgid] = i
print('images in image list: %d' % len(img_orders))
image_path = aic_track2_dir + 'image_%s_deepreid%s' % (image_set, dummy)
metadata_path = aic_track2_dir + 'metadata_%s_%s_deepreid%s' % (model, image_set, dummy)
mkdir(metadata_path)
pids = [f for f in listdir(image_path) if isdir(join(image_path, f))]
pids.sort()
for pid in pids:
print(pid)
pid_path = join(metadata_path, pid)
pid_path_img = join(image_path, pid)
mkdir(pid_path)
cids = [f for f in listdir(pid_path_img) if isdir(join(pid_path_img, f))]
for cid in cids:
cid_path = join(pid_path, cid)
cid_path_img = join(pid_path_img, cid)
mkdir(cid_path)
imgs = [f for f in listdir(cid_path_img) if isfile(join(cid_path_img, f)) and f[-4:] == '.jpg']
for img in imgs:
imgname = img[:-4]
imgid = imgname.split('_')[-1]
metadata_file = join(cid_path, imgname+'.txt')
with open(metadata_file, 'w') as file:
for metadata in metadatas[img_orders[imgid]]:
file.write(metadata+'\n')
| [
"[email protected]"
] | |
0a8f1ad2849f4fd538ae144669fe95ff27917f08 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/_showlegend.py | 627f4d0cd8ad3a72ac09cad8f153f99e1ff91981 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 422 | py | import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name='showlegend', parent_name='layout', **kwargs
):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='legend',
role='info',
**kwargs
)
| [
"[email protected]"
] | |
f0c6c67f26d32148b533776cf2bb3ab0dec518e8 | bd4d9fdf6deaa88d98493e1db50fccf785b09a0d | /Nest2PagerDuty.py | 77501f71c9bc15d9fdc9bec4c2ebf212fd4befd4 | [] | no_license | ophirr/Nest2PagerDuty | 42ebae1c8a07a8163cbaf03065c43ace58ea1915 | 3716214d3384304792222cc89adad38a691a5b20 | refs/heads/master | 2021-05-19T17:24:09.964814 | 2020-04-20T03:31:28 | 2020-04-20T03:31:28 | 252,046,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py |
import gmail
import json
import requests
import re
from sekret import api_key, GNAME, GP, ROUTING_KEY, INCIDENT_KEY
extsub = ''
nest_url = ''
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
# Login to gmail, yes this mechanism is insecure
g = gmail.login(GNAME, GP)
def trigger_nest_incident():
# Triggers a PagerDuty incident without a previously generated incident key
# Uses Events V2 API - documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
header = {
"Content-Type": "application/json"
}
payload = { # Payload is built with the least amount of fields required to trigger an incident
"routing_key": ROUTING_KEY,
"event_action": "trigger",
"dedup_key": INCIDENT_KEY,
"payload": {
"summary": extsub,
"source": "Nest Camera Infra",
"severity": "warning",
"class": "security"
},
"links": [{
"href": nest_url,
"text": ">>> CLICK HERE to view the footage <<<"
}]
}
response = requests.post('https://events.pagerduty.com/v2/enqueue',
data=json.dumps(payload),
headers=header)
if response.json()["status"] == "success":
print('Incident Created')
else:
print(response.text) # print error message if not successful
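
# Rough manual test of the trigger above (hypothetical values; the payload is
# fed from the module-level globals, and PagerDuty groups repeated events that
# share the same dedup_key into a single incident):
#   extsub = "Activity detected at the front door"
#   nest_url = "https://home.nest.com/camera/..."
#   trigger_nest_incident()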
def trigger_sar_incident():
# Triggers a PagerDuty incident without a previously generated incident key
# Uses Events V2 API - documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
header = {
"Content-Type": "application/json"
}
payload = { # Payload is built with the least amount of fields required to trigger an incident
"routing_key": ROUTING_KEY,
"event_action": "trigger",
"dedup_key": INCIDENT_KEY,
"payload": {
"summary": extsub,
"source": "KCSAR",
"severity": "critical",
"class": "search and rescue"
}
}
response = requests.post('https://events.pagerduty.com/v2/enqueue',
data=json.dumps(payload),
headers=header)
if response.json()["status"] == "success":
print('Incident Created')
else:
print(response.text) # print error message if not successful
if not (g.logged_in):
print '\n' + 'not logged in'
else :
print '\n' + 'logged in, yay'
unread = g.mailbox('NestAlerts').mail(unread = 'True', prefetch = 'True')
unread = g.mailbox('NestAlerts').mail(unread = 'True')
for number in unread:
number.fetch()
nest_html = number.html
if number.fr == "Team20th <[email protected]>":
if nest_html is not None:
soup = BeautifulSoup(nest_html, features="html.parser")
for link in soup.findAll('a', attrs={'href': re.compile("^https://home.nest.com/camera")}):
# print link.get('href')
nest_url = link.get('href')
spans = soup.find_all('span')
for span in spans:
extsub = span.text
break
#
# Let's check out any attachments
#goodies = number.attachments
#image = goodies[0]
#print "URL - '" + nest_url + "'"
#print "SUBJECT - '" + extsub + "'"
trigger_nest_incident()
else:
if (number.subject == "[ESAR]") or ("King County" in number.subject):
extsub = number.body
trigger_sar_incident()
# Mark message as read
number.read()
if not unread:
print "No new alerts, exiting" + "\n"
| [
"[email protected]"
] | |
51b265658ed77898b0443e969d95d3fb4c4e3a52 | 4329dea0118b0665551695c50f6fcf4e58cd60e1 | /BenchmarkingThesis/dataStructures/CTData.py | 2535684c14de3b5b328626a02cdba29fc676fa48 | [] | no_license | SirEdrick/Spectral-CT-Thesis | 5a5586bdd0068140a21a888d40fed12206556da7 | e7ace4eb01bb85a89cb30eebf7edc0b552a5a307 | refs/heads/main | 2023-06-04T22:42:05.867100 | 2021-06-27T18:36:07 | 2021-06-27T18:36:07 | 380,811,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,348 | py | '''
Created on 19 sept. 2018
@author: Wail Mustafa
'''
import sys
import os
import string
import h5py
#from libtiff import TIFF
from PIL import Image
import numpy
import itertools
import warnings
import time
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import imageio

# Assumed imports for names used later in this module: `octave` in the .mat
# loaders (oct2py), and the `ssim`/`mae` metrics in ImageData.compute_error
# (scikit-image / scikit-learn); swap in your own implementations if these
# packages are not available.
from oct2py import octave
from skimage.measure import compare_ssim as ssim
from sklearn.metrics import mean_absolute_error as mae
class CTData(object):
def __init__(self, datapath_head, sub_dir, file_name):
self.datapath_head = datapath_head
self.sub_dir = sub_dir
self.file_name = file_name
#self.file_ext = 'h5'
# data order inside the class should be unified
        # any manipulation of dimensions should be done right before it is needed
        # also, when loaded we should ensure that the dimensions are respected
        # projection data: [Energy][Pixel][Slice][Angle]
        # reconstruction data: [X][Y][Slice][Energy]
        # This is how it seems to be done now, but we could consider changing it so that Energy and Slice share the same dimension for both projection & reconstruction
        self.data = None # consider making this protected
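        # E.g. (hypothetical shapes, assuming 128 energy bins, a 256-pixel
        # detector row, 100 slices and 360 projection angles):
        #   projection data:     array of shape (128, 256, 100, 360)
        #   reconstruction data: array of shape (256, 256, 100, 128)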
self.z_load_slice = slice(None,None,None) # for partial (random access) loading
self.load_channels = slice(None,None,None) # for partial (random access) loading
self.load_angles = slice(None,None,None) # for partial (random access) loading
self.processed_dir = 'processed' # put everything under 'processed' directory
def setLoadSliceZ(self, start, stop, step=None):
if stop <0:
return
self.z_load_slice = slice(start, stop, step)
def setLoadChannels(self, start, stop, step=None):
if stop <0:
return
self.load_channels = slice(start, stop, step)
def getDirPath(self):
return self.datapath_head + os.path.sep + self.processed_dir + os.path.sep + self.sub_dir
def getFilePath(self, file_ext):
return self.getDirPath() + os.path.sep + self.file_name + "." + file_ext
#def useH5(self):
# self.file_ext = 'h5'
#def useMat(self):
# self.file_ext = 'mat'
def loadData(self, file_ext):
if file_ext is None:
file_ext = "h5"
if "h5" in file_ext:
try:
self.loadDataH5(self.getFilePath(file_ext))
except:
print(self.getFilePath(file_ext))
                warnings.warn('Could not open file, trying .mat')
#self.loadDataMat()
elif "mat" in file_ext:
self.loadDataMat()
else:
raise ValueError('File extension is not recognized.')
print (self.__class__.__name__ + " loaded dims" + str(self.data.shape))
#-------------------------------checked
def loadDataH5Silce(self, value):
self.data = numpy.array(value[self.load_channels,self.z_load_slice,:], order='F').transpose()
def loadDataH5(self, data_path):
print ('loading: '+data_path)
print("here...")
f = h5py.File(data_path,'r')
print(f)
value = f['data']['value']
print ("file data shape: " + str(value.shape))
#raise SystemExit
#self.data = numpy.array(value[:,self.z_load_slice,:], order='F').transpose()
self.loadDataH5Silce(value)
#self.data = numpy.array(value[:,1:5,:], order='F').transpose()
#self.data = self.data.reshape((1,) + self.data.shape)
#self.data = numpy.array(value[:,self.z_load_slice,:,:], order='F').transpose()
print ("loaded data shape: " + str(self.data.shape))
print("THIS WAS THE SHAPE OF H5 DATA LOADED")
f.close()
return True
def saveData(self, file_ext = None):
minVal = numpy.nanmin(self.data)
maxVal = numpy.nanmax(self.data)
print("saveData minVal: " + str(minVal))
print("saveData maxVal: " + str(maxVal))
if file_ext is None:
file_ext = "h5"
dir_path = self.getDirPath()
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if "h5" in file_ext:
self.saveDataH5(self.getFilePath(file_ext))
elif "tiff" or 'png' in file_ext:
dir_path_images = dir_path + '/' + self.file_name
if not os.path.exists(dir_path_images):
os.makedirs(dir_path_images)
self.saveDataAsImages(dir_path_images, file_ext)
else:
raise ValueError('File extension is not recognized.')
def saveDataH5(self, data_path):
f = h5py.File(data_path,'w')
data_group = f.create_group('data')
data_group.create_dataset('value', data=self.data.transpose())
f.close()
#@abc.abstractmethod
#TODO: consider using abc package to define functions as abstract
def loadDataMat(self):
raise NotImplementedError("Please Implement this method")
# The following methods has to be abstract now because reconstruction data has a different dimensionality
def convertTo4D(self):
raise NotImplementedError("Please Implement this method")
def selectSlices(self, slices):
raise NotImplementedError("Please Implement this method")
def selectChannels(self, channels):
raise NotImplementedError("Please Implement this method")
def averageChannels(self):
raise NotImplementedError("Please Implement this method")
def removeNans(self):
raise NotImplementedError("Please Implement this method")
def __del__(self):
del self.data
def saveImageAsTiff(self, channelPath, I, sliceIndex):
fname = channelPath + os.path.sep + ("image_z%04d.tif" % (sliceIndex))
sliceFile = TIFF.open(fname, 'w')
sliceFile.write_image(I.astype(numpy.float16))
sliceFile.close()
def saveImageAsGreyScale(self, channelPath, I, sliceIndex):
#self.i = self.i+1
fname = channelPath + os.path.sep + ("image_z%04d.png" % (sliceIndex) )
#fname = channelPath + os.path.sep + '_'+ ("image_z%04d_%01d.png" % (sliceIndex, self.i) )
#I8 = (((I - I.min()) / (I.max() - I.min())) * 255.9).astype(numpy.uint8)
#I = I.clip(min=0)
#I8 = (I*255.9).astype(numpy.uint8)
I8 = (((I - I.min()) / (I.max() - I.min())) * 256).astype(numpy.uint8)
I8 = 255-I8
img = Image.fromarray(I8)
img.save(fname)
def show_image_ch_basic(self, ax, plot_slice_id, ch, plot_max):
myplt = ax.imshow(self.data[:,:,plot_slice_id,ch].squeeze(), cmap='gist_yarg',norm=mcolors.PowerNorm(gamma=0.6))
#for spine in ax.spines.values():
# if ch == 8 or ch == 20:
# spine.set_edgecolor('red')
myplt.set_clim(0,plot_max)
ax.set_xticks([])
ax.set_yticks([])
#plt.colorbar(myplt, ax=ax)
#ax.axis('off')
#generate_patches(patches, patches_colors, ax)
return myplt
def show_image_ch_mont(self, ax, plot_slice_id, ch, plot_max):
myplot = self.show_image_ch_basic(ax, plot_slice_id, ch, plot_max)
#sub_text = 'TV = ' + str(int(image_data.TV[plot_slice_id,ch]))
#sub_text = sub_text + '\n' + 'MAE = ' + str(int(100*image_data.MAE[plot_slice_id,ch]))
#sub_text = sub_text + '\n' + 'SSIM = ' + str(int(100*image_data.SSIM[plot_slice_id,ch]))
#ax.set_xlabel(sub_text)
ax.set_visible(True)
return myplot
def plot_images_montage(self, axs, row_no, plot_slice_id, plot_max, kevs_used, ch_step=4):
#image_data.data = image_data.data[2:98,2:98,:,:]
ch= self.data.shape[3]
#print(image_data.data.shape)
#print(ch)
fig_counter = 0
for ch_i in itertools.islice(itertools.count(),0,ch,ch_step):
#print(ch_i)
ax = axs[row_no,fig_counter]
#ax = plt.subplot(fig_rows, fig_cols, fig_counter)
energy = str(kevs_used[ch_i]) + ' keV'
myplot = self.show_image_ch_mont(ax, plot_slice_id, ch_i, plot_max)
if fig_counter==0:
if not hasattr(self, 'Lname'):
self.Lname = ''
ax.set_ylabel(self.Lname, fontsize=12)
if row_no==0:
ax.set_title(energy, fontsize=12)
fig_counter+=1
return myplot
def save_images_montage_slice(self, plot_slice_id, kevs_used):
ch_step = 1
#ch = 15#30
#imagenr = plot_slice_id
fig_rows = 2
fig_cols = 32
#fig_data = plt.figure()
fig, axs = plt.subplots(fig_rows,fig_cols, dpi=100)
slice_text = ("_slice_z%04d" % (plot_slice_id))
fig.suptitle('slice: '+slice_text, y=0.1)
fig_data = fig
fig.set_figheight(2)
fig.set_figwidth(40)
numpy.vectorize(lambda axs:axs.set_visible(False))(axs)
p_min, plot_max = numpy.percentile(self.data[:,:,plot_slice_id,:], (5, 99.9))
myplot = self.plot_images_montage(axs, 0, plot_slice_id, plot_max, kevs_used, ch_step=ch_step)
fig.colorbar(myplot, ax=axs.ravel().tolist(), fraction=0.046, pad=0.04)
#dir_path = self.getDirPath()
#save_path = dir_path+os.path.sep+self.Lname+slice_text+'.png'
#print(save_path)
#fig.savefig(save_path, bbox_inches='tight')
fig_data.canvas.draw()
plot_image = numpy.frombuffer(fig_data.canvas.tostring_rgb(), dtype=numpy.uint8)
print(plot_image.shape)
plot_image = plot_image.reshape(fig_data.canvas.get_width_height()[::-1] + (3,))
print(plot_image.shape)
return plot_image
def save_images_montage(self):
slice_no = self.data.shape[2]
#slice_no = 2
print(slice_no)
ch_no = self.data.shape[3]
kevs = numpy.round(numpy.linspace(start=20, stop=160, num=128),decimals=1)
channels_used = list(numpy.linspace(start=self.load_channels.start, stop=self.load_channels.stop, num=ch_no).astype(int))
kevs_used = kevs[channels_used]
plot_images = list()
for slice_i in itertools.islice(itertools.count(),0,slice_no,1):
plot_image = self.save_images_montage_slice(slice_i, kevs_used)
plot_images.append(plot_image)
fps = 100
imageio.mimwrite(self.getDirPath()+ '/reconstruction_movie' + '.gif', plot_images, fps=fps)
class ProjectionData(CTData):
def reduceProjNo(self, new_no, geostruct):
projection_resolution = geostruct["range_angle"]/geostruct["nproj"]; # angles between consecutive projections before reduction
projection_resolution_new = geostruct["range_angle"]/new_no;
sampling_step = projection_resolution_new/projection_resolution;
new_indices = [0] * new_no
for j in range(new_no):
new_indices[j] = int(sampling_step*j)
#print(new_indices)
#new_indices = round(new_indices);
#print(new_indices)
self.data = self.data[:,:,:,new_indices]
geostruct["nproj"] = new_no
def setLoadAngles(self, geostruct, ang_no, ang_start=0, ang_sep=None):
if ang_sep == None:
projection_resolution = geostruct["range_angle"]/geostruct["nproj"] # angles between consecutive projections before reduction
print("projection_resolution",projection_resolution)
projection_resolution_new = geostruct["range_angle"]/ang_no
print("projection_resolution_new",projection_resolution_new)
ang_sep = int(projection_resolution_new/projection_resolution)
print("ang_sep",ang_sep)
#ang_start = 1;
#ang_sep = 15;
#ang_no = 12;
ang_span = ang_sep*(ang_no-1)+ang_start
ang_span = int(ang_span)
#ang_start = 359-ang_span
#ang_span = 359
self.load_angles = slice(ang_start, ang_span+1, ang_sep)
geostruct["nproj"] = ang_no
geostruct["range_angle"] = ang_span
print("load_angles",self.load_angles)
print(geostruct)
def loadDataH5Silce(self, value):
self.data = numpy.array(value[self.load_angles, self.z_load_slice,:,self.load_channels], order='F').transpose()
def convertTo4D(self):
if len(self.data.shape) == 3: # 2D data
self.data = self.data.reshape((self.data.shape[0], self.data.shape[1], 1 , self.data.shape[2]))
print(self.__class__.__name__ + " coverted to 4D, new dims" + str(self.data.shape))
def selectSlices(self, slices = [200]):
if set(slices).issubset(set(range(0, self.data.shape[2]))):
self.data = self.data[:,:,slices,:]
def selectChannels(self, channels = [63]):
if set(channels).issubset(set(range(0, self.data.shape[0]))):
self.data = self.data[channels,:,:,:]
else:
raise SystemExit
def averageChannels(self):
self.data = self.data.mean(0, keepdims = True)
def removeNansChris(self):
        # Christian's implementation
# remove Nans and Infs
nshape = self.data.shape
valueCorrections = numpy.zeros(nshape[0],dtype=numpy.long)
nanArray = numpy.isnan(self.data)
infArray = numpy.isinf(self.data)
for E_index in itertools.islice(itertools.count(),0,nshape[0]):
mask = numpy.isfinite(self.data[E_index,:,:,:])
maxVal = numpy.nanmax(self.data[E_index,:,:,:][mask])
for D_index in itertools.islice(itertools.count(),0,nshape[1]):
for Z_index in itertools.islice(itertools.count(),0,nshape[2]):
for P_index in itertools.islice(itertools.count(),0,nshape[3]):
if infArray[E_index,D_index,Z_index,P_index]==True:
#self.data[E_index,D_index,Z_index,P_index]=-2000
self.data[E_index,D_index,Z_index,P_index]=maxVal
valueCorrections[E_index]+=1
if nanArray[E_index,D_index,Z_index,P_index]==True:
self.data[E_index,D_index,Z_index,P_index]=0
valueCorrections[E_index]+=1
def removeNans(self):
minVal = numpy.nanmin(self.data)
maxVal = numpy.nanmax(self.data)
print("before removeNans minVal: " + str(minVal))
print("before removeNans maxVal: " + str(maxVal))
# remove Nans and Infs
nshape = self.data.shape
for E_index in itertools.islice(itertools.count(),0,nshape[0]):
mask = numpy.isfinite(self.data[E_index,:,:,:])
maxVal = numpy.nanmax(self.data[E_index,:,:,:][mask])
#minVal = numpy.nanmin(self.data[E_index,:,:,:][mask])
nanArray = numpy.isnan(self.data[E_index,:,:,:])
infArray = numpy.isinf(self.data[E_index,:,:,:])
#negInfArray = numpy.isneginf(self.data[E_index,:,:,:])
self.data[E_index,:,:,:][nanArray] = 0
self.data[E_index,:,:,:][infArray] = maxVal
#self.data[E_index,:,:,:][negInfArray] = minVal
self.data[self.data < 0.0] = 0
minVal = numpy.nanmin(self.data)
maxVal = numpy.nanmax(self.data)
print("after removeNans minVal: " + str(minVal))
print("after removeNans maxVal: " + str(maxVal))
def plotAsImage(self):
import pylab
pylab.figure(1)
pylab.imshow(self.data[10,200,:,:].squeeze())
pylab.show()
def saveDataAsImages(self, data_path, file_ext):
print (self.data.shape)
numChannels = self.data.shape[0]
numSlices = self.data.shape[2]
for channelIndex in itertools.islice(itertools.count(),0,numChannels):
channelPath = data_path+os.path.sep+("channel_%04d" % (channelIndex))
if(os.path.exists(channelPath) == False):
os.mkdir(channelPath)
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
I = self.data[channelIndex,:,sliceIndex,:]
if "tiff" in file_ext:
self.saveImageAsTiff(channelPath,I,sliceIndex)
if "png" in file_ext:
self.saveImageAsGreyScale(channelPath,I,sliceIndex)
class RawData(ProjectionData):
def __init__(self, datapath_head, file_name = "raw"):
super(RawData,self).__init__(datapath_head = datapath_head, sub_dir="raw", file_name = file_name)
def loadData(self, file_ext = None):
super(RawData,self).loadData(file_ext)
#print ("corrected data loaded dims" + str(self.data.shape))
self.convertTo4D()
def getNumberOfSlices(self, filepath):
import struct
#nlines=1
#nslices=1
#nlinesPerSlice=1
with open(filepath, "rb") as binary_file:
binary_file.seek(60);
nlines_bytes = binary_file.read(4)
#nslices = int.from_bytes(nslices_bytes, signed=True)
nlines = struct.unpack("@I",nlines_bytes)[0]
return nlines
def loadDataMultix(self,geostruct):
import glob
import re
#import loadRawData_Cstyle
from dataStructures import loadRawData_Cstyle
fnames = glob.glob1(self.datapath_head, "*.bin")
if(len(fnames)<2):
exit(1);
# sort the names
digits = re.compile(r'(\d+)')
def tokenize(filename):
return tuple(int(token) if match else token
for token, match in
((fragment, digits.search(fragment))
for fragment in digits.split(filename)))
# Now you can sort your file names like so:
fnames.sort(key=tokenize)
nproj = int(geostruct["nproj"])
nenergy = int(128)
ndet = int(geostruct["ndet"])
nslices = 1
#==> ???? nslices = ???? <===#
nlines = self.getNumberOfSlices(self.datapath_head+os.path.sep+fnames[0])
linesPerSlice=1
linesPerSlice = nlines
#if(geostruct.has_key("nSliceLines")):
if not ('vol' in geostruct):
linesPerSlice = nlines
elif 'nSliceLines' in geostruct:
if(geostruct["vol"] and (geostruct["nSliceLines"]!=None)):
geostruct["nSliceLines"]=int(geostruct["nSliceLines"])
linesPerSlice = geostruct["nSliceLines"]
if(linesPerSlice>1):
nslices = int(nlines / linesPerSlice)
else:
nslices = nlines
# we need to get the number of slices to resize the field correctly. Any way to determine this upfront ? #
self.data = numpy.zeros((nenergy,ndet,nslices,nproj+1), dtype=numpy.double, order="F")
#raw_data = octave.load_raw_tomography(datapath_head,geostruct)
#raw_data,data_param = octave.feval("load_raw_tomography", datapath_head, geostruct, nout=2)
#rather replicate 'load_raw_data' file in python, working on file-by-file basis
volData = False
if(geostruct["vol"]>0):
volData=True
#get all files with .bin ending in folder 'datapath_head'
for i in range(0, len(fnames)):
## OCTAVE SOLUTION ##
#loaded = octave.read_multix_bin_files(datapath_head, fnames[i])
#raw_data[:,:,:,i]=octave.process_multi_lines(loaded["mltdata"], loaded["DataPara"]["NumIntTime"],volData)
## PYTHON NATIVE SOLUTION
reader = loadRawData_Cstyle.loadRawData_Cstyle()
##reader.read_multix_bin_file(os.path.join(datapath_head, fnames[i]), linesPerSlice)
##reader.process_multi_lines(True)
##raw_data[:,:,:,i]=reader.getRawData()
#
print(nslices)
print(linesPerSlice)
ret = reader.read_multix_bin_file(os.path.join(self.datapath_head, fnames[i]))
if ret==False:
print ("Error reading file '"+fnames[i]+"'. EXITING ...")
exit(1)
reader.process_multi_lines(True)
reader.average_lines(nslices, linesPerSlice)
# if(linesPerSlice>1):
# cnt=0
# binVal = linesPerSlice
# lineData = reader.getRawData()
# dims = lineData.shape
# for sliceIndex in itertools.islice(itertools.count(),0,nslices):
# stIndex = cnt
# edIndex = min(cnt+binVal, dims[2])
# #raw_data[:,:,sliceIndex,i] = numpy.squeeze(numpy.median(reader.getRawData()[:,:,stIndex:edIndex], axis=2))
# raw_data[:,:,sliceIndex,i] = numpy.nanmean(lineData[:,:,stIndex:edIndex], axis=2, keepdims=False)
# cnt = cnt+binVal
# else:
# raw_data[:,:,:,i]=reader.getRawData()
#raw_data[:,:,:,i]=reader.getRawData()
data_avg = reader.getAveragedData_FortranOrder()
print ("data_avg.shape: ", data_avg.shape)
import pylab
pylab.figure(1)
pylab.imshow(data_avg.squeeze())
pylab.show()
self.data[:,:,:,i]=data_avg
print ("read projection %d ..." % (i))
#raw_data = numpy.squeeze(raw_data)
class CorrectedData(ProjectionData):
def __init__(self, datapath_head, file_name = "corrected"):
super(CorrectedData,self).__init__(datapath_head = datapath_head, sub_dir="corrected", file_name = file_name)
def loadData(self, file_ext = None):
super(CorrectedData,self).loadData(file_ext)
#print ("corrected data loaded dims" + str(self.data.shape))
self.convertTo4D()
#start = time.time()
        #self.data = numpy.array(numpy.transpose(self.data, [0,1,3,2])) # TODO: why do we do this here? If needed for correction, do the transpose right before it is needed
#shape = self.data.shape
#print(self.data.shape)
#import pylab
#pylab.gray()
#pylab.figure(1)
#print(self.data.shape)
#print(self.data[40,:,:,0].shape)
#pylab.imshow(self.data[40,:,:,100].squeeze())
#pylab.show()
#end = time.time()
#print("transpose: "+str(end - start))
#print ("corrected data new dims" + str(self.data.shape))
def loadDataMat(self):
self.data = octave.load_corrected_data(self.datapath_head)
class SinogramData(ProjectionData):
def __init__(self, datapath_head, file_name = "sinogram"):
super(SinogramData,self).__init__(datapath_head = datapath_head, sub_dir="sinogram", file_name = file_name)
def loadData(self, file_ext = None):
super(SinogramData,self).loadData(file_ext)
self.convertTo4D()
start = time.time()
#self.removeNans()
end = time.time()
#print("remove Nans: "+str(end - start))
minVal = numpy.nanmin(self.data)
print("minVal: " + str(minVal))
def loadDataMat(self):
#print("loadDataMat: ")
#self.data = octave.load_sinogram_data(self.datapath_head)
self.data = octave.load_data(self.datapath_head, 'sinogram', 'sinogram');
minVal = numpy.nanmin(self.data)
print("minVal: " + str(minVal))
#print( self.data.dtype)
def compute(self,ProjectionData):
#corrected_data = numpy.array(numpy.transpose(corrected_data, [0,1,3,2]))
#print "correction shape after permutation: "+str(corrected_data.shape) #should be: <e><d><p><s>
sinoShape = (ProjectionData.data.shape[0], ProjectionData.data.shape[1], ProjectionData.data.shape[2], ProjectionData.data.shape[3]-1)
self.data = numpy.zeros(sinoShape, dtype=numpy.double, order="F")
for sliceIndex in itertools.islice(itertools.count(),0,ProjectionData.data.shape[2]):
data_slice = ProjectionData.data[:,:,sliceIndex,:]
if sliceIndex == 0:
print ("Slice shape: "+str(data_slice.shape))
#sliceShape = (slice.shape[0], slice.shape[1], 1, slice.shape[2]-1)
#slice_reshaped = numpy.squeeze(slice) # - here in Python, a mid-index field is already squeezed
#sinogram_slice = octave.compute_sinograms(slice_reshaped)
#sinogram_slice = numpy.expand_dims(sinogram_slice, 2)
#sinogram_data[:,:,sliceIndex,:] = numpy.reshape(sinogram_slice, sliceShape, 'F')
sinogram_slice = octave.compute_sinograms(data_slice)
#sinogram_slice = octave.compute_sinograms3D(slice)
#sinogram_slice = numpy.reshape(sinogram_slice, slice.shape, order='F')
#print sinogram_slice.shape
#flat_field = data_slice[:,:,0];
#flat_field_rep = numpy.repeat(flat_field[:, :, numpy.newaxis], data_slice.shape[2]-1, axis=2)
#print(flat_field_rep.shape)
#data_slice_norm = data_slice[:,:,1:]/flat_field_rep
#sinogram_slice = -numpy.log(1e-8+data_slice_norm)
self.data[:,:,sliceIndex,:] = sinogram_slice
def add_nooise(self, n=0):
#print(self.data.shape[1:])
#raise SystemExit
data_ = numpy.copy(self.data)
##self.data[[8,20],:,:,:] = self.data[[8,20],:,:,:] + numpy.random.normal(0,n, size=(2,self.data.shape[1],self.data.shape[2],self.data.shape[3]))
data_[[8,20],:,:,:] = self.data[[8,20],:,:,:] + numpy.random.normal(0,n, size=(2,self.data.shape[1],self.data.shape[2],self.data.shape[3]))
return data_
class ImageData(CTData):
def averageChannels(self):
self.data = self.data.mean(3, keepdims = True)
def dumpAllImagesInDir(self, dir_path):
if(os.path.exists(dir_path) == False):
os.mkdir(dir_path)
numChannels = self.data.shape[3]
for channelIndex in itertools.islice(itertools.count(),0,numChannels):
#channelPath = data_path+os.path.sep+("channel_%04d" % (channelIndex))
numSlices = self.data.shape[2]
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
fname = dir_path+os.path.sep+self.file_name+'_'+str(channelIndex).zfill(3)+'_'+str(sliceIndex).zfill(3)+'.png'
I = self.data[:,:,sliceIndex,channelIndex]
I8 = (((I - I.min()) / (I.max() - I.min())) * 256).astype(numpy.uint8)
I8 = 255-I8
img = Image.fromarray(I8)
img.save(fname)
def saveDataAs2DImages(self, file_ext, channelIndex, channelPath):
numSlices = self.data.shape[2]
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
I = self.data[:,:,sliceIndex,channelIndex]
if "tiff" in file_ext:
self.saveImageAsTiff(channelPath,I,sliceIndex)
if "png" in file_ext:
self.saveImageAsGreyScale(channelPath,I,sliceIndex)
def saveDataAs3DImage(self, data_path, channelIndex):
fname = data_path + os.path.sep + ("image_channel%04d.mhd" % (channelIndex))
dimArray = numpy.array([self.data.shape[0], self.data.shape[1], self.data.shape[2]], dtype=numpy.uintc)
spaceArray = numpy.ones(3, dtype=numpy.float32)
arrayLen = self.data.shape[0]*self.data.shape[1]*self.data.shape[2]
selectedArray = numpy.reshape(self.data[:,:,:,channelIndex], arrayLen, 'F')
# sliceFile = mhd.MhdFile()
# sliceFile.SetDimensions(dimArray)
# sliceFile.SetSpacing(spaceArray)
# sliceFile.setDataAsDouble(selectedArray.astype(numpy.double))
# sliceFile.setFilename(fname)
# sliceFile.writeFile()
def saveDataAsImages(self, data_path, file_ext):
print (self.data.shape)
numChannels = self.data.shape[3]
#numSlices = self.data.shape[2]
for channelIndex in itertools.islice(itertools.count(),0,numChannels):
if "tiff" in file_ext or "png" in file_ext:
channelPath = data_path+os.path.sep+("channel_%04d" % (channelIndex))
if(os.path.exists(channelPath) == False):
os.mkdir(channelPath)
self.saveDataAs2DImages(file_ext, channelIndex, channelPath)
if "mhd" in file_ext:
self.saveDataAs3DImage(data_path, channelIndex)
def compute_TV(self):
numSlices = self.data.shape[2]
numChannels = self.data.shape[3]
self.TV = numpy.zeros(shape=(numSlices,numChannels))
self.TV_s = numpy.zeros(numSlices)
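        # Per (slice, channel) pair: TV = sum over pixels of sqrt(gx^2 + gy^2),
        # i.e. the l2 norm of the discrete image gradient; TV_s below then
        # collapses the channel axis with an l2 norm per slice.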
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
#TV_c = numpy.zeros(numChannels)
for channelIndex in itertools.islice(itertools.count(),0,numChannels):
image = self.data[:,:,sliceIndex,channelIndex]
g_x, g_y = numpy.gradient(image)
g_norm2 = g_x**2 + g_y**2
TV_ = numpy.sum(numpy.sqrt(g_norm2))
self.TV[sliceIndex,channelIndex] = TV_
#print(w.shape)
#import matplotlib.pyplot as plt
#print (image.shape)
#plt.figure()
#plt.imshow(image, cmap='gray')
#plt.show()
TV_c_norm2 = self.TV[sliceIndex,:]**2
self.TV_s[sliceIndex] = numpy.sqrt(numpy.sum(TV_c_norm2))
def compute_error(self, ref):
print(self.data.shape)
print(ref.data.shape)
assert (self.data.shape == ref.data.shape),"shape should be equal in both sets!"
numSlices = self.data.shape[2]
numChannels = self.data.shape[3]
dist = numpy.linalg.norm(self.data-ref.data)
print(dist)
#self.TV = numpy.zeros(shape=(numSlices,numChannels))
#self.TV_s = numpy.zeros(numSlices)
self.SSIM = numpy.zeros(shape=(numSlices,numChannels))
self.SSIM_s = numpy.zeros(numSlices)
self.MAE = numpy.zeros(shape=(numSlices,numChannels))
self.MAE_s = numpy.zeros(numSlices)
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
#TV_c = numpy.zeros(numChannels)
for channelIndex in itertools.islice(itertools.count(),0,numChannels):
image = self.data[:,:,sliceIndex,channelIndex]
image_ref = ref.data[:,:,sliceIndex,channelIndex]
#g_x, g_y = numpy.gradient(image)
#g_norm2 = g_x**2 + g_y**2
#TV_ = numpy.sum(numpy.sqrt(g_norm2))
#self.TV[sliceIndex,channelIndex] = TV_
#self.MAE[sliceIndex,channelIndex] = numpy.linalg.norm(image-image_ref,ord=1)
self.MAE[sliceIndex,channelIndex] = mae(image,image_ref)
self.SSIM[sliceIndex,channelIndex] = ssim(image,image_ref)
#print(w.shape)
#import matplotlib.pyplot as plt
#print (image.shape)
#plt.figure()
#plt.imshow(image, cmap='gray')
#plt.show()
#TV_c_norm2 = self.TV[sliceIndex,:]**2
#TV_c_norm1 = abs(self.TV[sliceIndex,:])
#self.TV_s[sliceIndex] = numpy.sqrt(numpy.sum(TV_c_norm2))
self.MAE_s[sliceIndex] = numpy.mean(self.MAE[sliceIndex,:])
self.SSIM_s[sliceIndex] = numpy.mean(self.SSIM[sliceIndex,:])
#self.TV_s[sliceIndex] = numpy.sum(TV_c_norm1)
#print(TV_c)
#print(self.TV_s)
#print(self.TV)
#raise SystemExit
#return TV_c, TV_s
def get_metric(self, error_type, slice_id):
if error_type == 'TV':
return numpy.log(self.TV_s[slice_id]), numpy.log(self.TV[slice_id,:].squeeze())
elif error_type == 'MAE':
return self.MAE_s[slice_id], self.MAE[slice_id,:].squeeze()
#return -numpy.log(self.MAE_s[slice_id]), -numpy.log(self.MAE[slice_id,:].squeeze())
elif error_type == 'SSIM':
#return -numpy.log(self.SSIM_s[slice_id]), -numpy.log(self.SSIM[slice_id,:].squeeze())
return self.SSIM_s[slice_id], self.SSIM[slice_id,:].squeeze()
class ReconstructionData(ImageData):
def __init__(self, datapath_head, file_name = "reconstruction"):
super(ReconstructionData,self).__init__(datapath_head = datapath_head, sub_dir="reconstructed", file_name = file_name)
def rearrange_data_dl(self):
#print("rearrange_data_dl:")
#print(self.data.shape)
#self.data = self.data.transpose((2, 0, 1, 3))
#print(self.data.shape)
#self.data = self.data.transpose()
#print(self.data.shape)
self.data = self.data.transpose((3, 1, 0, 2))
#print(self.data.shape)
#raise SystemExit
#def loadDataH5Silce(self, value):
#self.data = numpy.array(value[:,self.z_load_slice,:,:], order='F').transpose()
# def loadDataH5(self, data_path):
# f = h5py.File(data_path,'r')
# self.data = numpy.array(f['data']['value'], order='F').transpose()
# f.close()
# return True
def saveDataForDL(self,save_dir):
self.rearrange_data_dl()
self.datapath_head = save_dir
self.sub_dir = 'images/'
self.saveData()
def setLegendInfo(self, name, color, style='-'):
self.Lname = name
self.Lcolor = color
self.Lstyle = style
class LabelData(CTData):
def __init__(self, datapath_head, file_name = "segmented"):
super(LabelData,self).__init__(datapath_head = datapath_head, sub_dir="segmented", file_name = file_name)
#def getDirPath(self):
#return self.datapath_head + os.path.sep + "manualSegmentation" + os.path.sep
def loadData(self, file_ext = None):
super(LabelData,self).loadData(file_ext)
def loadDataH5Silce(self, value):
self.data = numpy.array(value[self.z_load_slice,:,:], order='F').transpose()
def saveDataAsImages(self, data_path, file_ext):
print (self.data.shape)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!shape printed")
self.data = self.data +1
#numChannels = self.data.shape[0]
numSlices = self.data.shape[2]
#for channelIndex in itertools.islice(itertools.count(),0,numChannels):
#channelPath = data_path+os.path.sep+("channel_%04d" % (channelIndex))
#if(os.path.exists(channelPath) == False):
#os.mkdir(channelPath)
for sliceIndex in itertools.islice(itertools.count(),0,numSlices):
I = self.data[:,:,sliceIndex]
if "tiff" in file_ext:
self.saveImageAsTiff(data_path,I,0)
if "png" in file_ext:
self.saveImageAsGreyScale(data_path,I,sliceIndex)
def loadLabelMap(self):
file = self.datapath_head + "/processed/segmented/"+'label_map.txt'
with open(file) as f:
content = f.readlines()
#print(content)
self.label_names = [''] * len(content)
self.label_ids = [0] * len(content)
for seg_id in itertools.islice(itertools.count(), 0, len(content)):
content = [x.split('\n')[0] for x in content]
contents = content[seg_id].split(' ')
#label_name = content[seg_id].split(' ')[1]
self.label_ids[seg_id] = int(contents[0])
self.label_names[seg_id] = contents[1]
#content = [x.split(':')[0] for x in content]
#content = [x.split('\n')[0] for x in content]
#return content
print(self.label_names)
print(self.label_ids)
#----------------------------------------------------------not used
def extractSegmentValues(self, reconstruction_data):
ch_no = reconstruction_data.data.shape[3]
self.data_all = numpy.zeros((1,ch_no))
self.label_all = ['None']
self.data_mean = numpy.zeros((len(self.label_ids),ch_no))
self.data_std = numpy.zeros((len(self.label_ids),ch_no))
print("len(self.label_ids))",len(self.label_ids))
for seg_id in itertools.islice(itertools.count(), 0, len(self.label_ids)):
print("label_id is:")
print(self.label_ids[seg_id])
x, y, z = numpy.where(self.data==(self.label_ids[seg_id]+1))
#print(x,y,z)
print(reconstruction_data.data.shape)
recons_data_seg = reconstruction_data.data[x,y,z,:]
recons_data_seg = recons_data_seg.squeeze()
print(recons_data_seg.shape)
import matplotlib.pyplot as plt
plt.plot(x, x, label='linear')
x = numpy.arange(1,33)
plt.plot(x,recons_data_seg[:100,:].transpose())
plt.show()
#recons_data_seg = recons_data_seg.transpose()
self.data_mean[seg_id,:] = numpy.mean(recons_data_seg, axis = 0)
self.data_std[seg_id,:] = numpy.std(recons_data_seg, axis = 0)
self.data_all = numpy.vstack((self.data_all, recons_data_seg))
self.label_all = numpy.hstack((self.label_all, [self.label_names[seg_id]]*recons_data_seg.shape[0]))
print("------------1234-------------")
print(recons_data_seg.shape)
self.data_all = self.data_all[1:,]
self.label_all = self.label_all[1:]
#-------------------------------------------------------------not used
def saveExtractedValue(self):
numpy.savetxt(self.datapath_head + "/processed/segmented/"+"LAC_all.csv", self.data_all, delimiter=",")
numpy.savetxt(self.datapath_head + "/processed/segmented/"+"labels_all.txt", self.label_all, delimiter=" ", fmt="%s")
numpy.savetxt(self.datapath_head + "/processed/segmented/"+"LAC_mean.csv", self.data_mean, delimiter=",")
numpy.savetxt(self.datapath_head + "/processed/segmented/"+"LAC_std.csv", self.data_std, delimiter=",")
numpy.savetxt(self.datapath_head + "/processed/segmented/"+"labels_mean.txt", self.label_names, delimiter=" ", fmt="%s")
class SynthImages(ImageData):
def __init__(self, datapath_head, file_name = "images"):
super(SynthImages,self).__init__(datapath_head = datapath_head, sub_dir="images", file_name = file_name)
def loadData(self, file_ext = None):
super(SynthImages,self).loadData(file_ext)
def loadDataH5(self, data_path):
f = h5py.File(data_path,'r')
self.data = numpy.array(f['data']['value'], order='F').transpose((1, 2, 0, 3))
f.close()
return True
def getDirPath(self):
print("getDirPath")
print("self.datapath_head :" + self.datapath_head)
print("self.sub_dir :" + self.sub_dir)
print("self.file_name :" + self.file_name)
print (self.datapath_head + os.path.sep + self.sub_dir + self.file_name)
return self.datapath_head + os.path.sep + self.sub_dir + self.file_name
| [
"[email protected]"
] | |
03e89042f9e43ee0f4a84569e259fbf7739b5baa | 0f0440c398ce75044c0e54b12d6c0bc5d1e7a167 | /sitepr/votacao/models.py | 29d4c61720428aa026f4d758364f806a59e3b476 | [] | no_license | ElSulphur/DIAM | 8511b15681861c5198479bfdf18455656a5b60ba | 726f4df785ee5b7b6c58d961b4bb6621de55052f | refs/heads/master | 2023-03-31T20:23:38.345012 | 2021-04-10T22:57:46 | 2021-04-10T22:57:46 | 356,336,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | from django.db import models
# Create your models here.
from django.utils import timezone
from six import string_types
import datetime
class Questao(models.Model):
questao_texto = models.CharField(max_length=200)
pub_data = models.DateTimeField('data de publicacao')
def __str__(self):
return self.questao_texto
def foi_publicada_recentemente(self):
return self.pub_data >= timezone.now() - datetime.timedelta(days=1)
class Opcao(models.Model):
questao = models.ForeignKey(Questao, on_delete=models.CASCADE)
opcao_texto = models.CharField(max_length=200)
votos = models.IntegerField(default=0)
def __str__(self):
return self.opcao_texto
| [
"[email protected]"
] | |
82dce29d8fa0ff40bc86b3b1042df98f9ff565eb | cdc410b6025ae28e8184b6f92a7e324337fca320 | /dynamicprogramming/even_length.py | 156d355542766fa2c0e93d54d9e47d9b77d26cff | [] | no_license | superwololo/codingpractice | e9ffbe0b4673879ecda45072c5cfa3822ac28ef5 | 0106c04e2b3ec74f5a55467c6bee100e52fd3970 | refs/heads/master | 2020-12-26T12:31:35.239153 | 2020-01-31T20:28:25 | 2020-01-31T20:28:25 | 237,511,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py |
"""
arr1 = [1, 2, 3, 1, 2, 3]
arr2 = [1, 5, 3, 8, 0, 2, 3]
def longest_even(arr, index):
cumsum -> [1, 6, 9, 17, 0, 19, 22]
k = 1
k = 2
"""
https://livecode.amazon.jobs/session/aca517f3-53c6-4c69-befb-5236c3ae399f
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example :
Input: 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown below.
[
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
"""
1 1 1 1
1 1 1 1
1 1 1 1
1 1 1 1
"""
import copy
class Board(object):
def __init__(self, n):
self.n = n
self.horizontal = [0] * n
self.vertical = [0] * n
self.diag_up = [0] * (2*n - 1) # Tricky
self.diag_down = [0] * (2*n - 1) # Tricky
self.num_queens = 0
self.visited = set([])
self.complete_solutions = []
def _set_queen(self, row, col, value):
self.horizontal[row] = value
self.vertical[col] = value
self.diag_up[row + col] = value
        self.diag_down[self.n - row + col - 1] = value # might need to double-check this index
def add(self, row, col):
self._set_queen(row, col, 1)
self.num_queens = self.num_queens + 1
self.visited.add((row, col))
        if self.num_queens == self.n:
self.complete_solutions.append(copy.copy(self.visited))
def remove(self, row, col):
self._set_queen(row, col, 0)
self.num_queens = self.num_queens - 1
self.visited.remove((row, col))
def is_valid(self, row, col):
        return all([
            self.horizontal[row] == 0,
            self.vertical[col] == 0,
            self.diag_up[row + col] == 0,
            self.diag_down[self.n - row + col - 1] == 0,
        ])
def all_valid(self):
valid = []
for row in xrange(self.n):
for col in xrange(self.n):
if self.is_valid(row, col):
valid.append((row, col))
return valid
def solution(n):
board = Board(n)
solutions(board)
return len(board.complete_solutions)
def solutions(board):
    # Place queens row by row (one per row): restricting candidates to
    # row == board.num_queens generates each distinct solution exactly once
    # instead of once per ordering of the queens.
    next_queens = [(r, c) for (r, c) in board.all_valid() if r == board.num_queens]
    for row, col in next_queens:
board.add(row, col)
solutions(board)
        board.remove(row, col)
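

# Minimal manual check (hypothetical usage; the classic answer for a 4x4
# board is 2 distinct solutions):
if __name__ == "__main__":
    print(solution(4)) # expected: 2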
| [
"[email protected]"
] | |
43a474811ccf00c5c702c89371ec5c28d26e0e29 | fe328aa4eaf907be3808d2e5a5815fb336f38d09 | /src/RunBot.py | 81a926a15205afa52f9759662378b85786a3640c | [
"MIT"
] | permissive | Ashvio/ProPlayerInfoBot | b3845113531fae19cd7a1d550ded9d50f1031d62 | 5da07962ecf1480db1b51e3192ecd095db8fa89c | refs/heads/master | 2021-01-13T00:55:39.583643 | 2016-09-02T21:04:56 | 2016-09-02T21:04:56 | 48,395,769 | 0 | 0 | null | 2016-09-02T21:04:57 | 2015-12-21T21:45:18 | null | UTF-8 | Python | false | false | 2,899 | py | from src.DatabaseManager import DatabaseManager, load_db, is_video, get_url
from src.Player import to_comment, Video, Player
import praw
import time
import re
def save(manager, filename):
manager.save_db(filename)
def get_players(title, db):
words = re.split(" |'", title)
# for value in db.values():
# print(value.name)
# print(value.region)
player_names = []
for word in words:
if word.lower() in db.keys():
player_names.append(word.lower())
if len(player_names) is 0:
return None
else:
players = []
for player in player_names:
players.append(db[player])
return players
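
# Rough example (hypothetical): if db maps the key "faker" to a Player object,
# a title like "Faker's pentakill highlights" is split on spaces/apostrophes
# and this returns [db["faker"]].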
def run_bot():
UA = "Pro Player Info-- Helps players on /r/leagueoflegends learn about pro players and see their old plays. " \
"Contact /u/ashivio."
login = "ProPlayerInfoBot"
pw = "fake_password"
filename = "../Databases/dict-2-25-16.db"
time0 = time.time()
r = praw.Reddit(UA)
r.login(login, pw, disable_warning=True)
# database.find_videos()
print("Loading database...", end="")
manager = load_db(filename)
print("[DONE]")
db = manager.database
# manager.done_submissions = []
print("Reading submissions...")
for s in praw.helpers.submission_stream(r, "bottesting", limit=1):
print("Reading next submission: " + s.title)
time1 = time.time()
# Backup database every 5 minutes
if time1 - time0 > 300:
time0 = time1
if not is_video(s):
continue
if manager.is_done(s):
continue
title = s.title.lower()
players = get_players(title=title, db=db)
if players is None:
print("failed")
continue
head = "Hello! I am a new bot to help you find information and resources about your favorite pro players" \
". I noticed your post mentioned at least one pro player, so I have put together some information and " \
"past videos about them. \n\n **Player(s) found in this post:**\n\n"
body = to_comment(players) + "#\n\n"
tail = "***\nmeep moop. \n\n Feedback or questions? Is this posted on " \
"something that doesn't have to do with pro players? Message me or my owner, /u/ashivio, or just reply" \
"to this comment."
print("Replying to submission at " + s.permalink + "...", end="")
video = Video(s.title, get_url(s), players, s.score)
# noinspection PyTypeChecker
for player in players:
player.add_video(video)
s.add_comment(head + body + tail)
print("[DONE]")
manager.add_submission(s)
save(manager, filename)
if __name__ == "__main__":
while True:
try:
run_bot()
        except Exception as e:
            print(str(e))
| [
"[email protected]"
] | |
33c0fe675935c71160a22f399c58efd34b7b34b8 | 4305377e2d58954adcfc5991c80f5dd3bd2bfab6 | /cgi-bin/profileDBManager.py | 3118db001607b872abd1d6ee2f04202413468cf3 | [] | no_license | gypdtc/CSC_410_website | 5fb5048381698b71c05087aa09709dee4bfb0218 | 81de0241d5a5cf56d55af58a13e68cd6213b68e3 | refs/heads/master | 2021-01-13T11:17:09.727218 | 2016-12-24T01:40:52 | 2016-12-24T01:40:52 | 77,259,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | import pymysql
from dbconnector import connect
from datetime import datetime
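
# Hypothetical sketch of the user_account table these queries assume (column
# names come from the SQL below; the types are guesses, not the real DDL):
#
#   CREATE TABLE user_account (
#       user_ID       VARCHAR(64) PRIMARY KEY,
#       password      VARCHAR(128),
#       salt          VARCHAR(64),
#       nickname      VARCHAR(64),
#       email_address VARCHAR(128)
#   );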
def update(user_ID , post_data):
update_sql = 'UPDATE user_account set nickname = %s , email_address = %s where user_ID = %s'
# print user_ID
#connect to DB
con = connect()
if con == None:
return 'Error'
try:
with con.cursor() as cursor:
cursor.execute(update_sql,(post_data['nickname'],
post_data['email_address'],
user_ID
));
# print "!!!!!!!!!!!!!!!!!!!!"
con.commit()
con.close()
return True
except Exception as e:
print e
# print "#####################"
con.close()
return False
def change_password(user_ID, password , salt):
insert_sql = 'UPDATE user_account SET password = %s , salt = %s where user_ID = %s'
#connect to DB
con = connect()
if con == None:
return 'Error'
try:
with con.cursor() as cursor:
cursor.execute(insert_sql,(password,salt,user_ID));
con.commit()
con.close()
return True
except Exception as e:
print e
con.close()
return False
# query_profile - retrieve a user's profile
def query_profile(username):
    # query user_ID, email address and nickname for the given user
query_sql = 'SELECT user_ID,email_address,nickname FROM user_account WHERE user_ID = %s'
results = None
#connect to DB
con = connect()
if con == None:
return 'Error'
try:
with con.cursor(pymysql.cursors.DictCursor) as cursor:
count = cursor.execute(query_sql,(username));
con.commit()
if count > 0:
results = cursor.fetchall();
con.close()
return results
except Exception as e:
print e
con.close()
return None
| [
"[email protected]"
] | |
43a65b4c9ab7a04e313f976ac7e7cc37d94dab2b | cfc46fd56c16ac9c010bcf0c1eb50da3047d1b2b | /tests/metrics/test_num_capability_types_count.py | 51dddd5c9cf6c11bab8bb61b7dd79b78f12e4e20 | [
"Apache-2.0"
] | permissive | radon-h2020/radon-tosca-metrics | d93ef5b3dc53c7863ba98a985919237fe6c4aadf | d0a10e10f2d897299a04f69290f09d5589bc039f | refs/heads/master | 2021-08-24T13:53:43.207745 | 2021-07-06T08:44:00 | 2021-07-06T08:44:00 | 242,997,596 | 3 | 0 | Apache-2.0 | 2021-03-29T13:47:46 | 2020-02-25T12:45:05 | Python | UTF-8 | Python | false | false | 905 | py | import unittest
from parameterized import parameterized_class
from toscametrics.blueprint.num_capability_types import NumCapabilityTypes
yaml_0 = 'tosca_definitions_version: tosca_simple_yaml_1_0'
yaml_2 = '''
tosca_definitions_version: yorc_tosca_simple_yaml_1_0
capability_types:
tosca.capabilities.Root:
description: The TOSCA root Capability Type all other TOSCA base Capability Types derive from
tosca.capabilities.Node:
derived_from: tosca.capabilities.Root
description: The Node capability indicates the base capabilities of a TOSCA Node Type.
'''
@parameterized_class([
{'yaml': yaml_0, 'expected': 0},
{'yaml': yaml_2, 'expected': 2}
])
class TestNumCapabilityTypesCount(unittest.TestCase):
def setUp(self):
self.blueprint = self.yaml.expandtabs(2)
def test(self):
self.assertEqual(NumCapabilityTypes(self.blueprint).count(), self.expected)
| [
"[email protected]"
] | |
53e06503760a12ac6ce4123864321dd77ff2a55a | 2ec8c8e6786af00fde9b3842e6e0e0f97ee6b4e2 | /resumeparser/RP_RestAPI/urls.py | 76ae5802c112b98050c885161b7111f90a3b198b | [
"MIT"
] | permissive | job-hax/resume-parser | 6e842a277152fc513c80f38c6b714712d3209d06 | 4793702f24581d88ca021379341a652e42514659 | refs/heads/master | 2022-04-27T07:25:30.128013 | 2019-12-25T01:51:40 | 2019-12-25T01:51:40 | 221,457,165 | 19 | 11 | MIT | 2022-04-22T22:43:37 | 2019-11-13T12:43:58 | Python | UTF-8 | Python | false | false | 225 | py | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from RP_RestAPI import views
urlpatterns = [
path('', views.resume_parser),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"[email protected]"
] | |
d37066a5a6bf7f763b5b3e013e4fd15fa44146dd | e970c6bfb725a038a17600763db44e21e2591f18 | /ex43_classes.py | 9c23e10614c52b0286a29fe6cd006b1511725e08 | [] | no_license | gabyborja/learnpython | 5b9dede94c7bbf2087d79833c4dd0ee099b29d23 | 13bccb5723c125e28646da694cafec4f581ba55f | refs/heads/main | 2023-03-08T02:14:44.447713 | 2021-02-17T11:22:58 | 2021-02-17T11:22:58 | 329,461,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,425 | py | from sys import exit
from random import randint
from textwrap import dedent
class Scene(object):
def enter(self):
print("This scene is not yet configured.")
print("Subclass it and implement enter().")
exit(1)
class Engine(object):
def __init__(self, scene_map):
print(f"Engine __init__")
self.scene_map = scene_map
print(f"Engine __init__ {scene_map}")
def play(self):
current_scene = self.scene_map.opening_scene() # Get the opening scene from the map that you specify
print(f"Engine.play current scene {current_scene}")
last_scene = self.scene_map.next_scene('finished') # Get the last scene from the map that you specify
print(f"Engine.play last scene {last_scene}")
while current_scene != last_scene: # Loop through this loop if you haven't reached the last scene
print(f"Engine current_scene {current_scene}")
next_scene_name = current_scene.enter() # enter current scene and return next scene
print(f"Engine next_scene_name {next_scene_name}")
            current_scene = self.scene_map.next_scene(next_scene_name) # get the next scene according to the scene map and set to current scene
print(f"Engine current_scene {current_scene}")
# be sure to print out the last scene
current_scene.enter() # prints out the last scene after exiting the while loop
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your Mom would be proud... if she were smarter.",
"Such a loser.",
"I have a small puppy that's better at this.",
"You're worse than your Dad's jokes."
]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
class CentralCorridor(Scene):
def enter(self):
print(dedent("""
The Gothons of Planet Percal #26 have invaded your ship and
destroyed your entire crew. You are the last surviving
member and your last mission is to get the neutron destruct
bomb from the Weapons Armory, put it in the bridge, and
blow the ship up after getting into an escape pod.
You're running down the central corridor to the Weapons
Armory when a Gothon jumps out, red scaly skin, dark grimy
teeth, and evil clown costume flowing around his hate
filled body. He's blocking the door to the Armory and
about to pull a weapon to blast you.
"""))
action = input("> ")
if action == "shoot!":
print(dedent("""
Quick on the draw you yank out your blaster and fire
it at the Gothon. His clown costume is flowing and
moving around his body, which throws off your aim.
Your laser hits his costume but misses him entirely.
This completely ruins his brand new costume his mother
bought him, which makes him fly into an insane rage
                and blast you repeatedly in the face until you are
dead. Then he eats you.
"""))
print("returning 'death'")
return 'death'
elif action == "dodge!":
print(dedent("""
Like a world class boxer you dodge, weave, slip and
                slide right as the Gothon's blaster cranks a laser
past your head. In the middle of your artful dodge
your foot slips and you bang your head on the metal
wall and pass out. You wake up shortly after only to
                die as the Gothon stomps on your head and eats you.
"""))
print("returning 'death'")
return 'death'
elif action == "tell a joke":
print(dedent("""
Lucky for you they made you learn Gothon insults in
the academy. You tell the one Gothon joke you know:
Lbakdfaf jasldkf jasdfl ;akf ajsfd;a , asldfkj asdfj
asd;fjasdlfkj. The Gothon stops, tries
not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in
the head putting him down, then jump through the
Weapon Armory door.
"""))
print("returning 'laser_weapon_armory'")
return 'laser_weapon_armory'
else:
print("Does not compute!")
return 'central_corridor'
class LaserWeaponArmory(Scene):
def enter(self):
print(dedent("""
You do a dive roll into the Weapon Armory, crouch and scan
            the room for more Gothons that might be hiding. It's dead
quiet, too quiet. You stand up and run to the far side of
the room and find the neutron bomb in its container.
There's a keypad lock on the box and you need the code to
get the bomb out. If you get the code wrong 10 times then
hte lock closes forever and you can't get the bomb. The code is
3 digits.
"""))
code = f"{randint(1,9)}{randint(1,9)}{randint(1,9)}"
print(code)
guess = input("[keypad]> ")
guesses = 0
while guess != code and guesses < 10:
print("WRONG!")
guesses += 1
guess = input("[keypad]> ")
if guess == code:
print(dedent("""
The container clicks open and the seal breaks, letting
gas out. You grab the neutron bomb and run as fast as
you can to the bridge where you must place it in the
right spot.
"""))
print("returning the_bridge")
return 'the_bridge'
else:
print(dedent("""
The lock buzzes one last time and then you hear
a sickening melting sound as the mechanism is fused
together. You decide to sit there, and finally the
Gothons blow up the ship from their ship and you die.
"""))
print("returning 'death'")
return 'death'
class TheBridge(Scene):
def enter(self):
print(dedent("""
You burst onto the Bridge with the neutron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
"""))
action = input("> ")
if action == "throw the bomb":
print(dedent("""
In a panic you throw the bomb at the group of Gothons
and make a leap for the door. Right as you drop it a
Gothon shoots you right in the back killing you. As
you did you see another Gothon frantically try to
disarm the bomb. You die knowing they will probably
blow up when it goes off.
"""))
return 'death'
elif action == "slowly place the bomb":
print(dedent("""
                You point your blaster at the bomb under your arm and
the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then
                carefully place the bomb on the floor, pointing your
blaster at it. You then jump back through the door,
punch the close button and blast the lock so the
                Gothons can't get out. Now that the bomb is placed
you run to the escape pod to get off this tin can.
"""))
return 'escape_pod'
else:
print("Does not compute!")
return "the_bridge"
class EscapePod(Scene):
def enter(self):
print(dedent("""
            You rush through the ship desperately trying to make it to
the escape pod before the whole ship explodes. It seems
like hardly any Gothons are on the ship, so your run is
clear of interference. You get to the chamber with the
            escape pods, and now need to pick one to take. Some of
them could be damaged but you don't have time to look.
There's 5 pods, which one do you take?
"""))
good_pod = randint(1,5)
print(good_pod)
guess = input("[pod #]> ")
if int(guess) != good_pod:
print(dedent("""
You jump into pod {guess} and hit the eject butotn.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body into
jam jelly.
"""))
return 'death'
else:
print(dedent(f"""
You jump into pod {guess} and hit the eject button.
The pod easily slides out into space heading to the
planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
"""))
return 'finished'
class Finished(Scene):
def enter(self):
print("You're finished. Well done.")
return 'finished'
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished()
}
def __init__(self, start_scene):
print(f"Map start_scene {start_scene}")
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
print(f"Map next_scene {val}")
return val
def opening_scene(self):
print(f"Map opening_scene {self.start_scene}")
print(f"Returning {self.next_scene(self.start_scene)}")
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play() | [
"[email protected]"
] | |
c5ba575380e28a585c9ac4e8e03714922bac52c8 | 8300081ddc9d64c22fdf4cce6fdac1fbdf8c04f1 | /config/settings/local.py | 14fa955e6ac315db9b91b1df3a847b41310d0bda | [
"MIT"
] | permissive | didils/patearn1 | 4234fe5cafce3f3beb435007f2ca971550837901 | 332d94acdce55ae15233b19d3fd6e2beea8c5122 | refs/heads/master | 2021-06-04T21:16:49.227953 | 2018-08-28T15:17:03 | 2018-08-28T15:17:03 | 146,205,150 | 0 | 0 | MIT | 2021-06-01T22:37:57 | 2018-08-26T18:16:01 | Python | UTF-8 | Python | false | false | 2,587 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='3W7RByHcPZqJQx7yBF4Cv8IHtgms1D5ULF8qaydOVFBzIUinbkuFGtMt3iv4BMJF')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
    "localhost",
    "0.0.0.0",
    "127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
23b62f0334b9a37e1575aa80724c0edee2a6e52b | 7680dbfce22b31835107403514f1489a8afcf3df | /Exercícios_parte_1/exercício__029.py | 4386d4d6650ace06a10d4a8400fb756767315b53 | [] | no_license | EstephanoBartenski/Aprendendo_Python | c0022d545af00c14e6778f6a80f666de31a7659e | 69b4c2e07511a0bd91ac19df59aa9dafdf28fda3 | refs/heads/master | 2022-11-27T17:14:00.949163 | 2020-08-03T22:11:19 | 2020-08-03T22:11:19 | 284,564,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # electronic speed radar
v = float(input("What is your car's current speed in km/h? "))
multa = ((v - 80) * 7)
if v <= 80.0:
    print('Have a good day! Drive safely!')
else:
    print('YOU HAVE BEEN FINED! You exceeded the speed limit of 80 km/h\nYou must pay a fine of R${:.2f}!'.format(multa))
    print('Have a good day! Drive safely!')
| [
"[email protected]"
] | |
e3c68f4ac6e886779865be178261a082ad6cca6f | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/distutils/command/install_egg_info.py | 0085fdca385a9fd1a67ca687b00ca2114190c80d | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,730 | py | # 2016.08.04 19:58:52 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/distutils/command/install_egg_info.py
"""distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
    """Install an .egg-info file for the package"""

    description = "Install package's PKG-INFO metadata as an .egg-info file"
    user_options = [('install-dir=', 'd', 'directory to install to')]

    def initialize_options(self):
        self.install_dir = None
        return

    def finalize_options(self):
        self.set_undefined_options('install_lib', ('install_dir', 'install_dir'))
        basename = '%s-%s-py%s.egg-info' % (to_filename(safe_name(self.distribution.get_name())), to_filename(safe_version(self.distribution.get_version())), sys.version[:3])
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]

    def run(self):
        target = self.target
        if os.path.isdir(target) and not os.path.islink(target):
            dir_util.remove_tree(target, dry_run=self.dry_run)
        elif os.path.exists(target):
            self.execute(os.unlink, (self.target,), 'Removing ' + target)
        elif not os.path.isdir(self.install_dir):
            self.execute(os.makedirs, (self.install_dir,), 'Creating ' + self.install_dir)
        log.info('Writing %s', target)
        if not self.dry_run:
            f = open(target, 'w')
            self.distribution.metadata.write_pkg_file(f)
            f.close()

    def get_outputs(self):
        return self.outputs


def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    return re.sub('[^A-Za-z0-9.]+', '-', name)


def safe_version(version):
    """Convert an arbitrary string to a standard version string

    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    version = version.replace(' ', '.')
    return re.sub('[^A-Za-z0-9.]+', '-', version)


def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return name.replace('-', '_')
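
# Worked round-trip sketch (added): safe_name('hello_world') gives
# 'hello-world', and to_filename() maps it back to 'hello_world' for use in
# the .egg-info filename built in finalize_options() above.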
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\distutils\command\install_egg_info.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:58:52 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
8e3ec9c35c3f22fd3e972293f675185cb418958e | 75d0009170fe44c315ce72a8c29e712ade3848c3 | /9_Outter_Function/_random_pop.py | 47f2d189c7f4a4c61652331e96df35c2ab7a60e0 | [] | no_license | moon4311/sp | d6a65d5b95bc51332b9a80a1410ffb6854a99f61 | 207758d4f4f7c28fa1cd9f277825313257782433 | refs/heads/master | 2021-09-14T07:20:49.257372 | 2018-05-09T13:28:42 | 2018-05-09T13:28:42 | 116,090,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import random
def random_pop(data):
    # number = random.randint(0, len(data) - 1)
    number = random.choice(data)
    # return data.pop(number)
    data.remove(number)
    return number
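
# Note (added): random.choice() + list.remove() picks a uniformly random
# element in O(n); the commented-out randint()/pop() variant above is
# equivalent when the items are unique, but remove() deletes the first
# matching value rather than a position.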

if __name__ == "__main__":
    data = [1, 2, 3, 4, 5]
    random.shuffle(data)  # shuffle the data
    while data: print(random_pop(data))
| [
"[email protected]"
] | |
3d89834b623f58e1db722a58a6c10d0aab51a050 | 670ce29ecc090e25f4f97ced4cd57ca622b100aa | /171-excel-sheet-column-number.py | 63106fc2cab019a23e82922c299f843b40ac143e | [] | no_license | Dinesh-Sivanandam/LeetCode | 94dcff32d6652e085c262d0a11ca51345e9d1fde | b9298bbba8a5d36e352aba9efbd4d2875c35a49b | refs/heads/master | 2023-04-20T01:31:26.516015 | 2021-05-20T00:57:33 | 2021-05-20T00:57:33 | 290,481,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        result = 0
        for letter in s:
            result = result * 26 + (ord(letter) - ord("A") + 1)
        return result
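
    # Worked example (added): "ZY" -> 0*26 + 26 for 'Z', then 26*26 + 25 for
    # 'Y', giving 701; each letter is a digit of a bijective base-26 number
    # where 'A' = 1 and 'Z' = 26.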

if __name__ == '__main__':
    sol = Solution()
    s = "ZY"
    result = sol.titleToNumber(s)
    print(result) | [
"[email protected]"
] | |
43489505ae9946941ddd6047e32d0228478da0fb | eee3ae1b9ff636fa23529b607cef12280691056b | /mysite/settings.py | 8b09ff37fbf3cc55011ffeac330ab209f0a2560c | [] | no_license | suryakandikonda/my-first-blog | bf68793ee6715f620f737f64964bbcefbef869d4 | fb92fb118463651f0c1aefabd67a0a82a1ac2f18 | refs/heads/master | 2021-05-26T09:05:31.864895 | 2020-04-08T14:35:14 | 2020-04-08T14:35:14 | 254,069,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,173 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4s-^ggl)n^#(a6ri*hm_b*t)7l)tyvv(gj3ox8u)(t1m%x$yy2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog.apps.BlogConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
c3167f44e52789fcb1bd320316272ce7c9a2f0ae | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.COT/Serif_8/pdf_to_json_test_Latn.COT_Serif_8.py | e4aedda09a59d48663e36d901911e8ca7937caf0 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.COT/Serif_8/udhr_Latn.COT_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
] | |
3def6d57e5465af715fc2d59609c2a22eb819f2b | 3f856dc08cd450cfd3f4df5a68aa43498e2b88b9 | /venv/Lib/site-packages/boto3/__init__.py | 2d2f4418beaaed00b503e7beb1466bc73d31f753 | [] | no_license | choudharyamit26/textTospeech | e5ac58625e1dab9e7db847bb3b42d7d6284c4948 | 24711e7a313dcf1916e213cf1e987c7535a4feca | refs/heads/main | 2023-02-23T02:55:42.240045 | 2021-01-25T11:45:54 | 2021-01-25T11:45:54 | 302,002,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.15.12'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
    """
    Set up a default session, passing through any parameters to the session
    constructor. There is no need to call this unless you wish to pass custom
    parameters, because a default session will be created for you.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)


def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
    """
    Add a stream handler for the given name and level to the logging module.
    By default, this logs all boto3 messages to ``stdout``.

        >>> import boto3
        >>> boto3.set_stream_logger('boto3.resources', logging.INFO)

    For debugging purposes a good choice is to set the stream logger to ``''``
    which is equivalent to saying "log everything".

    .. WARNING::
        Be aware that when logging anything from ``'botocore'`` the full wire
        trace will appear in your logs. If your payloads contain sensitive data
        this should not be used in production.

    :type name: string
    :param name: Log name
    :type level: int
    :param level: Logging level, e.g. ``logging.INFO``
    :type format_string: str
    :param format_string: Log message format
    """
    if format_string is None:
        format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"

    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    formatter = logging.Formatter(format_string)
    handler.setFormatter(formatter)
    logger.addHandler(handler)


def _get_default_session():
    """
    Get the default session, creating one if needed.

    :rtype: :py:class:`~boto3.session.Session`
    :return: The default session
    """
    if DEFAULT_SESSION is None:
        setup_default_session()
    return DEFAULT_SESSION


def client(*args, **kwargs):
    """
    Create a low-level service client by name using the default session.

    See :py:meth:`boto3.session.Session.client`.
    """
    return _get_default_session().client(*args, **kwargs)


def resource(*args, **kwargs):
    """
    Create a resource service client by name using the default session.

    See :py:meth:`boto3.session.Session.resource`.
    """
    return _get_default_session().resource(*args, **kwargs)
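
# Minimal usage sketch (added; the bucket name is hypothetical and AWS
# credentials are assumed to be configured locally):
#
#   import boto3
#   s3 = boto3.resource('s3')
#   for obj in s3.Bucket('my-example-bucket').objects.limit(5):
#       print(obj.key)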
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
    def emit(self, record):
        pass
logging.getLogger('boto3').addHandler(NullHandler())
| [
"[email protected]"
] | |
100ddc11054c0255d41bbb2c33745df5e14e4b4c | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/sankey/link/_targetsrc.py | 4e42976ca8999da96195282be5eaa6a49248ed35 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 403 | py | import _plotly_utils.basevalidators
class TargetsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(self, plotly_name="targetsrc", parent_name="sankey.link", **kwargs):
        super(TargetsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
| [
"[email protected]"
] | |
5dac70dadcc3b9398003e47c74b4b2e2c18ab7a4 | 15b4f3daf1a7858c0bfd020a8de41165b132564d | /bin/correction.py | 538aadb740d044c625b187f2edafac19457ddf4c | [] | no_license | Daalma7/Fairness-with-Many-Objective-Optimization | dd3eae0ae185021c0c28779d98a01c9b86aff4f7 | 9661e9ece243be43a6ea770027f4da7d2cf8c040 | refs/heads/master | 2023-07-12T05:24:11.144703 | 2021-08-17T17:05:02 | 2021-08-17T17:05:02 | 384,517,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,216 | py | import pandas as pd
from math import ceil
import random
import csv
import sys
import warnings
import importlib
import os
import re
import numpy as np
from collections import OrderedDict as od

warnings.filterwarnings("ignore")
sys.path.append("..")
from general.ml import *
from general.individual import *
from general.problem import Problem
from algorithms.nsga2.utils import NSGA2Utils
from general.population import Population
from general.ml import *

alg = dat = var = obj = mod = False  # Possible parameters given
#objectives_results_dict = {'gmean_inv': 'error_val', 'dem_fpr': 'dem_fpr_val', 'dem_ppv': 'dem_ppv_val', 'dem_pnr': 'dem_pnr_val'}
objectives_results_dict = {'gmean_inv': 'error', 'dem_fpr': 'dem_fp', 'dem_ppv': 'dem_ppv', 'dem_pnr': 'dem_pnr'}
objectives_results_norm_dict = {'num_leaves': 'num_leaves', 'data_weight_avg_depth': 'data_weight_avg_depth'}
variables_range = [(20, 200), (0.0001, 0.1), (0.001, 100000), (0, 1), (1, 9)]
strobj = "gmean_inv__dem_fpr__dem_ppv__dem_pnr"
strextra = ""
objectives = [gmean_inv, dem_fpr, dem_ppv, dem_pnr]
extraobj = []
extra = ""

for alg in ["nsga2", "smsemoa", "grea"]:
    for data in ["adult", "german", "propublica_recidivism", "propublica_violent_recidivism", "ricci"]:
        if data == "adult" or data == "propublica_recidivism" or data == "propublica_violent_recidivism":
            var = "race"
        if data == "ricci":
            var = "Race"
        if data == "german":
            var = "age"

        pareto_fronts = []
        all_indivs = []
        pareto_optimal = []
        for seed in range(100, 110):
            read = pd.read_csv('../results/' + alg + '/individuals/individuals_' + data + '_seed_' + str(seed) + '_var_' + var + '_gen_300_indiv_150_model_LR_obj_' + strobj + strextra + '.csv')
            pareto_fronts.append(read)

        pareto_fronts = pd.concat(pareto_fronts)            # Union of all pareto fronts got in each run
        pareto_fronts.reset_index(drop=True, inplace=True)  # Reset index because for each run all rows have repeated ones

        for index, row in pareto_fronts.iterrows():         # We create an individual object associated with each row
            indiv = IndividualLR()
            hyperparameters = ['max_iter', 'tol', 'lambda', 'l1_ratio', 'class_weight']
            indiv.features = [row[x] for x in hyperparameters]
            indiv.id = row['id']
            indiv.domination_count = 0
            indiv.features = od(zip(hyperparameters, indiv.features))
            indiv.objectives = []
            for x in objectives:
                # We will insert all objectives, normalizing every objective that should be
                obj = objectives_results_dict.get(x.__name__, "None")
                if not obj == "None":   # The objective doesn't need to be normalized to the range [0,1]
                    indiv.objectives.append(float(row[obj]))
                else:                   # In other case
                    obj = objectives_results_norm_dict.get(x.__name__)
                    indiv.objectives.append(float(row[obj]) / pareto_fronts[obj].max())
            # The same with extra objectives
            indiv.extra = []
            if not extraobj == None:
                for x in extraobj:
                    # We will insert all objectives, normalizing every objective that should be
                    ext = objectives_results_dict.get(x.__name__, "None")
                    if not ext == "None":   # The objective doesn't need to be normalized to the range [0,1]
                        indiv.extra.append(float(row[ext]))
                    else:                   # In other case
                        ext = objectives_results_norm_dict.get(x.__name__)
                        indiv.extra.append(float(row[ext]) / pareto_fronts[ext].max())
            indiv.creation_mode = row['creation_mode']
            all_indivs.append(indiv)

        print(len(all_indivs))
        for indiv in all_indivs:    # Now we calculate all the individuals non dominated by any other (pareto front)
            print()
            print(indiv.id)
            for other_indiv in all_indivs:
                if other_indiv.dominates(indiv):
                    indiv.domination_count += 1     # Indiv is dominated by the second
            if indiv.domination_count < 10:         # Could be done easily more efficiently, but could be interesting
                pareto_optimal.append(indiv)

        pareto_optimal_df = []
        for p in pareto_optimal:    # We select individuals from the files corresponding to the pareto front ones (we filter by id)
            curr_id = p.id          # BUT IF THERE ARE MORE THAN 1 INDIVIDUAL WITH THE SAME ID THEY WILL ALL BE ADDED, EVEN THOUGH ONLY 1 OF THEM IS A PARETO OPTIMAL SOLUTION
            found = False           # Which is by the way really unlikely since there are 36^10 possibilities for an id
            for index, row in pareto_fronts.iterrows():
                if row['id'] == curr_id:
                    pareto_optimal_df.append(pd.DataFrame({x: row[x] for x in pareto_fronts.columns.tolist()}, index=[0]))  # We introduce here the not-normalized version of them
                    found = True
            if not found:
                pareto_optimal.remove(p)

        # We extract them to a file
        pareto_optimal_df = pd.concat(pareto_optimal_df)
        pareto_optimal_df.drop_duplicates(subset=(['seed'] + hyperparameters), keep='first').dropna()
        pareto_optimal_df.to_csv('../results/' + alg + '/individuals/general_individuals_pareto_' + data + '_baseseed_100_nruns_10_var_' + var + '_gen_300_indiv_150_model_LR_obj_' + strobj + strextra + '.csv', index=False, header=True, columns=list(pareto_fronts.keys()))
print("----") | [
"[email protected]"
] | |
b2ccb3486375934a7ce2ca85148e21351487cf9a | b738a0edcd7f23af475d913b91df18ca39c9e6fe | /lclCluster.py | c495a79f608edccbfb9f83bfd7027e0a2a6399fb | [] | no_license | erodrig9/Amazon-Recommendation-Analysis | b2d70df50fed5f9c165ff7868b15410baa70c94b | 5b4477a307a684b3561f1b85794dbfb46283d0e6 | refs/heads/master | 2021-01-20T12:04:44.723628 | 2011-08-05T02:51:40 | 2011-08-05T02:51:40 | 2,158,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | import sys
def possiblePairs(n):
    return n*(n-1) - (((n-1)*n)/2)
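
# Note (added): n*(n-1) - n*(n-1)/2 simplifies to n*(n-1)/2, i.e. the number
# of unordered pairs among n neighbours.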

inOutDegree = {1: 0}
adjNodes = {1: [0, 0]}

fileName = sys.argv[1]

if len(sys.argv) < 2:
    print 'One file name must be specified'
    quit()

IFILE = open(fileName, 'r')
line = IFILE.readline().strip()
while line[0] == '#':
    line = IFILE.readline().strip()

edge = line.split()
if len(edge) != 2:
    quit()
curr = edge[0]
adj = edge[1]
inOutDegree[int(curr)] = 1
inOutDegree[int(adj)] = 1
adjNodes[int(curr)] = set([int(adj)])
adjNodes[int(adj)] = set([int(curr)])

for line in IFILE:
    line = line.strip()
    edge = line.split()
    if len(edge) != 2:
        continue
    curr = edge[0]
    adj = edge[1]
    if curr == '#':
        continue
    if int(curr) in inOutDegree:
        inOutDegree[int(curr)] += 1
    else:
        inOutDegree[int(curr)] = 1
    if int(adj) in inOutDegree:
        inOutDegree[int(adj)] += 1
    else:
        inOutDegree[int(adj)] = 1
    if int(curr) in adjNodes:
        adjNodes[int(curr)].add(int(adj))
    else:
        adjNodes[int(curr)] = set([int(adj)])
    if int(adj) in adjNodes:
        adjNodes[int(adj)].add(int(curr))
    else:
        adjNodes[int(adj)] = set([int(curr)])

IFILE.close()

outputFile = 'results_' + fileName
OFILE = open(outputFile, 'w')
#OFILE.write(str('node,lcc,gcc\n'))

items = inOutDegree.items()
items.sort()
for key, value in items:
    numAdj = 0
    if key in adjNodes:
        numAdj = len(adjNodes[key])
    pairs = possiblePairs(numAdj)
    connectedPairs = 0
    actualEdges = 0
    if numAdj > 0:
        adjNodesList = list(adjNodes[key])
        for i in range(0, numAdj-1):
            node1 = adjNodesList[i]
            for j in range(i+1, numAdj):
                node2 = adjNodesList[j]
                if node2 in adjNodes:
                    for node in adjNodes[node2]:
                        if(node == node1):
                            connectedPairs += 1
                            break
        for node1 in adjNodesList:
            for node2 in adjNodesList:
                if node1 == node2:
                    continue
                if node2 in adjNodes:
                    for node in adjNodes[node2]:
                        if(node == node1):
                            actualEdges += 1
                            break
    possibleEdges = value*(value-1)
    lcc = 0
    if possibleEdges > 0:
        lcc = float(float(actualEdges) / float(possibleEdges))
    gcc = 0
    if pairs > 0:
        gcc = float(connectedPairs)/float(pairs)
    OFILE.write(str(key) + ',' + str(lcc) + ',' + str(gcc) + '\n')

OFILE.close()
print 'Done'
| [
"[email protected]"
] | |
3a9466a85877955598211cf8c720848236b693ae | e45fef67477bb265f0b69e63ecb5517106525e8b | /calculator.py | e1dcde888b4da05d6cea78e7ac2464de9646eb9e | [] | no_license | lenskikh/cass | 0a6c72d75b6b41da066f05c3511016dd54e4e307 | f84ce7328db26bf1d82ddd26caf0a84d07e96b4c | refs/heads/master | 2023-07-29T11:04:14.997261 | 2023-07-17T19:53:33 | 2023-07-17T19:53:33 | 194,334,196 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,780 | py | import tkinter as tk
#Sound activation results in a slower response to key presses
#If you want to hear keystroke clicks, remove the comments on three lines
#import winsound
#sound_of_click = 'sounds/click.wav'

window = tk.Tk()
window.title("CASS calculator")

x = 390
y = 280

numbers = {".": tk.PhotoImage(file=r"images/dot.gif"),
           "-": tk.PhotoImage(file=r"images/minus.gif"),
           "gallons": tk.PhotoImage(file=r"images/gallons.gif"),
           "miles": tk.PhotoImage(file=r"images/miles.gif"),
           "celsius": tk.PhotoImage(file=r"images/celsius.gif"),
           "pounds": tk.PhotoImage(file=r"images/pounds.gif")}

mini_numbers = {"+": tk.PhotoImage(file=r"images/mini_plus.gif"),
                "-": tk.PhotoImage(file=r"images/mini_minus.gif"),
                "*": tk.PhotoImage(file=r"images/mini_x.gif"),
                "/": tk.PhotoImage(file=r"images/mini_divide.gif"),
                ".": tk.PhotoImage(file=r"images/mini_dot.gif")}

#digits from 0 to 9 and empty screen as 10
for counter in range(11):
    numbers[str(counter)] = tk.PhotoImage(file=r"images/" + str(counter) + ".gif")
    mini_numbers[str(counter)] = tk.PhotoImage(file=r"images/m" + str(counter) + ".gif")

memory = {"first_slot": "", "second_slot": "",
          "third_slot": "", "total": "", "result": ""}


def button_of_number(num):
    if memory["second_slot"] == "":
        memory["first_slot"] += num
        screen(memory["first_slot"], photo=numbers[num])
    else:
        memory["third_slot"] += num
        screen(memory["third_slot"], photo=numbers[num])


def check_button_opt():
    if memory["result"] != "" and memory["first_slot"] == "":
        memory["first_slot"] = memory["result"]


def operation(opt):
    empty_screen()
    if memory["second_slot"] == "" and memory["third_slot"] == "":
        memory["second_slot"] = opt
        empty_screen()
    #if user presses an operation after equal
    elif memory["third_slot"] != "":
        first_zero()


def first_zero():
    memory["first_slot"], memory["second_slot"], memory["third_slot"], memory["total"] = "", "", "", ""
    empty_screen()
    mini_empty()
    canvas.create_image(120, y, image=numbers["0"])  #zero on a screen


def equal():
    memory["total"] = memory["first_slot"] + memory["second_slot"] + memory["third_slot"]
    match memory["second_slot"]:
        case "%":
            memory["total"] = memory["first_slot"] + "/100" + "*" + memory["third_slot"]
        case "Volume":
            memory["total"] = memory["first_slot"] + "/" + "3.785411784"
        case "Temperature":
            memory["total"] = memory["first_slot"] + "*1.8" + "+32"
        case "Pounds":
            memory["total"] = memory["first_slot"] + "*2.2046"
        case "Length":
            memory["total"] = memory["first_slot"] + "*0.62137"
        case "Root":
            counter = 1
            root = 0
            while root <= int(memory["first_slot"]):
                counter += 0.1
                root = counter * counter
            memory["total"] = str(counter)
    empty_screen()
    mini_empty()
    memory["result"] = str(eval(memory["total"]))
    photo = ""
    #first_zero()
    screen(memory["result"], photo)
    mini()
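
# Worked conversion example (added): with first_slot = "100" and second_slot
# = "Temperature", equal() builds total = "100*1.8+32", so eval() shows 212.0
# on the screen (Celsius to Fahrenheit).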

def mini():
    mini_empty()
    if memory["second_slot"] == "+" or memory["second_slot"] == "-" or memory["second_slot"] == "*" or memory["second_slot"] == "/":
        x = 125
        znak = memory["first_slot"] + memory["second_slot"] + memory["third_slot"]
        for i in znak[:19]:
            canvas.create_image(x, 198, image=mini_numbers[str(i)])
            x += 15
    if memory["second_slot"] == "Volume":
        canvas.create_image(220, 198, image=numbers["gallons"])
    elif memory["second_slot"] == "Length":
        canvas.create_image(180, 198, image=numbers["miles"])
    elif memory["second_slot"] == "Temperature":
        canvas.create_image(260, 198, image=numbers["celsius"])
    elif memory["second_slot"] == "Pounds":
        canvas.create_image(270, 198, image=numbers["pounds"])


def screen(result, photo):
    #prevent 10 digits (limitation of the screen)
    cut = result[:9]
    empty_screen()
    x = 110
    for i in cut:
        canvas.create_image(x, y, image=numbers[str(i)])
        x += 35


def empty_screen():
    canvas.create_image(252, 280, image=numbers["10"])


def mini_empty():
    canvas.create_image(252, 198, image=mini_numbers["10"])


def left_click(event):
    if event.x > 45 and event.x < 124 and event.y > 706 and event.y < 760:
        button_of_number(num="1")
    if event.x > 155 and event.x < 236 and event.y > 706 and event.y < 760:
        button_of_number(num="2")
    if event.x > 267 and event.x < 347 and event.y > 706 and event.y < 760:
        button_of_number(num="3")
    if event.x > 46 and event.x < 121 and event.y > 628 and event.y < 680:
        button_of_number(num="4")
    if event.x > 155 and event.x < 236 and event.y > 628 and event.y < 680:
        button_of_number(num="5")
    if event.x > 267 and event.x < 347 and event.y > 628 and event.y < 680:
        button_of_number(num="6")
    if event.x > 46 and event.x < 121 and event.y > 549 and event.y < 600:
        button_of_number(num="7")
    if event.x > 155 and event.x < 236 and event.y > 549 and event.y < 600:
        button_of_number(num="8")
    if event.x > 267 and event.x < 347 and event.y > 549 and event.y < 600:
        button_of_number(num="9")
    if event.x > 45 and event.x < 124 and event.y > 788 and event.y < 834:
        button_of_number(num="0")
    #Dot
    if event.x > 160 and event.x < 232 and event.y > 788 and event.y < 834:
        if memory["second_slot"] == "":
            if "." in memory["first_slot"][:2]:
                pass
            else:
                memory["first_slot"] += "."
                screen(memory["first_slot"], photo=numbers["."])
        else:
            if "." in memory["third_slot"][:2]:
                pass
            else:
                memory["third_slot"] += "."
                screen(memory["third_slot"], photo=numbers["."])
    #negative number
    if event.x > 156 and event.x < 233 and event.y > 468 and event.y < 519:
        if memory["second_slot"] == "":
            if "-" in memory["first_slot"]:
                pass
            else:
                memory["first_slot"] = "-" + memory["first_slot"]
                screen(memory["first_slot"], photo=numbers["-"])
        else:
            if "-" in memory["third_slot"]:
                pass
            else:
                memory["third_slot"] = "-" + memory["third_slot"]
                screen(memory["third_slot"], photo=numbers["-"])
    if event.x > 267 and event.x < 347 and event.y > 786 and event.y < 839:
        check_button_opt()
        operation(opt="+")
        mini()
    if event.x > 379 and event.x < 454 and event.y > 627 and event.y < 678:
        mini()
        check_button_opt()
        operation(opt="-")
        mini()
    if event.x > 379 and event.x < 454 and event.y > 549 and event.y < 600:
        mini()
        check_button_opt()
        operation(opt="*")
        mini()
    if event.x > 379 and event.x < 454 and event.y > 469 and event.y < 525:
        check_button_opt()
        operation(opt="/")
        mini()
    #percent
    if event.x > 271 and event.x < 345 and event.y > 469 and event.y < 525:
        memory["second_slot"] = "%"
        equal()
    #Volume
    if event.x > 400 and event.x < 456 and event.y > 400 and event.y < 435:
        memory["second_slot"] = "Volume"
        equal()
    #Temperature
    if event.x > 222 and event.x < 273 and event.y > 400 and event.y < 435:
        memory["second_slot"] = "Temperature"
        equal()
    #Weight
    if event.x > 136 and event.x < 189 and event.y > 400 and event.y < 435:
        memory["second_slot"] = "Pounds"
        equal()
    #Length
    if event.x > 47 and event.x < 100 and event.y > 400 and event.y < 435:
        memory["second_slot"] = "Length"
        equal()
    #root
    if event.x > 313 and event.x < 369 and event.y > 400 and event.y < 435:
        memory["second_slot"] = "Root"
        equal()
    #equal button
    if event.x > 379 and event.x < 454 and event.y > 707 and event.y < 839:
        equal()
    #ac/c button
    if event.x > 54 and event.x < 118 and event.y > 471 and event.y < 524:
        first_zero()
        #winsound.PlaySound(sound_of_click, winsound.SND_FILENAME)


canvas = tk.Canvas(window, height=900, width=502)
canvas.grid(row=0, column=0)
calculator_background = tk.PhotoImage(file='images/bg.gif')
canvas.create_image(253, 450, image=calculator_background)
first_zero()
window.bind("<Button-1>", left_click)
canvas.pack()
window.mainloop()
| [
"[email protected]"
] | |
3a94e53dca1597278dc61bdfb447167934a00193 | 77b246d2dc02fb1faba5df8803dadbda483a03c9 | /test.py | a1ae48249f1c306a960e2c199886ed0d51445ff3 | [] | no_license | Vergangenheit/RestAPI | 3a62c1dcbbc444c9b312af481f58e89666db8363 | ec8513ee2466e281cb4bd9acbd90892007be50c5 | refs/heads/master | 2022-11-19T08:36:04.444962 | 2020-07-23T14:31:07 | 2020-07-23T14:31:07 | 281,750,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import requests
BASE = "http://127.0.0.1:5000/"
# data = [{'likes':20, 'name':'Lollo', 'views':123},
# {'likes':4, 'name':'Pillo', 'views':45},
# {'likes':204, 'name':'Calo', 'views':235}]
# for i in range(len(data)):
# response = requests.put(BASE + 'video/' + str(i), data[i])
# print(response.json())
# input()
response = requests.patch(BASE + 'video/2', {"views":99, 'likes':1001})
print(response.json()) | [
"[email protected]"
] | |
ba8e27733b696dda064fe948e3719ab2450b6ba8 | c70c7aae620cd725ce3f94943ba82d399098ef80 | /app/neuralNetworkMod/learning/starter2.py | caccde51d3b51a2b6e2afc7b159bdf8bb25bac59 | [] | no_license | ra2003/Ciphey | ca90eebcc6b11cd6dd527c49d8411326ddd53752 | 4fed962b6b839d21835f94738461e737441f02fa | refs/heads/master | 2021-02-05T00:31:52.562122 | 2019-12-30T13:28:36 | 2019-12-30T13:28:36 | 243,722,593 | 1 | 0 | null | 2020-02-28T09:15:34 | 2020-02-28T09:15:33 | null | UTF-8 | Python | false | false | 1,531 | py | import tensorflow as tf # deep learning library. Tensors are just multi-dimensional arrays
mnist = tf.keras.datasets.mnist # mnist is a dataset of 28x28 images of handwritten digits and their labels
(x_train, y_train),(x_test, y_test) = mnist.load_data() # unpacks images to x_train/x_test and labels to y_train/y_test
x_train = tf.keras.utils.normalize(x_train, axis=1) # scales data between 0 and 1
x_test = tf.keras.utils.normalize(x_test, axis=1) # scales data between 0 and 1
model = tf.keras.models.Sequential() # a basic feed-forward model
model.add(tf.keras.layers.Flatten()) # takes our 28x28 and makes it 1x784
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu)) # a simple fully-connected layer, 128 units, relu activation
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu)) # a simple fully-connected layer, 128 units, relu activation
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax)) # our output layer. 10 units for 10 classes. Softmax for probability distribution
model.compile(optimizer='adam', # Good default optimizer to start with
loss='sparse_categorical_crossentropy', # how will we calculate our "error." Neural network aims to minimize loss.
metrics=['accuracy']) # what to track
model.fit(x_train, y_train, epochs=3) # train the model
val_loss, val_acc = model.evaluate(x_test, y_test) # evaluate the out of sample data with model
print(val_loss) # model's loss (error)
print(val_acc) # model's accuracy | [
"[email protected]"
] | |
f20b66793a778f5a890d39576989a7578d983703 | 8b7c8d5d8603b9532293460f8d320a7b85c03959 | /revenge/native_exception.py | 381074b93d959e7c44342091db2f4a9d1541b66e | [] | no_license | freemanZYQ/revenge | 035abcb2940e07b1b522caa76e2fadc5fdc3c7ab | 96cd24560010c472b7183519f636d7047fba7c53 | refs/heads/master | 2020-07-27T20:29:54.464726 | 2019-09-13T02:20:59 | 2019-09-13T02:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py |
from . import Colorer
import logging
logger = logging.getLogger(__name__)
import typing
import frida
import colorama
colorama.init()
import os
from termcolor import cprint, colored
from prettytable import PrettyTable
here = os.path.dirname(os.path.abspath(__file__))
class NativeBacktrace(object):

    def __init__(self, process, backtrace):
        """Represents a backtrace. I.e.: what called what.

        Args:
            backtrace (list): List of instruction pointers
        """
        self._backtrace = backtrace


class NativeException(object):

    TYPES = ['abort', 'access-violation', 'illegal-instruction', 'arithmetic',
             'breakpoint', 'system']

    def __init__(self, context, backtrace=None, type=None,
                 memory_operation=None, memory_address=None):
        """Represent a native CPU exception.

        Args:
            context: Frida-util cpu context
            backtrace: native backtrace object
            type (str): What type of exception is this.
            memory_operation (str, optional): Type of memory operation
                (read/write/execute)
            memory_address (int, optional): Address that was accessed when
                exception occurred.
        """
        self.context = context
        self.backtrace = backtrace
        self.type = type
        self.memory_operation = memory_operation
        self.memory_address = memory_address

    def __repr__(self):
        attrs = ['NativeException',
                 self._process.memory.describe_address(self.address),
                 self.type]
        return '<' + ' '.join(attrs) + '>'

    def __str__(self):
        s = "Native Exception\n"
        s += "~~~~~~~~~~~~~~~~\n"
        s += self.type + " at " + self._process.memory.describe_address(self.address) + "\n"

        if self.memory_operation is not None:
            s += "Memory " + self.memory_operation + " " + hex(self.memory_address) + "\n\n"
        else:
            s += "\n"

        s += str(self.context)
        s += "\n"

        # If we can't execute the memory location, don't print it
        if self.memory_operation != "execute":
            s += "\n" + str(self._process.memory[self.address].instruction_block)

        return s

    @classmethod
    def _from_frida_dict(cls, process, exception, backtrace):
        """Build a NativeException object directly from a frida dict."""
        assert isinstance(exception, dict)
        assert isinstance(backtrace, list)

        backtrace = NativeBacktrace(process, backtrace)

        return cls(
            context=CPUContext(process, **exception['context']),
            backtrace=backtrace,
            type=exception['type'],
            memory_operation=exception['memory']['operation'] if 'memory' in exception else None,
            memory_address=common.auto_int(exception['memory']['address']) if 'memory' in exception else None,
        )
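
    # Usage sketch (added; 'process', 'exception' and 'backtrace' here are
    # illustrative placeholders for the payload frida delivers):
    #
    #   exc = NativeException._from_frida_dict(process, exception, backtrace)
    #   print(exc)   # pretty-prints type, address, registers and disassembly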
    @property
    def _process(self):
        return self.context._process

    @property
    def type(self):
        """str: What type of native exception? One of """
        return self.__type

    @type.setter
    def type(self, type):
        type = type.lower()
        assert type in NativeException.TYPES, 'Unexpected native exception type of {}'.format(type)
        self.__type = type

    @property
    def address(self):
        """int: Address of this exception."""
        return self.context.ip

    @property
    def memory_address(self):
        """int: Address of memory exception."""
        return self.__memory_address

    @memory_address.setter
    def memory_address(self, memory_address):
        assert isinstance(memory_address, (int, type(None)))
        self.__memory_address = memory_address

    @property
    def memory_operation(self):
        """str: Type of memory operation performed at exception.

        Enum: read, write, execute"""
        return self.__memory_operation

    @memory_operation.setter
    def memory_operation(self, memory_operation):
        if isinstance(memory_operation, str):
            memory_operation = memory_operation.lower()
        assert memory_operation in ['read', 'write', 'execute', None], "Unexpected memory_operation of '{}'".format(memory_operation)
        self.__memory_operation = memory_operation


from .tracer.contexts import Context as CPUContext
from . import common

NativeException.type.__doc__ += ', '.join(NativeException.TYPES)
| [
"[email protected]"
] | |
8a72a1ce79731e27b566a353ffb0ab6d393bfdd5 | eb6738286948946905ff076ccdfc83c5a811f62a | /app.py | 97f06d1656a7c2ef818da34e9ab10f7a64a296e2 | [] | no_license | TretanAll/belajar-flask | 020591e9e7124ce000f77d25565ebcfe6b8498f6 | 9102052ae49a7866763f3a6c669df5614b8c9c11 | refs/heads/master | 2020-05-18T16:48:28.770385 | 2019-05-08T05:08:15 | 2019-05-08T05:08:15 | 184,536,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from flask import Flask, render_template, request
app = Flask(__name__)


#: route for the index page
@app.route('/')
def home():
    search = request.args.get('search')
    if not search:
        #: render the template that was already created
        return render_template('index.html')
    return 'The result for the search keyword is = ' + search


#: route for an additional url + parameter
@app.route('/profile/<username>')
def show_profile(username):
    #: render profil.html with the username parameter
    return render_template('profil.html', username=username)  #: key = value


@app.route('/login', methods=['GET', 'POST'])
def show_login():
    if request.method == 'POST':
        return 'Your email is ' + request.form['email']
    return render_template('login.html') | [
"[email protected]"
] | |
1aa152002ee24ec974bb466baa4c482491acd0b8 | d1d5caf663266106d367a2557c82dfc0de167691 | /Part 6/client/main.py | 437ed26de8ac45c954d5d0c33c1575b9e876cff8 | [] | no_license | navin20/TalkieTut | e4d15ac631da2bac77ea465da87d8cc33f912163 | 3fcab1100a84a61a2c89fe29244ebd9f6e5a4258 | refs/heads/master | 2020-12-04T02:40:32.433411 | 2018-12-06T03:03:29 | 2018-12-06T03:03:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,378 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy import Config
Config.set('graphics', 'multisamples', '0')
from kivy.utils import get_color_from_hex
import List
from List import MDList
from label import MDLabel
from kivy.uix.popup import Popup
from kivy.uix.image import AsyncImage
from navigationdrawer import NavigationDrawer
############
import socket
import threading
import json
import string
import random
from os.path import expanduser
import os
import requests
global s
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '192.168.42.11'
port = 5005
#######
global name
name = 'Kiran'
was_here = False
path_images = "."#expanduser('~\\Pictures')
avail_image_extensions = ['*.jpg', '*.png', '*.gif'] # filter
avail_image_extensions_selection = ['.jpg', '.png', '.gif']
Builder.load_string("""
#:import get_color_from_hex __main__.get_color_from_hex
#:import path_images __main__.path_images
#:import avail_image_extensions __main__.avail_image_extensions

<Chat>:
    NavigationDrawer:
        id: nav_draw
        GridLayout:
            cols: 1
            Label:
                text: "sample"
            Button:
                text: "text"
                on_release:
                    root.manager.current = "image_select_screen"
                    nav_draw.toggle_state()
        GridLayout:
            rows: 2
            GridLayout:
                cols: 1
                rows: 0
                canvas:
                    Color:
                        rgba: get_color_from_hex("#ffffff")
                    Rectangle:
                        pos: self.pos
                        size: self.size
                ScrollView:
                    do_scroll_x: False
                    MDList:
                        id: ml
            GridLayout:
                size_hint_y: None
                height: 40
                spacing: 15
                rows: 1
                cols: 2
                canvas:
                    Color:
                        rgba: (0.746,0.8,0.86,1)
                    Rectangle:
                        pos: self.pos
                        size: self.size
                TextInput:
                    id: message
                    hint_text: "Type here"
                    multiline: False
                    on_text_validate: root.send_message(message.text)
                TextInput:
                    id: pvt_name
                    hint_text: "name of person to pvt"
                    multiline: False

<ImageSelectScreen>:
    GridLayout:
        rows: 3
        cols: 1
        BoxLayout:
            size_hint_y: None
            Button:
                text: "Icon View"
                on_release: filechooser.view_mode = "icon"
            Button:
                text: "List View"
                on_release: filechooser.view_mode = "list"
        BoxLayout:
            canvas:
                Color:
                    rgba: get_color_from_hex("#000000")
                Rectangle:
                    pos: self.pos
                    size: self.size
            FileChooser:
                id: filechooser
                path: path_images
                filters: avail_image_extensions
                on_selection: root.select(filechooser.selection)
                FileChooserIconLayout
                FileChooserListLayout
        BoxLayout:
            size_hint_y: None
            height: 30
            spacing: 10
            canvas:
                Color:
                    rgba: get_color_from_hex("#ffffff")
                Rectangle:
                    pos: self.pos
                    size: self.size
            Button:
                text: "Send"
                on_release: root.send_it()
            Button:
                text: "Back"
                on_release: root.manager.current = "main_screen"
""")


class Chat(Screen):
    global s

    def __init__(self, **kwargs):
        super(Chat, self).__init__(**kwargs)
        self.ml = self.ids['ml']
        self.pvt_name = self.ids['pvt_name']

    def add_two_line(self, from_who, msg_to_add):
        self.ml.add_widget(List.TwoLineListItem(
            text=msg_to_add,
            secondary_text=from_who,
            markup=True,
            text_size=(self.width, None),
            size_hint_y=None,
            font_size=self.height / 23,
        ))

    def on_enter(self):  # only run this once, not every time we switch back to it (main_screen)
        global was_here
        if was_here == False:
            was_here = True
            s.connect((host, port))
            welcome = s.recv(512)
            # self.msg_log.text += str(welcome + "\n")
            self.add_two_line('Admin', welcome)
            temp_template = {'name': name}
            s.send(json.dumps(temp_template))
            threading.Thread(target=self.handle_messages).start()

    def send_message(self, to_send_out):
        try:
            if self.pvt_name.text != '':
                type_msg = 'private_message'
                pvt_receiver = self.pvt_name.text
            else:
                type_msg = 'broadcast'
                pvt_receiver = ''
            template = {}
            template['msg_type'] = type_msg
            template['from'] = name
            template['msg'] = to_send_out
            template['pvt_receiver'] = pvt_receiver
            s.send(json.dumps(template))
        except Exception, e:
            print 'Error sending: ', e

    def download_file_arbi(self, url):
        local_filename = url.split('/')[-1]
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    # f.flush() commented by recommendation from J.F.Sebastian
        return local_filename
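
    # Note (added): stream=True makes requests fetch the response body lazily,
    # so the file is written out in 1 KiB chunks instead of being held in
    # memory all at once.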
    def handle_image_download(self, url_img):
        # create file downloader function for arbitrary files
        print 'starting download'
        saved_img = self.download_file_arbi(url_img)
        self.add_two_line('self', 'File saved as ' + saved_img)
        print 'download complete'
        self.pop_image_saved(saved_img)

    def pop_image_saved(self, src):
        the_pic = AsyncImage(source=src)
        self.pop1(the_pic)

    def pop1(self, src):
        popup = Popup(title='Image loading', content=src)
        popup.open()

    def handle_messages(self):
        while True:
            try:
                data = json.loads(s.recv(1024))
                if data['msg_type'] == 'broadcast':
                    # self.msg_log.text += data["from"] + " - " + data["msg"] + "\n"
                    self.add_two_line(data['from'], data['msg'])
                if data['msg_type'] == 'image':
                    # thread it
                    threading.Thread(target=self.handle_image_download,
                                     args=(data['link'], )).start()
            except Exception, e:
                print e


class A:
    # class to return the name
    def get_the_name(self):
        return name


class ImageSelectScreen(Screen):
    global s

    def select(self, filename):
        try:
            self.filename = filename[0]
            self.preview_img(self.filename)
        except Exception, e:
            print e

    def preview_img(self, src):
        # do image popup import popup & async image later
        popup = Popup(title='Preview', content=AsyncImage(source=src))
        popup.open()

    def upload_image(
        self,
        fname,
        urlll,
        some_dict,
    ):
        with open(fname, 'rb') as f:
            files = {'testname': f}
            r = requests.post(urlll, files=files)  # import requests
        s.send(json.dumps(some_dict))
        self.remove_file(fname)  # delete the temp file

    def remove_file(self, fname):
        try:
            os.remove(fname)
            print 'temp file removed'
        except Exception, e:
            print e

    def send_it(self):
        # this is the upload part
        print 'upload part'
        if len(self.filename) > 5:
            try:
                host = 'http://192.168.42.11/'
                url_for_img = host + 'man_images.php'
                url_for_img_no_php = host + 'img/'
                print 'inside'
                c_extension = os.path.splitext(self.filename)[1]  # get file extension
                if c_extension in avail_image_extensions_selection:
                    extesion = c_extension
                    # create temp file for randomness of filename
                    my_name = A().get_the_name()
                    temp_img_file = my_name + '-' \
                        + ''.join([random.choice(string.ascii_letters
                                   + string.digits) for n in xrange(7)]) \
                        + extesion
                    with open(self.filename, 'rb') as f:
                        orag = f.read()  # read image
                    with open(temp_img_file, 'wb') as fb:
                        fb.write(orag)  # write image to temp file
                    link_img = url_for_img_no_php + temp_img_file
                    some_dict = {'msg_type': 'image', 'link': link_img,
                                 'from': my_name}
                    threading.Thread(target=self.upload_image,
                                     args=(temp_img_file, url_for_img,
                                           some_dict)).start()
                    sm.current = 'main_screen'
            except Exception, e:
                print e


class Talkie(App):
    def build(self):
        return sm


sm = ScreenManager()
sm.add_widget(Chat(name='main_screen'))
sm.add_widget(ImageSelectScreen(name='image_select_screen'))

if __name__ == '__main__':
    Talkie().run()
| [
"[email protected]"
] | |
8baed9fb3e23ad004c4f3dedc0a4b1acbf3ddce0 | acab53339f2ec0a656dd297d185d7dd5dd9821fa | /all/findLowestCommonAncestor.py | 76be497f2f11df909f15a90a4a8e07d474b4fd84 | [] | no_license | shrutisaxena0617/Data_Structures_and_Algorithms | a355b127f2e5850e485aa784561ba7190432e14a | 31641f04a7c9dfa0b8c5ca1f5c92c56d6f22b239 | refs/heads/master | 2021-05-03T13:04:08.578505 | 2019-07-08T02:28:09 | 2019-07-08T02:28:09 | 120,508,581 | 1 | 0 | null | 2018-02-06T19:05:28 | 2018-02-06T18:59:10 | null | UTF-8 | Python | false | false | 931 | py | class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def findLowestCommonAncestor(root, node1, node2):
    path1, path2 = [], []
    if not findPath(root, node1.data, path1) or not findPath(root, node2.data, path2):
        return -1
    i = 0
    while i < len(path1) and i < len(path2):
        if path1[i] != path2[i]:
            break
        i += 1
    return path1[i-1]


def findPath(root, nodeData, path):
    if root is None:
        return False
    path.append(root.data)
    if root.data == nodeData:
        return True
    if (root.left and findPath(root.left, nodeData, path)) or (root.right and findPath(root.right, nodeData, path)):
        return True
    path.pop()
    return False


root = Node(10)
root.left = Node(20)
root.right = Node(30)
root.left.left = Node(15)
root.left.right = Node(25)
root.right.left = Node(27)
root.right.right = Node(35)

print(findLowestCommonAncestor(root, root.left, root.right))
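
# Expected output (added): both root-to-node paths begin with 10 and diverge
# right after the root, so the lowest common ancestor of 20 and 30 is 10.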
| [
"[email protected]"
] | |
efd7e00ff85405fefa382fd9e4b1e1fe36de907b | 726d8518a8c7a38b0db6ba9d4326cec172a6dde6 | /0501. Find Mode in Binary Search Tree/Solution.py | 808554b5b1825dcc00a81a5bd808278ae5eb3276 | [] | no_license | faterazer/LeetCode | ed01ef62edbcfba60f5e88aad401bd00a48b4489 | d7ba416d22becfa8f2a2ae4eee04c86617cd9332 | refs/heads/master | 2023-08-25T19:14:03.494255 | 2023-08-25T03:34:44 | 2023-08-25T03:34:44 | 128,856,315 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | from typing import List
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def __init__(self):
        self.pre = None
        self.ret = []
        self.ret_count, self.max_count, self.cur_count = 0, 0, 0

    def findMode(self, root: TreeNode) -> List[int]:
        self.inOrder(root)
        self.pre = None
        self.ret = [0] * self.ret_count
        self.ret_count, self.cur_count = 0, 0
        self.inOrder(root)
        return self.ret

    def inOrder(self, root: TreeNode) -> None:
        if not root:
            return
        self.inOrder(root.left)
        if self.pre and self.pre.val == root.val:
            self.cur_count += 1
        else:
            self.cur_count = 1
        if self.cur_count > self.max_count:
            self.max_count = self.cur_count
            self.ret_count = 1
        elif self.cur_count == self.max_count:
            if len(self.ret):
                self.ret[self.ret_count] = root.val
            self.ret_count += 1
        self.pre = root
        self.inOrder(root.right)
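
# Usage sketch (added, hypothetical tree): for a BST whose in-order traversal
# is [1, 2, 2], Solution().findMode(root) returns [2]; the first inOrder pass
# only sizes self.ret, and the second pass fills it in.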
| [
"[email protected]"
] | |
fbc91cbcf1923091f1650547f04392faef815546 | 965a7d9f81c051b9f56ea08fe048a3935f10ced6 | /lclbindings/lclpython/TCustomButtonunit.py | f41c0045361708d399deab0567d319aa2958953b | [] | no_license | mabudrais/lazarus-ccr | 1fd074078d04c869fe0a5a5140a1871b66e5c16d | be1510ff5bb5adae34fa91781c61f43650779f04 | refs/heads/master | 2020-12-25T11:41:40.836166 | 2015-08-14T07:26:17 | 2015-08-14T07:26:17 | 40,607,784 | 0 | 0 | null | 2015-08-12T15:16:48 | 2015-08-12T15:16:48 | null | UTF-8 | Python | false | false | 1,332 | py | import PyMinMod
from TButtonControlunit import*
class TCustomButton(TButtonControl):
    def Create(self, TheOwner):
        r = PyMinMod.TCustomButtonCreate(self.pointer, TheOwner.pointer)
        ro = TCustomButton()
        ro.pointer = r
        return ro

    def Click(self):
        r = PyMinMod.TCustomButtonClick(self.pointer)

    def ExecuteDefaultAction(self):
        r = PyMinMod.TCustomButtonExecuteDefaultAction(self.pointer)

    def ExecuteCancelAction(self):
        r = PyMinMod.TCustomButtonExecuteCancelAction(self.pointer)

    def ActiveDefaultControlChanged(self, NewControl):
        r = PyMinMod.TCustomButtonActiveDefaultControlChanged(self.pointer, NewControl.pointer)

    def UpdateRolesForForm(self):
        r = PyMinMod.TCustomButtonUpdateRolesForForm(self.pointer)

    def getActive(self):
        r = PyMinMod.TCustomButtongetActive(self.pointer)
        return r

    def setDefault(self, a1):
        r = PyMinMod.TCustomButtonsetDefault(self.pointer, a1)

    def getDefault(self):
        r = PyMinMod.TCustomButtongetDefault(self.pointer)
        return r

    Default = property(getDefault, setDefault)

    def setCancel(self, a1):
        r = PyMinMod.TCustomButtonsetCancel(self.pointer, a1)

    def getCancel(self):
        r = PyMinMod.TCustomButtongetCancel(self.pointer)
        return r

    Cancel = property(getCancel, setCancel)
| [
"[email protected]"
] | |
359b9da5690e878c8d9deee3140376753253f023 | 385f8d8ed7ab17e6217ab33485157e620cfcd51c | /base/16_进程与线程/02_线程.py | 40551ed2beb05e2cdf3f22f6a83368c304ff2240 | [] | no_license | simeon49/python-practices | 78937992671a5447e11bc5cbc283201ad5b716a6 | 130f9845c6ab4c049fcbc549ee298c6e9576c2dc | refs/heads/master | 2021-07-15T06:21:38.442779 | 2020-06-04T07:04:38 | 2020-06-04T07:04:38 | 159,765,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,710 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The Python standard library provides two thread modules: _thread (the
# low-level module) and threading (the high-level module that wraps _thread)
import time
import random
import _thread
import threading


def job(thread_type):
    start = time.time()
    thread_name = ''
    if thread_type == '_thread':
        thread_name = _thread.get_ident()
    else:
        thread_name = threading.current_thread().name
    print('thread %s is running...' % thread_name)
    time.sleep(random.random())
    print('thread %s is end. time used: %s' % (thread_name, time.time() - start))


###################################################
# _thread
###################################################
print('============= _thread =============')
_thread.start_new_thread(job, ('_thread',))  # if the main thread ends, the child threads it created end too
time.sleep(1)

###################################################
# threading
###################################################
print('============= threading =============')
t = threading.Thread(target=job, name='job_thread', args=('threading', ))
t.start()
t.join()  # wait for the child thread to finish

###################################################
# threading locks
###################################################
print('============= threading.Lock =============')
balance = 0
lock = threading.Lock()


def run_thread(n):
    for i in range(1000000):
        lock.acquire()
        try:
            global balance
            balance += n
            balance -= n
        finally:
            pass
        lock.release()


t = threading.Thread(target=run_thread, args=(3,))
t2 = threading.Thread(target=run_thread, args=(5,))
t.start()
t2.start()
t.join()
t2.join()
print('balance: %s' % balance)
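
# Note (added): the acquire()/release() pair above can also be written with
# the context-manager idiom, which releases the lock even on exceptions:
#     with lock:
#         balance += n
#         balance -= n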

###################################################
# threading.local: although it looks like a global variable, every thread can
# only read and write its own independent copy, so threads do not interfere
# with each other. ThreadLocal solves the problem of passing a value between
# the functions of a single thread.
###################################################
print('============= threading.local =============')
local_info = threading.local()


def printUserInfo():
    print(local_info.user)


def run_thread2(name, phone):
    local_info.user = {'name': name, 'phone': phone}
    printUserInfo()


t1 = threading.Thread(target=run_thread2, args=('Tom', '1364409918'))
t2 = threading.Thread(target=run_thread2, args=('Jack', '15828205867'))
t1.start()
t2.start()
t1.join()
t2.join()

###################################################
# GIL: Python threads are real OS threads, but the interpreter (the official
# CPython) has a GIL (Global Interpreter Lock). Before any Python thread runs
# it must first acquire the GIL, and after roughly every 100 bytecodes the
# interpreter releases it automatically so other threads may run. So even if
# a single process has multiple threads, those threads can only execute
# alternately. Note that every process has its own GIL.
###################################################
import multiprocessing


def loop():
    x = 0
    while True:
        x = x ^ 1


# a multi-threaded busy loop only reaches about 100% CPU even on a multi-core machine
for i in range(multiprocessing.cpu_count()):
    t = threading.Thread(target=loop)
    t.start()

# ways to work around Python's GIL
# 1. use multiple processes: they don't share a GIL, so usage can reach cores * 100%
p = multiprocessing.Pool(multiprocessing.cpu_count())
for i in range(multiprocessing.cpu_count()):
    p.apply_async(loop)
p.close()
# p.join()

# 2. run the loop in a C module from multiple threads
import ctypes
lib = ctypes.cdll.LoadLibrary('./base/16_进程与线程/liba.so')
for i in range(multiprocessing.cpu_count()):
    t = threading.Thread(target=lib.loop)
    t.start()
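
# Rough measurement sketch (added; burn() is a hypothetical helper): to see
# the GIL's effect, time a fixed amount of work instead of looping forever.
#
#   def burn(n=10**7):
#       x = 0
#       for _ in range(n):
#           x ^= 1
#
# Under CPython, running burn() in two threads takes about as long as running
# it twice sequentially, while two processes run it in parallel.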
| [
"[email protected]"
] | |
815c603a59208f188e4e1771b1bd9f90f728af1e | 24fefc553716d5b6420cd08bcc19d98f78e32a71 | /OSP_1.py | 55497598f971c2187505559c25d856bbccd75a73 | [] | no_license | Raywang0211/Orthogonal-Subspace-Projection | 5e927b1fdcb48f7c2dc3f00425f5ac4fe24c366d | 231ecf355bfc9fcde7216ee348e88db575326a4f | refs/heads/master | 2020-06-17T05:15:05.130439 | 2019-07-08T12:46:02 | 2019-07-08T12:46:02 | 195,808,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
from scipy import signal
import random
def Make_signal(s):
t=np.arange(0,1,0.01)*2*np.pi #初始化時間並轉成角度顯示(2p=360度)
sin=1+np.sin(t) #製造正弦波 make sin wave
square=1+signal.square(t) #製造方波 make square wave
triangle=signal.sawtooth(2*np.pi*t) #製造三角波 make triangle
mixA=sin+square #將正弦波及方波混和 mix sin and square
mixB=sin+square+triangle #將正弦波 方波 及 三角波 混和 mix sin square and triangle mix
plt.subplot(221)#============================================= show the mix1
plt.title('mixA')
plt.plot(t,mixA)
plt.subplot(222)
plt.title('mixB')
plt.plot(t,mixB)#============================================= show the mix1
    u=np.zeros([100,2],float)   # background-signature matrix U (columns: sine, square)
    d=np.zeros([100,1],float)   # desired signature d (triangle)
for i in range(100):
u[i][0]=sin[i]
u[i][1]=square[i]
d[i][0]=triangle[i]
return u,d,np.array(mixA),np.array(mixB)
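# Added background note: given a matrix U whose columns span the signatures to be
# suppressed, the operator P = I - U (U^T U)^{-1} U^T projects any vector onto the
# orthogonal complement of span(U). The OSP detector then scores a mixture x with
# d^T P x, which is large only when the desired signature d is present in x.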
def Make_PuT(u):
    uT=np.transpose(u)
    uTu=uT.dot(u)
    uTu_inverce=inv(uTu)
    # print(uTu_inverce)
    uTu_inverce_uT=uTu_inverce.dot(uT)      # (U^T U)^-1 U^T, the pseudo-inverse of U
    id_matrix=np.identity(100)
    u_uTu_invers_uT=u.dot(uTu_inverce_uT)   # U (U^T U)^-1 U^T, projector onto span(U)
    PuT=id_matrix-u_uTu_invers_uT           # P = I - U U^+, projector onto the orthogonal complement
    return PuT
def OSP(d,u,x):
    PuT=Make_PuT(u)
    PuTR=np.transpose(x.dot(PuT))   # project the mixture x away from span(U)
    PuTR_TR=np.transpose(d)         # d^T, transpose of the desired signature
    OSP_result=PuTR_TR.dot(PuTR)    # OSP score d^T P x
    return OSP_result
if __name__=='__main__':
    sample_signal=100
    u , d , mixA , mixB = Make_signal(sample_signal) # build the signatures and both mixtures (the argument is currently unused)
    sample_number=10 # build 10 random test cases to detect
input_data=[mixA,mixB]
dict={0:'mixA',1:'mixB'}
seed = [random.randint(0,1) for x in range(sample_number)]
test_space=[dict[i] for i in seed]
print(test_space)
testcase=[input_data[x] for x in seed]
ans=[OSP(d,u,x) for x in testcase]
plt.subplot(223)
plt.plot(ans)
plt.show()
# ==============================================================
# /myCode.py -- repo: FilipinoJonas/myCode
# ==============================================================
print ("H")
print ("E")
print ("L")
print ("L")
print ("O")
print ("")
print ("W")
print ("O")
print ("R")
print ("L")
print ("D")
# ==============================================================
# /weatherservice.py -- repo: ginnikhanna/WeatherApp
# ==============================================================
import weatherapiclient
from weatherapiclient import WeatherInfo
import openweather_apiclient
class WeatherService:
def __init__(self, city,
weather_apiclient = openweather_apiclient.OpenWeatherApiClient):
self._city = city
self._weatherapiclient = weather_apiclient
def city(self):
return self._city
    def get_weather(self):
        weather = self._weatherapiclient.current_weather(self._city)
        # compare strings with ==, not 'is': identity checks on strings are unreliable
        if weather.temp_unit == 'celsius':
            return weather
        elif weather.temp_unit == 'kelvin':
            return self._convert_from_kelvin(weather)
        else:
            return self._convert_from_fahrenheit(weather)
def get_temperature(self):
return self.get_weather().temp
def get_temperature_feels_like(self):
return self.get_weather().feels_like
def _convert_from_fahrenheit(self, weather :WeatherInfo):
temp_celsius = int((weather.temp - 32)/1.8)
temp_feels_like = int((weather.feels_like - 32)/1.8)
new_weather = WeatherInfo(temp_celsius,
temp_feels_like,
weather.temp_min,
weather.temp_max,
weather.pressure,
weather.humidity,
'celsius')
return new_weather
def _convert_from_kelvin(self, weather: WeatherInfo):
temp_celsius = int(weather.temp - 273.15)
temp_feels_like = int(weather.feels_like - 273.15)
new_weather = WeatherInfo(temp_celsius,
temp_feels_like,
weather.temp_min,
weather.temp_max,
weather.pressure,
weather.humidity,
'celsius')
return new_weather
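# Hypothetical usage sketch (assumes OpenWeatherApiClient exposes a
# current_weather(city) call that returns a WeatherInfo):
#   service = WeatherService('Munich')
#   print(service.get_temperature())
#   print(service.get_temperature_feels_like())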
# ==============================================================
# /0x05-python-exceptions/0-safe_print_list.py
# -- repo: paurbano/holbertonschool-higher_level_programming
# ==============================================================
#!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
try:
cont = 0
while cont < x:
print("{:d}".format(my_list[cont]), end="")
cont = cont + 1
print("")
return cont
except IndexError:
print()
return cont
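# Added usage example: safe_print_list([1, 2, 3], 4) prints "123" and a newline,
# then returns 3 -- the IndexError raised at position 3 is caught silently.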
# ==============================================================
# /Python/forTest.py -- repo: cgscreamer/UdemyProjects
# ==============================================================
number = "9,234,567,789,567"
cleanedNumber = ""
for char in number:
if char in "0123456789":
cleanedNumber = cleanedNumber + char
newNumber = int(cleanedNumber)
print("The number is {}".format(newNumber)) | [
"[email protected]"
] | |
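# Added note: an equivalent one-liner, assuming only commas need stripping:
# newNumber = int(number.replace(",", ""))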
# ==============================================================
# /drf_chart_of_account/migrations/0013_auto_20201015_1924.py
# -- repo: skoobytechforimpact/drf_chart_of_account (MIT)
# ==============================================================
# Generated by Django 3.1.2 on 2020-10-15 19:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drf_chart_of_account', '0012_auto_20201015_1924'),
]
operations = [
migrations.AlterField(
model_name='layerfivemodel',
name='ref_no',
field=models.CharField(default='5ef0cd3d-588d-4af8-8479-68be01a204cd', max_length=80, unique=True, verbose_name='Reference No.'),
),
migrations.AlterField(
model_name='layerfourmodel',
name='ref_no',
field=models.CharField(default='5ef0cd3d-588d-4af8-8479-68be01a204cd', max_length=80, unique=True, verbose_name='Reference No.'),
),
migrations.AlterField(
model_name='layeronemodel',
name='ref_no',
field=models.CharField(default='5ef0cd3d-588d-4af8-8479-68be01a204cd', max_length=80, unique=True, verbose_name='Reference No.'),
),
migrations.AlterField(
model_name='layerthreemodel',
name='ref_no',
field=models.CharField(default='5ef0cd3d-588d-4af8-8479-68be01a204cd', max_length=80, unique=True, verbose_name='Reference No.'),
),
migrations.AlterField(
model_name='layertwomodel',
name='ref_no',
field=models.CharField(default='5ef0cd3d-588d-4af8-8479-68be01a204cd', max_length=80, unique=True, verbose_name='Reference No.'),
),
]
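# Added note: the identical literal UUID default on every field suggests the model
# evaluated uuid4() once at import time instead of passing the callable itself;
# with default=uuid.uuid4 (no parentheses) Django would generate a fresh value per row.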
# ==============================================================
# /edison/urls.py -- repo: ead-ru/edison-test
# ==============================================================
"""edison URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path, include, re_path
from django.conf.urls.static import static
from django.contrib.staticfiles import views as static_views
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('accounts/', include('django.contrib.auth.urls')),
path('games/', include('games.urls')),
path('users/', include('users.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [re_path(r'^static/(?P<path>.*)$', static_views.serve), ]
# ==============================================================
# /Problems/udemy/reverse_string.py -- repo: esau91/Python
# ==============================================================
# Reverse a string: reverse the word order and each word's characters
def reverse_string(my_string):
my_words = my_string.split(' ')
reversed_words = []
for word in my_words:
reversed_words.insert(0, word[::-1])
return ' '.join(reversed_words)
if __name__ == '__main__':
my_string = 'Hello My Name is Esaú'
print(reverse_string(my_string))
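# Added note: for single-space-separated text this is equivalent to the slice
# my_string[::-1], which reverses the whole string in one step.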
# ==============================================================
# /test.py -- repo: ycdhqzhiai/PaddleOCR-demo
# ==============================================================
import argparse
import yaml
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image
from core.PaddleOCR import PaddleOCR
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--params", type=str, default='data/params.yaml')
return parser.parse_args()
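# Added note: draw_result below overlays the detected boxes on the original image,
# renders the recognized text (via a CJK-capable TrueType font) on a white canvas
# of the same size, and returns the two stacked side by side.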
def draw_result(img, result):
img_rec = np.ones_like(img, np.uint8)*255
img_pil = Image.fromarray(img_rec)
draw = ImageDraw.Draw(img_pil)
fontpath = "font/simsun.ttc"
font = ImageFont.truetype(fontpath, 16)
for info in result:
bbox, rec_info = info
pts=np.array(bbox, np.int32)
pts=pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,0),2)
txt = rec_info[0] + str(rec_info[1])
draw.text(tuple(pts[0][0]), txt, font=font, fill =(0,255,0))
bk_img = np.array(img_pil)
draw_img = np.hstack([img,bk_img])
return draw_img
if __name__ == '__main__':
args = parse_args()
with open(args.params) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
ocr_engine = PaddleOCR(data_dict)
img = cv2.imread(data_dict['image_dir'])
result = ocr_engine.ocr(img,
det=data_dict['det'],
rec=data_dict['rec'],
cls=data_dict['use_angle_cls'])
draw_img = draw_result(img, result)
cv2.imwrite('result.jpg', draw_img)
cv2.imshow("img", draw_img)
    cv2.waitKey(0)
# ==============================================================
# /src/Qb_node/qbrobotics-qbdevice-ros-internal/qb_device_msgs/catkin_generated/pkg.develspace.context.pc.py
# -- repo: CentroEPiaggio/SoftLEGS-ROS_Package (BSD-3-Clause)
# ==============================================================
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/riccardo/catkin_ws/devel/include;/home/riccardo/catkin_ws/src/Qb_node/qbrobotics-qbdevice-ros-internal/qb_device_msgs/include".split(';') if "/home/riccardo/catkin_ws/devel/include;/home/riccardo/catkin_ws/src/Qb_node/qbrobotics-qbdevice-ros-internal/qb_device_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "qb_device_msgs"
PROJECT_SPACE_DIR = "/home/riccardo/catkin_ws/devel"
PROJECT_VERSION = "0.11.3"
# ==============================================================
# /Model/NN/ReducedSet/RandSample/urinary_tract/urinary_tract.py
# -- repo: leon1003/QSMART
# ==============================================================
from __future__ import division
import jmp_score as jmp
from math import *
import numpy as np
""" ==================================================================
Copyright(C) 2018 SAS Institute Inc.All rights reserved.
Notice:
The following permissions are granted provided that the
above copyright and this notice appear in the score code and
any related documentation. Permission to copy, modify
and distribute the score code generated using
JMP(R) software is limited to customers of SAS Institute Inc. ("SAS")
and successive third parties, all without any warranty, express or
implied, or any other obligation by SAS. SAS and all other SAS
Institute Inc. product and service names are registered
trademarks or trademarks of SAS Institute Inc. in the USA
and other countries. Except as contained in this notice,
the name of the SAS Institute Inc. and JMP shall not be used in
the advertising or promotion of products or services without
prior written authorization from SAS Institute Inc.
================================================================== """
""" Python code generated by JMP v14.1.0 """
def getModelMetadata():
return {"creator": u"Neural", "modelName": u"", "predicted": u"IC50", "table": u"urinary_tract", "version": u"14.1.0", "timestamp": u"2020-09-16T05:27:28Z"}
def getInputMetadata():
return {
u"EXP_ABL1_X_EXP_WASF1": "float",
u"EXP_BMP2K_X_EXP_NUMB": "float",
u"EXP_BMP2K_X_EXP_RALBP1": "float",
u"EXP_COQ8A": "float",
u"EXP_EIF2AK2": "float",
u"EXP_GRK2_X_EXP_OR5AC2": "float",
u"EXP_GRK2_X_EXP_OR6A2": "float",
u"EXP_GRK2_X_EXP_P2RY11": "float",
u"EXP_MAP2K5": "float",
u"EXP_PHKG2_X_EXP_PHKA1": "float",
u"EXP_STK25_X_EXP_PDCD10": "float",
u"EXP_TRPM6": "float",
u"Fingerprint_576": "float",
u"Fingerprint_611": "float",
u"Fingerprint_617": "float",
u"Fingerprint_625": "float",
u"Fingerprint_629": "float",
u"Fingerprint_635": "float",
u"Fingerprint_643": "float",
u"Fingerprint_644": "float",
u"Fingerprint_646": "float",
u"Fingerprint_650": "float",
u"Fingerprint_656": "float",
u"Fingerprint_658": "float",
u"Fingerprint_659": "float",
u"Fingerprint_667": "float",
u"Fingerprint_672": "float",
u"Fingerprint_677": "float",
u"Fingerprint_679": "float",
u"Fingerprint_685": "float",
u"Fingerprint_697": "float",
u"Fingerprint_698": "float",
u"Fingerprint_704": "float",
u"Fingerprint_707": "float",
u"Fingerprint_709": "float",
u"Fingerprint_710": "float",
u"Fingerprint_712": "float",
u"Fingerprint_714": "float",
u"Fingerprint_776": "float",
u"Fingerprint_779": "float",
u"Fingerprint_784": "float",
u"Fingerprint_791": "float",
u"Fingerprint_797": "float",
u"Fingerprint_798": "float",
u"Fingerprint_800": "float",
u"Fingerprint_801": "float",
u"Fingerprint_803": "float",
u"Fingerprint_812": "float",
u"Fingerprint_813": "float",
u"Fingerprint_818": "float",
u"Fingerprint_819": "float",
u"Fingerprint_820": "float",
u"Fingerprint_821": "float",
u"Fingerprint_822": "float",
u"Fingerprint_825": "float",
u"Fingerprint_826": "float",
u"Fingerprint_830": "float",
u"Fingerprint_833": "float",
u"Fingerprint_834": "float",
u"From_Sanger": "float",
u"GO_0090263": "float",
u"PKA_140_POL_X_Fingerprint_646": "float",
u"PKA_252_ASA_X_Fingerprint_576": "float",
u"PKA_265_ASA_X_Fingerprint_659": "float",
u"PKA_265_CSV_X_Fingerprint_659": "float",
u"PKA_265_EXP_X_Fingerprint_659": "float",
u"PKA_265_HYD_X_Fingerprint_659": "float",
u"PKA_265_X_Fingerprint_659": "float"
}
def getOutputMetadata():
return {
u"Predicted IC50_1": "float"
}
def score(indata, outdata):
# H1_1
# H1_2
# H1_3
# H1_4
# H1_5
# H1_6
# H1_7
# H1_8
# H1_9
H1_1 = tanh((-17.4489638289197 + -0.03384063616984 * indata[u"EXP_ABL1_X_EXP_WASF1"] + -0.208638861976683 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0112268824476951 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + -2.84486247922276 * indata[u"EXP_COQ8A"] + 1.46439114900473 * indata[u"EXP_EIF2AK2"] + 0.854579596812174 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + -0.102277427773119 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + 0.186233557052793 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 1.59976686529793 * indata[u"EXP_MAP2K5"] + -0.0131646940829501 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.0722827528160879 * indata[u"EXP_STK25_X_EXP_PDCD10"] + 1.59018404018633 * indata[u"EXP_TRPM6"] + 3.72797692945958 * indata[u"Fingerprint_576"] + -1.27695035149801 * indata[u"Fingerprint_611"] + -4.04307959505601 * indata[u"Fingerprint_617"] + -3.30530920008048 * indata[u"Fingerprint_625"] + -2.42676474343724 * indata[u"Fingerprint_629"] + 7.10892020270393 * indata[u"Fingerprint_635"] + 1.37739376524462 * indata[u"Fingerprint_643"] + -1.18991146970314 * indata[u"Fingerprint_644"] + -5.09920200771214 * indata[u"Fingerprint_646"] + -3.23242660804352 * indata[u"Fingerprint_650"] + 0.724196385908929 * indata[u"Fingerprint_656"] + -1.28335167117176 * indata[u"Fingerprint_658"] + 5.16119243246201 * indata[u"Fingerprint_659"] + -2.67257026746771 * indata[u"Fingerprint_667"] + 2.38337139259445 * indata[u"Fingerprint_672"] + -1.00675063304502 * indata[u"Fingerprint_677"] + 1.4057403092101 * indata[u"Fingerprint_679"] + -4.7148635794299 * indata[u"Fingerprint_685"] + -1.45436697250638 * indata[u"Fingerprint_697"] + -2.55442483239196 * indata[u"Fingerprint_698"] + 0.198196456310837 * indata[u"Fingerprint_704"] + 1.6779037429678 * indata[u"Fingerprint_707"] + -5.85535880859836 * indata[u"Fingerprint_709"] + 1.84360285833278 * indata[u"Fingerprint_710"] + 0.732919905273196 * indata[u"Fingerprint_712"] + -2.36687590794109 * indata[u"Fingerprint_714"] + -4.18919871328833 * indata[u"Fingerprint_776"] + 0.16147992603471 * indata[u"Fingerprint_779"] + -1.11951592207865 * indata[u"Fingerprint_784"] + 5.95887555339273 * indata[u"Fingerprint_791"] + -1.03813560156293 * indata[u"Fingerprint_797"] + -4.72508560671303 * indata[u"Fingerprint_798"] + 1.30693199269184 * indata[u"Fingerprint_800"] + 8.15246448323 * indata[u"Fingerprint_801"] + -0.0238341134003004 * indata[u"Fingerprint_803"] + 3.1762434675568 * indata[u"Fingerprint_812"] + -7.03638991089316 * indata[u"Fingerprint_813"] + -1.84078021310589 * indata[u"Fingerprint_818"] + -6.38812597315488 * indata[u"Fingerprint_819"] + 2.35387610871315 * indata[u"Fingerprint_820"] + -0.394640863117228 * indata[u"Fingerprint_821"] + -1.80235508239219 * indata[u"Fingerprint_822"] + -8.36188375640651 * indata[u"Fingerprint_825"] + -1.6322817681689 * indata[u"Fingerprint_826"] + -4.55751148765807 * indata[u"Fingerprint_830"] + -0.190923465919727 * indata[u"Fingerprint_833"] + 4.23496663893606 * indata[u"Fingerprint_834"] + 1.4748403456486 * indata[u"From_Sanger"] + 0.40160490216547 * indata[u"GO_0090263"] + -1.49583052356579 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.00236364131765016 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 0.822697278045898 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 5.11342730975192 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + -0.158228094742972 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + -11.1801992984983 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + 4.59466841919073 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_2 = tanh((1.1484942089278 + -0.0162618858922618 * indata[u"EXP_ABL1_X_EXP_WASF1"] + -0.0829724176196426 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0347543326274715 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + -1.03533156619363 * indata[u"EXP_COQ8A"] + -0.686945891047771 * indata[u"EXP_EIF2AK2"] + -0.389285200844639 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + 0.100264698140447 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + 0.105946899508099 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + -0.239932235355953 * indata[u"EXP_MAP2K5"] + 0.0413006905598837 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.0895647637187592 * indata[u"EXP_STK25_X_EXP_PDCD10"] + 1.3280160951838 * indata[u"EXP_TRPM6"] + 2.11861822753346 * indata[u"Fingerprint_576"] + -0.0350731611982862 * indata[u"Fingerprint_611"] + 5.36480417297934 * indata[u"Fingerprint_617"] + -3.70468778388924 * indata[u"Fingerprint_625"] + -2.60761526504623 * indata[u"Fingerprint_629"] + -3.68429899972343 * indata[u"Fingerprint_635"] + 2.68762193354246 * indata[u"Fingerprint_643"] + -2.19869451052286 * indata[u"Fingerprint_644"] + -0.477852258458967 * indata[u"Fingerprint_646"] + -5.26542720567128 * indata[u"Fingerprint_650"] + -0.296332752939709 * indata[u"Fingerprint_656"] + -4.58337701068446 * indata[u"Fingerprint_658"] + 3.04258773587418 * indata[u"Fingerprint_659"] + -6.08564109433186 * indata[u"Fingerprint_667"] + 1.67599449036197 * indata[u"Fingerprint_672"] + 0.856659973259716 * indata[u"Fingerprint_677"] + -5.18274747032375 * indata[u"Fingerprint_679"] + 1.92550451329634 * indata[u"Fingerprint_685"] + 0.0334642749284108 * indata[u"Fingerprint_697"] + 2.47674238247782 * indata[u"Fingerprint_698"] + -1.11727363683382 * indata[u"Fingerprint_704"] + 3.20644445933306 * indata[u"Fingerprint_707"] + -5.27207277915552 * indata[u"Fingerprint_709"] + 4.1200507535679 * indata[u"Fingerprint_710"] + -2.56452564621884 * indata[u"Fingerprint_712"] + -2.16120496430367 * indata[u"Fingerprint_714"] + -4.39339962657206 * indata[u"Fingerprint_776"] + -0.338272351992453 * indata[u"Fingerprint_779"] + 3.43724831075093 * indata[u"Fingerprint_784"] + 3.80919927303853 * indata[u"Fingerprint_791"] + 6.1851892040954 * indata[u"Fingerprint_797"] + -5.22642039710149 * indata[u"Fingerprint_798"] + 2.61275359580007 * indata[u"Fingerprint_800"] + -6.7769743763636 * indata[u"Fingerprint_801"] + -0.534728049841321 * indata[u"Fingerprint_803"] + -1.65519584422273 * indata[u"Fingerprint_812"] + 5.20213938583666 * indata[u"Fingerprint_813"] + -0.653665234449419 * indata[u"Fingerprint_818"] + -2.92716584214204 * indata[u"Fingerprint_819"] + -4.05834657810018 * indata[u"Fingerprint_820"] + 4.21932331113117 * indata[u"Fingerprint_821"] + -2.20647426183707 * indata[u"Fingerprint_822"] + 3.59406619010863 * indata[u"Fingerprint_825"] + -3.30486044747335 * indata[u"Fingerprint_826"] + 0.278491157489118 * indata[u"Fingerprint_830"] + 1.93876027096119 * indata[u"Fingerprint_833"] + 4.85264323866483 * indata[u"Fingerprint_834"] + 5.37880642519667 * indata[u"From_Sanger"] + -1.0058134286674 * indata[u"GO_0090263"] + 1.14379428245569 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.122333417968633 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + -1.33799296372706 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 5.01530971378832 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + -0.157116747376008 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + 10.7875829247962 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + -0.643590402379127 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_3 = tanh((-13.0941070403987 + 0.000568930885183191 * indata[u"EXP_ABL1_X_EXP_WASF1"] + 0.121860751321488 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + -0.0891484569633032 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + 1.17796827929625 * indata[u"EXP_COQ8A"] + 2.01349326741265 * indata[u"EXP_EIF2AK2"] + 0.647036484636149 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + -0.0432273254575424 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + -0.0412474009096219 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + -1.17169861238972 * indata[u"EXP_MAP2K5"] + 0.0445346271803952 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.063768459442039 * indata[u"EXP_STK25_X_EXP_PDCD10"] + -2.63683780625312 * indata[u"EXP_TRPM6"] + -0.758070202703703 * indata[u"Fingerprint_576"] + -5.51920619863918 * indata[u"Fingerprint_611"] + 0.652670352785891 * indata[u"Fingerprint_617"] + 10.1722508129093 * indata[u"Fingerprint_625"] + 9.19017791601047 * indata[u"Fingerprint_629"] + 4.90769959106624 * indata[u"Fingerprint_635"] + 1.32217897702809 * indata[u"Fingerprint_643"] + 0.10145973051792 * indata[u"Fingerprint_644"] + -1.10104131324684 * indata[u"Fingerprint_646"] + -0.29471410675063 * indata[u"Fingerprint_650"] + -0.299118558914153 * indata[u"Fingerprint_656"] + -0.134641074796954 * indata[u"Fingerprint_658"] + -0.611047190569502 * indata[u"Fingerprint_659"] + 3.6064346103244 * indata[u"Fingerprint_667"] + -2.73872183310964 * indata[u"Fingerprint_672"] + 3.70350734366661 * indata[u"Fingerprint_677"] + -0.758556854523389 * indata[u"Fingerprint_679"] + -1.76158938202798 * indata[u"Fingerprint_685"] + -0.564913694767512 * indata[u"Fingerprint_697"] + -0.513474058099561 * indata[u"Fingerprint_698"] + -2.38936298298804 * indata[u"Fingerprint_704"] + -0.334883084027033 * indata[u"Fingerprint_707"] + -7.20756788279249 * indata[u"Fingerprint_709"] + 1.71986853431909 * indata[u"Fingerprint_710"] + -0.177133351016768 * indata[u"Fingerprint_712"] + 5.32073513156621 * indata[u"Fingerprint_714"] + -0.0660423803464357 * indata[u"Fingerprint_776"] + -2.35410302140335 * indata[u"Fingerprint_779"] + -2.27392552947926 * indata[u"Fingerprint_784"] + -0.716921916159666 * indata[u"Fingerprint_791"] + -3.84700550971286 * indata[u"Fingerprint_797"] + 0.437646222118036 * indata[u"Fingerprint_798"] + 2.26141715520416 * indata[u"Fingerprint_800"] + -3.11527800547533 * indata[u"Fingerprint_801"] + 3.53477478471799 * indata[u"Fingerprint_803"] + 2.66435334953056 * indata[u"Fingerprint_812"] + -7.22848366703184 * indata[u"Fingerprint_813"] + -2.40278429877311 * indata[u"Fingerprint_818"] + 0.948148803995416 * indata[u"Fingerprint_819"] + -2.44316220455496 * indata[u"Fingerprint_820"] + -0.044877505140785 * indata[u"Fingerprint_821"] + -5.29461533905343 * indata[u"Fingerprint_822"] + -8.4174135262537 * indata[u"Fingerprint_825"] + -2.29408936209563 * indata[u"Fingerprint_826"] + -2.42986745446713 * indata[u"Fingerprint_830"] + -3.20236830868059 * indata[u"Fingerprint_833"] + 0.324349579009568 * indata[u"Fingerprint_834"] + 0.790586622661874 * indata[u"From_Sanger"] + 0.647835167316177 * indata[u"GO_0090263"] + -1.32835517611991 * indata[u"PKA_140_POL_X_Fingerprint_646"] + 0.102395058104991 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + -0.875492275938549 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 8.51070952302791 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + 0.46267938474533 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + 10.5529436337986 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + -0.228484884473288 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_4 = tanh((-11.1605071745731 + 0.176016599576983 * indata[u"EXP_ABL1_X_EXP_WASF1"] + -0.112240882622797 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + -0.029960630762318 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + -0.558045715469371 * indata[u"EXP_COQ8A"] + 0.46636557213299 * indata[u"EXP_EIF2AK2"] + -0.0618781122626189 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + -0.499070332320946 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + -0.303473881160214 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 1.98518482903797 * indata[u"EXP_MAP2K5"] + -0.231470128338823 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.0367988943136206 * indata[u"EXP_STK25_X_EXP_PDCD10"] + 4.9492703679406 * indata[u"EXP_TRPM6"] + -3.17035864724319 * indata[u"Fingerprint_576"] + -3.80230755588932 * indata[u"Fingerprint_611"] + -6.45960814767695 * indata[u"Fingerprint_617"] + -0.0812221190437555 * indata[u"Fingerprint_625"] + -5.06061419002125 * indata[u"Fingerprint_629"] + -4.48453091038449 * indata[u"Fingerprint_635"] + -0.283172979794127 * indata[u"Fingerprint_643"] + 3.02142460436491 * indata[u"Fingerprint_644"] + 1.16696269376958 * indata[u"Fingerprint_646"] + -4.13712651528967 * indata[u"Fingerprint_650"] + 1.48881776251114 * indata[u"Fingerprint_656"] + 1.1481174352067 * indata[u"Fingerprint_658"] + 2.47556288383927 * indata[u"Fingerprint_659"] + -4.92046935169853 * indata[u"Fingerprint_667"] + -1.78952572750795 * indata[u"Fingerprint_672"] + 1.08915267739236 * indata[u"Fingerprint_677"] + 5.30660608131007 * indata[u"Fingerprint_679"] + 4.40292672440906 * indata[u"Fingerprint_685"] + -2.85214985194855 * indata[u"Fingerprint_697"] + 0.46428668249775 * indata[u"Fingerprint_698"] + -0.813832825414222 * indata[u"Fingerprint_704"] + 2.00710561885785 * indata[u"Fingerprint_707"] + -7.71010035880635 * indata[u"Fingerprint_709"] + 4.44841259828568 * indata[u"Fingerprint_710"] + 0.704053459346146 * indata[u"Fingerprint_712"] + 4.75143090845596 * indata[u"Fingerprint_714"] + 0.3508662786825 * indata[u"Fingerprint_776"] + -0.853954268798953 * indata[u"Fingerprint_779"] + 4.94753325050791 * indata[u"Fingerprint_784"] + -1.23726890617328 * indata[u"Fingerprint_791"] + -1.17388281490852 * indata[u"Fingerprint_797"] + 3.30050297761045 * indata[u"Fingerprint_798"] + 1.62108863575325 * indata[u"Fingerprint_800"] + 3.86187273542481 * indata[u"Fingerprint_801"] + 0.357654353247603 * indata[u"Fingerprint_803"] + 0.667842394540125 * indata[u"Fingerprint_812"] + 3.12615120919728 * indata[u"Fingerprint_813"] + -2.49541270387021 * indata[u"Fingerprint_818"] + -3.44162532456695 * indata[u"Fingerprint_819"] + -0.931104554931348 * indata[u"Fingerprint_820"] + 3.41573650501573 * indata[u"Fingerprint_821"] + -6.69044773200817 * indata[u"Fingerprint_822"] + -3.10658822508563 * indata[u"Fingerprint_825"] + -2.31784436478966 * indata[u"Fingerprint_826"] + 3.66013672212917 * indata[u"Fingerprint_830"] + 0.563593396728432 * indata[u"Fingerprint_833"] + 3.23942481155161 * indata[u"Fingerprint_834"] + 1.38085621383956 * indata[u"From_Sanger"] + -2.35120636416128 * indata[u"GO_0090263"] + 0.498511977576595 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.0760250214916024 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + -0.1043005465958 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + -3.69254866250787 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + -3.50668967359532 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + -15.3626158134211 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + -6.1331137103435 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_5 = tanh((-26.0485970114031 + 0.0331413942345078 * indata[u"EXP_ABL1_X_EXP_WASF1"] + 0.0346721495740685 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0546729895157554 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + 2.46370301542054 * indata[u"EXP_COQ8A"] + 0.407473368180815 * indata[u"EXP_EIF2AK2"] + 0.791697049302302 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + 0.68164358465167 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + -0.162797474848947 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 0.92782818275872 * indata[u"EXP_MAP2K5"] + -0.138246565074377 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + -0.0841843869265326 * indata[u"EXP_STK25_X_EXP_PDCD10"] + -1.10848470532257 * indata[u"EXP_TRPM6"] + -1.15636231494057 * indata[u"Fingerprint_576"] + 0.281315474746373 * indata[u"Fingerprint_611"] + -1.41801506442782 * indata[u"Fingerprint_617"] + -2.19209598928381 * indata[u"Fingerprint_625"] + 0.917552027523811 * indata[u"Fingerprint_629"] + 6.56762574608614 * indata[u"Fingerprint_635"] + 1.2263944200165 * indata[u"Fingerprint_643"] + -4.19077587597146 * indata[u"Fingerprint_644"] + -3.67012326925445 * indata[u"Fingerprint_646"] + 3.38590117810992 * indata[u"Fingerprint_650"] + -1.63018732915361 * indata[u"Fingerprint_656"] + -0.194349148482062 * indata[u"Fingerprint_658"] + -5.56581160238152 * indata[u"Fingerprint_659"] + -1.53459339152853 * indata[u"Fingerprint_667"] + -2.59958380541973 * indata[u"Fingerprint_672"] + -2.45037168013498 * indata[u"Fingerprint_677"] + -3.1704149936032 * indata[u"Fingerprint_679"] + 7.52699204135275 * indata[u"Fingerprint_685"] + 2.34184615273399 * indata[u"Fingerprint_697"] + 0.966202321236134 * indata[u"Fingerprint_698"] + 0.917890647435834 * indata[u"Fingerprint_704"] + 2.18186924608136 * indata[u"Fingerprint_707"] + 0.714697746498247 * indata[u"Fingerprint_709"] + 6.67521610475157 * indata[u"Fingerprint_710"] + 1.28854936637591 * indata[u"Fingerprint_712"] + 1.57428220088359 * indata[u"Fingerprint_714"] + 0.478567827387103 * indata[u"Fingerprint_776"] + -2.59281444305733 * indata[u"Fingerprint_779"] + 0.845987985919079 * indata[u"Fingerprint_784"] + -0.174551114790143 * indata[u"Fingerprint_791"] + -2.14288008246369 * indata[u"Fingerprint_797"] + 0.361149741642908 * indata[u"Fingerprint_798"] + 4.88597551423651 * indata[u"Fingerprint_800"] + -5.27431655691027 * indata[u"Fingerprint_801"] + -1.28398064164104 * indata[u"Fingerprint_803"] + 4.6266882629994 * indata[u"Fingerprint_812"] + -10.6331264135529 * indata[u"Fingerprint_813"] + -4.67081006350422 * indata[u"Fingerprint_818"] + 7.50933428761638 * indata[u"Fingerprint_819"] + 0.759403988010153 * indata[u"Fingerprint_820"] + 2.13146759806505 * indata[u"Fingerprint_821"] + -5.55129428911944 * indata[u"Fingerprint_822"] + 7.99156679501701 * indata[u"Fingerprint_825"] + -0.518231665746639 * indata[u"Fingerprint_826"] + -0.517032992141317 * indata[u"Fingerprint_830"] + -4.158510318919 * indata[u"Fingerprint_833"] + -1.97865391197133 * indata[u"Fingerprint_834"] + -1.64556565005169 * indata[u"From_Sanger"] + 0.822669939166975 * indata[u"GO_0090263"] + -0.375737318099766 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.691028649462493 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 0.662850745894362 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + -2.08903562862821 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + 0.492812687333098 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + 6.58387096613873 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + 2.18374093182191 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_6 = tanh((-16.4877218165091 + -0.147344003793052 * indata[u"EXP_ABL1_X_EXP_WASF1"] + -0.00903144836277075 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.106402358719174 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + 2.45059799405463 * indata[u"EXP_COQ8A"] + -0.865054787026143 * indata[u"EXP_EIF2AK2"] + 1.58888164327313 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + -1.07704420875003 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + 1.195918557426 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 1.88012995895299 * indata[u"EXP_MAP2K5"] + -0.0732293981875795 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.0875477185800213 * indata[u"EXP_STK25_X_EXP_PDCD10"] + -7.7708556996494 * indata[u"EXP_TRPM6"] + 0.41156366548254 * indata[u"Fingerprint_576"] + 2.02913848385012 * indata[u"Fingerprint_611"] + 1.44829651270362 * indata[u"Fingerprint_617"] + 2.51935945019468 * indata[u"Fingerprint_625"] + 5.59737313725016 * indata[u"Fingerprint_629"] + 3.62490167703353 * indata[u"Fingerprint_635"] + -1.46789138347118 * indata[u"Fingerprint_643"] + 1.22142174385628 * indata[u"Fingerprint_644"] + 1.14549275279329 * indata[u"Fingerprint_646"] + 5.43892488732554 * indata[u"Fingerprint_650"] + 3.67179715096721 * indata[u"Fingerprint_656"] + -1.23295219218177 * indata[u"Fingerprint_658"] + -1.7474934001924 * indata[u"Fingerprint_659"] + -7.0995782733703 * indata[u"Fingerprint_667"] + 2.95947485061653 * indata[u"Fingerprint_672"] + 4.41295193086363 * indata[u"Fingerprint_677"] + -3.58264239062851 * indata[u"Fingerprint_679"] + 5.32552946655342 * indata[u"Fingerprint_685"] + -0.459264732161095 * indata[u"Fingerprint_697"] + 1.10137102546857 * indata[u"Fingerprint_698"] + 3.3709787763769 * indata[u"Fingerprint_704"] + 2.09915869113592 * indata[u"Fingerprint_707"] + 1.05331708815621 * indata[u"Fingerprint_709"] + -8.0196848563701 * indata[u"Fingerprint_710"] + -0.0525164832301289 * indata[u"Fingerprint_712"] + 3.2642328525921 * indata[u"Fingerprint_714"] + -1.97733541224889 * indata[u"Fingerprint_776"] + 1.46442643565701 * indata[u"Fingerprint_779"] + -3.2028829150695 * indata[u"Fingerprint_784"] + -0.607453867918589 * indata[u"Fingerprint_791"] + 1.68087882015833 * indata[u"Fingerprint_797"] + 2.01887501302137 * indata[u"Fingerprint_798"] + 1.69163118582029 * indata[u"Fingerprint_800"] + -13.8305362881614 * indata[u"Fingerprint_801"] + -0.812676385726852 * indata[u"Fingerprint_803"] + -0.993260514965094 * indata[u"Fingerprint_812"] + 5.21593496353491 * indata[u"Fingerprint_813"] + 3.28736100959644 * indata[u"Fingerprint_818"] + -5.42899552382674 * indata[u"Fingerprint_819"] + -4.61862024726102 * indata[u"Fingerprint_820"] + -0.0664665762878172 * indata[u"Fingerprint_821"] + -9.07021382309341 * indata[u"Fingerprint_822"] + -3.57514590509758 * indata[u"Fingerprint_825"] + 1.07918436995299 * indata[u"Fingerprint_826"] + 0.348504978219423 * indata[u"Fingerprint_830"] + -7.88821236783586 * indata[u"Fingerprint_833"] + 2.62866074390727 * indata[u"Fingerprint_834"] + -1.75231567063239 * indata[u"From_Sanger"] + 0.578206673030751 * indata[u"GO_0090263"] + 0.389605446335451 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.0974332053142329 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 1.954618830946 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + -1.49151906348657 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + -0.26851975372125 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + -2.15792080737236 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + 1.96655232321754 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_7 = tanh((-40.2102714377397 + 0.129157165630173 * indata[u"EXP_ABL1_X_EXP_WASF1"] + -0.395010166709183 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0134135349227413 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + 3.61129489446125 * indata[u"EXP_COQ8A"] + 0.418972612936108 * indata[u"EXP_EIF2AK2"] + 0.189637990342247 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + 1.43546262919402 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + -0.563788281945154 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 0.983218949517404 * indata[u"EXP_MAP2K5"] + -0.110531907587942 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + -0.000755874736391343 * indata[u"EXP_STK25_X_EXP_PDCD10"] + 1.68882990656312 * indata[u"EXP_TRPM6"] + 0.761486822540793 * indata[u"Fingerprint_576"] + 0.0879340475934506 * indata[u"Fingerprint_611"] + -2.56000617017738 * indata[u"Fingerprint_617"] + 1.12747439245435 * indata[u"Fingerprint_625"] + -1.39053729570057 * indata[u"Fingerprint_629"] + 5.86818005688356 * indata[u"Fingerprint_635"] + 2.23099376997261 * indata[u"Fingerprint_643"] + 2.27654257923562 * indata[u"Fingerprint_644"] + 2.65714074912595 * indata[u"Fingerprint_646"] + -5.36624952912004 * indata[u"Fingerprint_650"] + -5.3644007202718 * indata[u"Fingerprint_656"] + -0.399194385295165 * indata[u"Fingerprint_658"] + -1.72288148116535 * indata[u"Fingerprint_659"] + 3.26466143227626 * indata[u"Fingerprint_667"] + -0.197483178972642 * indata[u"Fingerprint_672"] + 11.3353362456324 * indata[u"Fingerprint_677"] + -3.14162471512631 * indata[u"Fingerprint_679"] + 0.0289386131420476 * indata[u"Fingerprint_685"] + 0.861581776760126 * indata[u"Fingerprint_697"] + 3.30224538381924 * indata[u"Fingerprint_698"] + 2.00843605681069 * indata[u"Fingerprint_704"] + 2.6190358157179 * indata[u"Fingerprint_707"] + 0.368948379823907 * indata[u"Fingerprint_709"] + 3.66294862527094 * indata[u"Fingerprint_710"] + -3.22716600615212 * indata[u"Fingerprint_712"] + -0.700218821838725 * indata[u"Fingerprint_714"] + -0.597985082633684 * indata[u"Fingerprint_776"] + -0.347496496886741 * indata[u"Fingerprint_779"] + -0.773960544682966 * indata[u"Fingerprint_784"] + 3.74740172883821 * indata[u"Fingerprint_791"] + 3.50884092637044 * indata[u"Fingerprint_797"] + 0.698377915994339 * indata[u"Fingerprint_798"] + 2.46612114352196 * indata[u"Fingerprint_800"] + 5.18930777710737 * indata[u"Fingerprint_801"] + -1.86544104482818 * indata[u"Fingerprint_803"] + -7.01117554187091 * indata[u"Fingerprint_812"] + -3.71314850872473 * indata[u"Fingerprint_813"] + -2.80710798447972 * indata[u"Fingerprint_818"] + -0.806245900733964 * indata[u"Fingerprint_819"] + -1.99433427208779 * indata[u"Fingerprint_820"] + 0.164133504243564 * indata[u"Fingerprint_821"] + -1.69826296472606 * indata[u"Fingerprint_822"] + 4.02001386112892 * indata[u"Fingerprint_825"] + -0.892685284485493 * indata[u"Fingerprint_826"] + -3.05332426019241 * indata[u"Fingerprint_830"] + -0.981368989890439 * indata[u"Fingerprint_833"] + 2.39708592378143 * indata[u"Fingerprint_834"] + -3.4045947743675 * indata[u"From_Sanger"] + 0.62355602810359 * indata[u"GO_0090263"] + -2.07449709700857 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.555035441897514 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 1.91669355154768 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 3.48645421256402 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + 0.00905014101270096 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + 20.5796567969056 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + -2.38581476720624 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_8 = tanh((-49.3554966584402 + -0.0954345529930431 * indata[u"EXP_ABL1_X_EXP_WASF1"] + 0.0326839334148208 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0180536090323104 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + -0.85888186718544 * indata[u"EXP_COQ8A"] + 1.72516712647535 * indata[u"EXP_EIF2AK2"] + 0.413024400484081 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + 0.912723840220051 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + -0.360759605205901 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + 1.91178544777373 * indata[u"EXP_MAP2K5"] + 0.0314349871902432 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + 0.0989792626246006 * indata[u"EXP_STK25_X_EXP_PDCD10"] + 0.597653226188894 * indata[u"EXP_TRPM6"] + 1.58956418979806 * indata[u"Fingerprint_576"] + 1.46375025222909 * indata[u"Fingerprint_611"] + 5.40000713448029 * indata[u"Fingerprint_617"] + -2.15164341751039 * indata[u"Fingerprint_625"] + 4.21383667831044 * indata[u"Fingerprint_629"] + -1.03939931501941 * indata[u"Fingerprint_635"] + -2.05600852315238 * indata[u"Fingerprint_643"] + 1.21462170023416 * indata[u"Fingerprint_644"] + 2.37144983454974 * indata[u"Fingerprint_646"] + 0.0845976046926738 * indata[u"Fingerprint_650"] + 1.81954769848885 * indata[u"Fingerprint_656"] + 1.38782445352937 * indata[u"Fingerprint_658"] + 3.21597127479634 * indata[u"Fingerprint_659"] + 5.23174494699555 * indata[u"Fingerprint_667"] + -0.401195347239477 * indata[u"Fingerprint_672"] + 1.63360756607459 * indata[u"Fingerprint_677"] + 17.0027200573104 * indata[u"Fingerprint_679"] + 4.51465713931606 * indata[u"Fingerprint_685"] + 2.17711818044357 * indata[u"Fingerprint_697"] + 1.51985551960078 * indata[u"Fingerprint_698"] + 1.45260089599339 * indata[u"Fingerprint_704"] + 3.11031973507035 * indata[u"Fingerprint_707"] + -6.36435013509164 * indata[u"Fingerprint_709"] + -0.452674256428442 * indata[u"Fingerprint_710"] + -6.64228829771112 * indata[u"Fingerprint_712"] + 1.82110110857119 * indata[u"Fingerprint_714"] + -1.71624205317893 * indata[u"Fingerprint_776"] + 2.27340231243943 * indata[u"Fingerprint_779"] + -3.13557588381317 * indata[u"Fingerprint_784"] + -5.68922211046879 * indata[u"Fingerprint_791"] + 1.17555382880294 * indata[u"Fingerprint_797"] + 2.16037287181947 * indata[u"Fingerprint_798"] + 1.3176468458715 * indata[u"Fingerprint_800"] + -1.39222477883734 * indata[u"Fingerprint_801"] + -3.38684995181639 * indata[u"Fingerprint_803"] + -7.10433994103236 * indata[u"Fingerprint_812"] + -7.81669707773047 * indata[u"Fingerprint_813"] + 0.807699329210617 * indata[u"Fingerprint_818"] + 0.0539155779860813 * indata[u"Fingerprint_819"] + -0.0244637727785953 * indata[u"Fingerprint_820"] + -4.05180123601464 * indata[u"Fingerprint_821"] + 6.49015545457672 * indata[u"Fingerprint_822"] + -12.1417344129344 * indata[u"Fingerprint_825"] + 7.74409637190409 * indata[u"Fingerprint_826"] + -3.63759489387357 * indata[u"Fingerprint_830"] + 9.25227253286294 * indata[u"Fingerprint_833"] + -3.0326898939039 * indata[u"Fingerprint_834"] + -4.76573589812309 * indata[u"From_Sanger"] + -0.129552187845017 * indata[u"GO_0090263"] + 0.223467390365249 * indata[u"PKA_140_POL_X_Fingerprint_646"] + 0.119489518342948 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 2.50167696786905 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 7.41464029784492 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + 0.509368082621991 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + -21.6654819584849 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + 1.90917032059619 * indata[u"PKA_265_X_Fingerprint_659"]))
H1_9 = tanh((96.4648466035971 + -0.242040239169766 * indata[u"EXP_ABL1_X_EXP_WASF1"] + 0.0684395761444439 * indata[u"EXP_BMP2K_X_EXP_NUMB"] + 0.0225305650046428 * indata[u"EXP_BMP2K_X_EXP_RALBP1"] + -0.336804979684824 * indata[u"EXP_COQ8A"] + -0.29331231149263 * indata[u"EXP_EIF2AK2"] + -0.665111578097951 * indata[u"EXP_GRK2_X_EXP_OR5AC2"] + -0.566747554999088 * indata[u"EXP_GRK2_X_EXP_OR6A2"] + 0.441835231233196 * indata[u"EXP_GRK2_X_EXP_P2RY11"] + -6.91430559280384 * indata[u"EXP_MAP2K5"] + 0.166885288683713 * indata[u"EXP_PHKG2_X_EXP_PHKA1"] + -0.113718385348762 * indata[u"EXP_STK25_X_EXP_PDCD10"] + -15.2590724537449 * indata[u"EXP_TRPM6"] + -1.47592094617421 * indata[u"Fingerprint_576"] + -3.99403249046543 * indata[u"Fingerprint_611"] + -1.35153610330597 * indata[u"Fingerprint_617"] + 6.05540848541821 * indata[u"Fingerprint_625"] + 2.17092499301711 * indata[u"Fingerprint_629"] + -0.687165240905041 * indata[u"Fingerprint_635"] + -0.532221665659998 * indata[u"Fingerprint_643"] + 3.75198616759509 * indata[u"Fingerprint_644"] + -1.6999140208278 * indata[u"Fingerprint_646"] + -0.230495664023956 * indata[u"Fingerprint_650"] + 1.34939311868337 * indata[u"Fingerprint_656"] + 0.537448183735122 * indata[u"Fingerprint_658"] + 2.50046328081472 * indata[u"Fingerprint_659"] + 1.20126819111821 * indata[u"Fingerprint_667"] + 0.658417490583646 * indata[u"Fingerprint_672"] + -2.61322899338759 * indata[u"Fingerprint_677"] + -7.54433149789617 * indata[u"Fingerprint_679"] + 0.225616528564852 * indata[u"Fingerprint_685"] + 4.36777355165044 * indata[u"Fingerprint_697"] + 1.08448680192337 * indata[u"Fingerprint_698"] + -1.69904715519135 * indata[u"Fingerprint_704"] + -0.167441293970086 * indata[u"Fingerprint_707"] + 1.05325138550523 * indata[u"Fingerprint_709"] + 1.24285312499201 * indata[u"Fingerprint_710"] + -2.43118835774479 * indata[u"Fingerprint_712"] + -1.89537466193851 * indata[u"Fingerprint_714"] + 3.93524666084115 * indata[u"Fingerprint_776"] + -1.48238338150549 * indata[u"Fingerprint_779"] + 0.124991528615425 * indata[u"Fingerprint_784"] + -0.860486842671832 * indata[u"Fingerprint_791"] + 0.0154833829486507 * indata[u"Fingerprint_797"] + 5.21985971518557 * indata[u"Fingerprint_798"] + -4.45497395123947 * indata[u"Fingerprint_800"] + 6.20262038437467 * indata[u"Fingerprint_801"] + 5.69000146259566 * indata[u"Fingerprint_803"] + -1.70302030322866 * indata[u"Fingerprint_812"] + 8.61514035777202 * indata[u"Fingerprint_813"] + -4.08109235695615 * indata[u"Fingerprint_818"] + 7.19526525774381 * indata[u"Fingerprint_819"] + -2.49907970478611 * indata[u"Fingerprint_820"] + 1.33785486800663 * indata[u"Fingerprint_821"] + -3.01188111532797 * indata[u"Fingerprint_822"] + 14.0509690796081 * indata[u"Fingerprint_825"] + 1.92217645341121 * indata[u"Fingerprint_826"] + 1.82324778860255 * indata[u"Fingerprint_830"] + 0.822273843167425 * indata[u"Fingerprint_833"] + 7.34690920074209 * indata[u"Fingerprint_834"] + -0.351386871300694 * indata[u"From_Sanger"] + -1.19999109206893 * indata[u"GO_0090263"] + -0.960292109125361 * indata[u"PKA_140_POL_X_Fingerprint_646"] + -0.182090884182275 * indata[u"PKA_252_ASA_X_Fingerprint_576"] + 0.233422334486717 * indata[u"PKA_265_ASA_X_Fingerprint_659"] + 10.7471462671223 * indata[u"PKA_265_CSV_X_Fingerprint_659"] + -0.217021744010681 * indata[u"PKA_265_EXP_X_Fingerprint_659"] + -0.00770240307786153 * indata[u"PKA_265_HYD_X_Fingerprint_659"] + -0.432977400939573 * indata[u"PKA_265_X_Fingerprint_659"]))
outdata[u"Predicted IC50_1"] = 2.35201493592557 + 1.26810528750201 * H1_1 + -1.19477373312071 * H1_2 + -0.81157819938711 * H1_3 + 0.74945458463904 * H1_4 + 1.14579216381282 * H1_5 + -0.725707869504081 * H1_6 + -0.558535572433184 * H1_7 + 0.958219843383584 * H1_8 + 0.841951708395723 * H1_9
return outdata[u"Predicted IC50_1"]
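# Hypothetical usage sketch (all-zero inputs are placeholders, not meaningful data):
#   indata = {name: 0.0 for name in getInputMetadata()}
#   outdata = {}
#   print(score(indata, outdata))   # fills and returns outdata[u"Predicted IC50_1"]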
# ==============================================================
# /assessments/migrations/0001_initial.py -- repo: 70-6C-65-61-73-75-72-65h/erp
# ==============================================================
# Generated by Django 2.2.6 on 2019-12-03 17:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('company_operations', '0001_initial'),
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('assess', models.IntegerField()),
('created', models.DateField()),
('client', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assesments', to='accounts.Client')),
('pharmacy', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assesments', to='company_operations.WareHouse')),
('worker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assesments', to='accounts.Worker')),
],
),
]
# ==============================================================
# /python3/machine_learning/tensor/deep_network_XOR/deep_network.py
# -- repo: Sn-Kinos/toy_box
# ==============================================================
import tensorflow as tf
import numpy as np
xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))
print(x_data)
print(y_data)
X = tf.placeholder(tf.float32, name='X-input')
Y = tf.placeholder(tf.float32, name='Y-input')
y_hist = tf.histogram_summary('y', Y)
W1 = tf.Variable(tf.random_uniform([2, 5], -1.0, 1.0), name='Weight1')
W2 = tf.Variable(tf.random_uniform([5, 4], -1.0, 1.0), name='Weight2')
W3 = tf.Variable(tf.random_uniform([4, 1], -1.0, 1.0), name='Weight3')
w1_hist = tf.histogram_summary('weights1', W1)
w2_hist = tf.histogram_summary('weights2', W2)
w3_hist = tf.histogram_summary('weights3', W3)
b1 = tf.Variable(tf.zeros([5]), name="Bias1")
b2 = tf.Variable(tf.zeros([4]), name="Bias2")
b3 = tf.Variable(tf.zeros([1]), name="Bias3")
b1_hist = tf.histogram_summary('biases1', b1)
b2_hist = tf.histogram_summary('biases2', b2)
b3_hist = tf.histogram_summary('biases3', b3)
with tf.name_scope('layer2') as scope:
L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
with tf.name_scope('layer3') as scope:
L3 = tf.sigmoid(tf.matmul(L2, W2) + b2)
with tf.name_scope('layer4') as scope:
hypothesis = tf.sigmoid(tf.matmul(L3, W3) + b3)
with tf.name_scope('cost') as scope:
cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
cost_summ = tf.scalar_summary('cost', cost)
a = tf.Variable(0.1)
with tf.name_scope('train') as scope:
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
accuracy_summ = tf.scalar_summary('accuracy', accuracy)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
# python3 /usr/local/bin/tensorboard --logdir=./logs/xor_logs/
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("./logs/xor_logs", sess.graph_def)
for step in range(10000):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 1000 == 0:
summary = sess.run(merged, feed_dict={X:x_data, Y:y_data})
writer.add_summary(summary, step)
print(step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W1), sess.run(W2), sess.run(W3))
# Test model
print(sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data}))
print("Accuracy: ", accuracy.eval({X:x_data, Y:y_data}))
# ==============================================================
# /seventeenth_of_december.py -- repo: MartinMekk/julekalender
# ==============================================================
__author__ = 'martinsolheim'
import copy
def state_machine(from_state):
if from_state == 1:
return 6, 8
elif from_state == 2:
return 7, 9
elif from_state == 3:
return 4, 8
elif from_state == 4:
return 0, 3, 9
elif from_state == 6:
return 0, 1, 7
elif from_state == 7:
return 2, 6
elif from_state == 8:
return 1, 3
elif from_state == 9:
return 2, 4
elif from_state == 0:
return 4, 6
else:
return -1
node_list = [[1]]
while len(node_list[0]) < 10:
list_length = len(node_list)
for i in range(0, list_length):
sub_list = node_list[i]
next_move = list(state_machine(sub_list[-1]))
new_node = copy.copy(sub_list)
new_node.append(next_move[1])
node_list.append(new_node)
if len(next_move) > 2:
new_node = copy.copy(sub_list)
new_node.append(next_move[2])
node_list.append(new_node)
sub_list.append(next_move[0])
print(len(node_list))