| column | dtype | range / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 5–283 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–41 |
| license_type | string | 2 classes |
| repo_name | string | lengths 7–96 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k–662M, nullable (⌀) |
| star_events_count | int64 | 0–35.5k |
| fork_events_count | int64 | 0–20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7–5.88M |
| extension | string | 30 classes |
| content | string | lengths 7–5.88M |
| authors | sequence | lengths 1–1 |
| author | string | lengths 0–73 |

| /CommitmentSchemeAttack.py | CarlTern/Commitment-scheme-attack | refs/heads/master | no_license | UTF-8 | Python | 3,749 bytes |

import hashlib
import matplotlib.pyplot as plot
import random
def makeHash(k, v):
    bitString = str(v) + str(bin(k)[2:])
md5Hash = hashlib.md5(bitString.encode()).hexdigest()
return (bin(int(md5Hash, 16))[2:]).zfill(128)
def truncate(bitString, outputSize):
return bitString[:outputSize]
def createCommitments():
# First we create the different commitments.
commitmentsIsVote0 = list()
commitmentsIsVote1 = list()
for k in range(0, pow(2,16)):
commitmentsIsVote0.append(makeHash(k, 0))
commitmentsIsVote1.append(makeHash(k, 1))
print("succesfully created hashes!")
return commitmentsIsVote0, commitmentsIsVote1
# The receiver of the commitment performs the attack.
# We want to prove a collision between ANY hashes, since then we can change the vote.
def concealingAttack(commitmentsIsVote0, commitmentsIsVote1):
x = list()
y = list()
    # The start of the simulation. For every size of hash, let's simulate.
for sizeOfHash in range(1, 129): # As MD5 has 128 bit output.
print("Current size:", sizeOfHash)
x.append(sizeOfHash)
hashes = dict()
for i in range(pow(2, 16)):
truncatedHash0 = truncate(commitmentsIsVote0[i], sizeOfHash)
truncatedHash1 = truncate(commitmentsIsVote1[i], sizeOfHash)
if(truncatedHash0 in hashes):
hashes[truncatedHash0][0] +=1
else:
hashes[truncatedHash0] = [1, 0]
#let's check vote 1 too.
if(truncatedHash1 in hashes):
hashes[truncatedHash1][1] +=1
else:
hashes[truncatedHash1] = [0, 1]
hashesWithoutCollisions = 0
for hash in hashes:
            if(hashes[hash][0] == 0 or hashes[hash][1] == 0):  # If no collision => we can break the concealing.
hashesWithoutCollisions += 1
y.append(hashesWithoutCollisions / len(hashes))
plot.plot(x, y)
plot.xlabel('Size of hash')
plot.ylabel('Probability of breaking concealing')
plot.title('Simulation')
plot.show()
#The creator of the commitment performs the attack
# We want to be certain of the vote. How? If there are many collisions => hard to be certain of the vote.
def bindingAttack(commitmentsIsVote0, commitmentsIsVote1):
x = list()
y = list()
    # The start of the simulation. For every size of hash, let's simulate.
for sizeOfHash in range(1, 129): # As MD5 has 128 bit output.
print("Current size:", sizeOfHash)
x.append(sizeOfHash)
hasCollision = 0 # Either 0% or 100%
hashes = dict()
for i in range(pow(2, 16)):
truncatedHash0 = truncate(commitmentsIsVote0[i], sizeOfHash)
truncatedHash1 = truncate(commitmentsIsVote1[i], sizeOfHash)
if(truncatedHash0 in hashes):
hashes[truncatedHash0][0] +=1
else:
hashes[truncatedHash0] = [1, 0]
#let's check vote 1 too.
if(truncatedHash1 in hashes):
hashes[truncatedHash1][1] +=1
else:
hashes[truncatedHash1] = [0, 1]
for hash in hashes:
if(hashes[hash][0] > 0 and hashes[hash][1] > 0):
hasCollision = 1
break
y.append(hasCollision)
plot.plot(x, y)
plot.xlabel('Size of hash')
plot.ylabel('Probability of breaking binding')
plot.title('Simulation')
plot.show()
if __name__ == '__main__':
commitmentsIsVote0, commitmentsIsVote1 = createCommitments()
#bindingAttack(commitmentsIsVote0, commitmentsIsVote1)
    concealingAttack(commitmentsIsVote0, commitmentsIsVote1)
"[email protected]"
] | |
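A quick analytic companion to the simulation above (my addition, not part of the repo): under a random-oracle model of truncated MD5, a t-bit digest avoids all 2^16 opposite-vote digests with probability (1 - 2^-t)^(2^16). That is roughly the per-commitment chance the receiver can identify the vote (breaking concealing), and its complement is what lets the sender equivocate (breaking binding).

```python
# Sketch (mine, random-oracle assumption): analytic counterpart of the
# curves simulated by concealingAttack/bindingAttack above.
def p_no_collision(t_bits, n_keys=2**16):
    return (1 - 2.0**-t_bits) ** n_keys

for t in (8, 16, 24, 32, 40):
    print("t=%2d bits: P[no opposite-vote collision] = %.6f" % (t, p_no_collision(t)))
```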

| /p3.py | ali7697/principles-of-artificial-intelligence-project1 | refs/heads/main | no_license | UTF-8 | Python | 6,871 bytes |

from copy import deepcopy
infile = r"E:\test.txt"
state = []
explored = []
frontier = []
node_tolid_shode = 0  # number of nodes generated ("tolid shode" is Persian for "generated")
node_bast_dade_shode = 0  # number of nodes expanded ("bast dade shode" = "expanded")
def print_state(printed_state):
for column in range(k):
if printed_state[column][0]:
for indd in range(len(printed_state[column][0])):
print(printed_state[column][0][indd], end='')
print(printed_state[column][1][indd], end='')
print(" ", end='')
print(" ")
else:
print('#')
print("depth = ", end='')
print(printed_state[k])
print(" ")
def move_to_next_state(st1, mv):
st = deepcopy(st1)
source_column = mv[0]
destination_column = mv[1]
# number
st[destination_column][0].append(st[source_column][0][-1])
# column
st[destination_column][1].append(st[source_column][1][-1])
st[source_column][0].pop(-1)
st[source_column][1].pop(-1)
return st
def goal_check(st):
nums_sorted = [False] * (len(st) - 2)
same_color = [False] * (len(st) - 2)
for index in range(len(st) - 2):
nums = st[index][0][:]
if len(nums) == 0:
same_color[index] = True
nums_sorted[index] = True
continue
if len(nums) != n:
return False
cols = st[index][1][:]
nums.sort(reverse=True)
if nums == st[index][0]:
nums_sorted[index] = True
same_color[index] = all(elem == st[index][1][0] for elem in cols)
final_num_check = all(elem == True for elem in nums_sorted)
final_color_check = all(elem == True for elem in same_color)
if final_color_check and final_num_check:
return True
return False
def next_moves_function(st):
possible_moves = []
for column in range(len(st) - 2):
if st[column][0]:
last_card_num = st[column][0][-1]
for col in range(len(st) - 2):
if col != column:
if st[col][0]:
if last_card_num < st[col][0][-1]:
# we have a move now
possible_moves.append([column, col])
else:
possible_moves.append([column, col])
return possible_moves
def heuristic_calculate(st):
g = 0
for d in range(len(st)-2):
changed = False
column_current_length = len(st[d][0])
# column length
if column_current_length > n:
g = g + (column_current_length - n)
# order of the numbers
for c in range(len(st[d][0])):
if st[d][0] and st[d][0][c] != (n - c):
g = g + (len(st[d][0]) - c)
changed = True
break
# colors
if not changed and st[d][1]:
for c in range(1, len(st[d][1])):
if st[d][1][c] != st[d][1][c-1]:
g = g + (len(st[d][0]) - c)
break
return g
def a_star(st):
global node_tolid_shode
global node_bast_dade_shode
global frontier
done = False
while not done:
next_moves = next_moves_function(st)
for ind in range(len(next_moves)):
tmp_state = move_to_next_state(st, next_moves[ind])
# graph search
tmp_state[k] = tmp_state[k] + 1
tmp_state[k+1] = deepcopy(st)
flag_in_explored = False
for s in explored:
flag = True
for c in range(k):
if s[c] != tmp_state[c]:
flag = False
break
if flag:
flag_in_explored = True
break
if flag_in_explored:
continue
flag_in_frontier = False
for s in frontier:
flag = True
for c in range(k):
if s[c] != tmp_state[c]:
flag = False
break
if flag:
flag_in_frontier = True
the_state = s
break
if flag_in_frontier:
# heuristics are equal
# should just go for tmp_state[k]
                if tmp_state[k] < the_state[k]:  # same board reached at smaller depth: g is smaller and h identical, so keep the cheaper copy
frontier.remove(the_state)
frontier.append(deepcopy(tmp_state))
continue
frontier.append(deepcopy(tmp_state))
node_tolid_shode += 1
# get the state with the minimum f + g
min_value_state = frontier[0]
for s in frontier:
cost = heuristic_calculate(s)+s[k]
min_cost = heuristic_calculate(min_value_state) + min_value_state[k]
if cost < min_cost:
min_value_state = s
node_bast_dade_shode += 1
if goal_check(min_value_state):
print("done!")
print(f"node tolid shode: {node_tolid_shode}")
print(f"node bast dade shode: {node_bast_dade_shode}")
printed_states = []
the_s = deepcopy(min_value_state)
while the_s[k + 1] != 0:
printed_states.append(the_s)
the_s = the_s[k + 1]
q = len(printed_states) - 1
print_state(init_state)
while q >= 0:
# print(printed_states[q][0:6])
print_state(printed_states[q])
q -= 1
break
explored.append(deepcopy(min_value_state))
frontier.remove(min_value_state)
st = min_value_state
# reading and processing the inputs
with open(infile) as f:
k, m, n = [int(inp) for inp in next(f).split()]
for i in range(k):
j = 0
tmp = []
numbers = []
colors = []
card = [inp for inp in next(f).split()]
if card != ['#']:
for j in range(len(card) - 1):
color = card[j][-1]
number = card[j][0:-1]
colors.append(color)
numbers.append(int(number))
if len(card) >= 1:
if len(card) == 1:
j = j - 1
card[j + 1] = card[j + 1].split("\n")[0]
color = card[j + 1][-1]
number = card[j + 1][0:-1]
colors.append(color)
numbers.append(int(number))
tmp.append(numbers)
tmp.append(colors)
state.append(tmp)
# appending depth
state.append(0)
# appending the parent
state.append(0)
init_state = deepcopy(state)
r = heuristic_calculate(state)
a_star(state)
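For orientation (my sketch, not from the repo): p3.py stores a state as k columns of [numbers, colors] followed by a depth counter and a parent back-pointer, with n ranks per suit. The heuristic charges each column for excess length, for every card above the first out-of-order rank, and, failing that, for every card above the first color break:

```python
# Standalone re-statement (mine) of heuristic_calculate on a toy state.
def heuristic(state, n):
    g = 0
    for numbers, colors in state[:-2]:
        g += max(0, len(numbers) - n)              # overlong column
        for c, num in enumerate(numbers):
            if num != n - c:                       # first misplaced rank
                g += len(numbers) - c
                break
        else:
            for c in range(1, len(colors)):
                if colors[c] != colors[c - 1]:     # first color break
                    g += len(colors) - c
                    break
    return g

toy = [[[2, 1], ["r", "b"]], [[], []], 0, 0]       # k=2 columns, n=2 ranks
print(heuristic(toy, 2))                           # -> 1: one card sits above the color break
```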

| /multi-task/cf_vae_cpmf_extend.py | zakosai/research | refs/heads/master | no_license | UTF-8 | Python | 30,131 bytes |

import tensorflow as tf
import os
from tensorbayes.layers import dense, placeholder, conv2d, conv2d_transpose, max_pool
from keras.backend import binary_crossentropy
import numpy as np
import time
import scipy
import scipy.io as sio
import math
import tensorflow.contrib.layers as slim
class params:
def __init__(self):
self.C_a = 1.0
self.C_b = 0.01
self.lambda_u = 0.1
self.lambda_v = 1.0
self.lambda_r = 1.0
self.max_iter_m = 1
# for updating W and b in vae
self.learning_rate = 0.001
self.batch_size = 500
self.num_iter = 300 # used in the e_step
self.EM_iter = 30
self.weight_decay = 2e-4
class cf_vae_extend:
def __init__(self, num_users, num_items, num_factors, params, input_dim, encoding_dims, z_dim, decoding_dims,
encoding_dims_str=None, decoding_dims_str=None, loss_type="cross_entropy", useTranse = False,
eps=1e-10, model=0, ckpt_folder='pre_model', initial=True, model_mat=None):
self.num_users = num_users
self.num_items = num_items
self.num_factors = num_factors
self.params = params
self.U = 0.1 * np.random.randn(self.num_users, self.num_factors)
self.V = 0.1 * np.random.randn(self.num_items, self.num_factors)
self.exp_z = 0.1 * np.random.rand(self.num_items, self.num_factors)
self.exp_z_im = 0.1 * np.random.rand(self.num_items, self.num_factors)
self.input_dim = input_dim
self.z_dim = z_dim
self.encoding_dims = encoding_dims
self.decoding_dims = decoding_dims
self.encoding_dims_str = encoding_dims_str
self.decoding_dims_str = decoding_dims_str
self.loss_type = loss_type
self.useTranse = useTranse
self.eps = eps
self.initial = initial
self.input_width = 32
self.input_height = 32
self.channel = 3
self.num_conv = 4
self.intermediate_dim = 256
self.filter = 64
self.model = model
self.ckpt_model = ckpt_folder
print(self.params.EM_iter)
if self.initial == False:
self.load_model(model_mat)
# def e_step(self, x_data, reuse = None):
def e_step(self, x_data, im_data, str_data):
print "e_step finetuning"
tf.reset_default_graph()
self.x_ = placeholder((None, self.input_dim)) # we need these global nodes
self.v_ = placeholder((None, self.num_factors))
# inference process
if self.model != 6:
with tf.variable_scope("text"):
x = self.x_
depth_inf = len(self.encoding_dims)
#x = tf.layers.dropout(x, rate=0.3)
# noisy_level = 1
# x = x + noisy_level*tf.random_normal(tf.shape(x))
reg_loss = 0
for i in range(depth_inf):
x = dense(x, self.encoding_dims[i], scope="enc_layer"+"%s" %i, activation=tf.nn.sigmoid)
#x = tf.nn.sigmoid(x+x1)
# x = slim.fully_connected(x, self.encoding_dims[i], activation_fn=tf.nn.sigmoid, scope="enc_layer%s"%i)
# print("enc_layer0/weights:0".graph)
# h_encode = x
# z_mu = dense(h_encode, self.z_dim, scope="mu_layer")
# z_log_sigma_sq = dense(h_encode, self.z_dim, scope = "sigma_layer")
# e = tf.random_normal(tf.shape(z_mu))
# z = z_mu + tf.sqrt(tf.maximum(tf.exp(z_log_sigma_sq), self.eps)) * e
h_encode = x
z_mu = slim.fully_connected(h_encode, self.z_dim, scope="mu_layer")
z_log_sigma_sq = slim.fully_connected(h_encode, self.z_dim, scope="sigma_layer")
e = tf.random_normal(tf.shape(z_mu))
z = z_mu + tf.sqrt(tf.maximum(tf.exp(z_log_sigma_sq), self.eps)) * e
# generative process
depth_gen = len(self.decoding_dims)
y = z
print(self.decoding_dims)
for i in range(depth_gen):
y = dense(y, self.decoding_dims[i], scope="dec_layer"+"%s" %i, activation=tf.nn.sigmoid)
# y = slim.fully_connected(y, self.decoding_dims[i], activation_fn=tf.nn.sigmoid,
# scope="dec_layer%s"%i)
# if last_layer_nonelinear: depth_gen -1
x_recons = y
if self.model == 2 or self.model == 3:
self.x_s_ = placeholder((None, 4526))
with tf.variable_scope("structure"):
x_s = self.x_s_
depth_inf = len(self.encoding_dims_str)
for i in range(depth_inf):
x_s = dense(x_s, self.encoding_dims_str[i], scope="enc_layer"+"%s" %i, activation=tf.nn.sigmoid)
# print("enc_layer0/weights:0".graph)
h_s_encode = x_s
z_s_mu = dense(h_s_encode, self.z_dim, scope="mu_layer")
z_s_log_sigma_sq = dense(h_s_encode, self.z_dim, scope = "sigma_layer")
e_s = tf.random_normal(tf.shape(z_s_mu))
z_s = z_s_mu + tf.sqrt(tf.maximum(tf.exp(z_s_log_sigma_sq), self.eps)) * e_s
# generative process
depth_gen = len(self.decoding_dims_str)
y_s = z_s
for i in range(depth_gen):
y_s = dense(y_s, self.decoding_dims_str[i], scope="dec_layer"+"%s" %i, activation=tf.nn.sigmoid)
# if last_layer_nonelinear: depth_gen -1
x_s_recons = y_s
if self.model == 1 or self.model == 2 or self.model==6:
self.x_im_ = placeholder((None, self.input_width, self.input_height, self.channel))
with tf.variable_scope("image"):
x_im_ = self.x_im_
x_im = x_im_
keep_prob = 0.8
#x_im = tf.layers.dropout(x_im, rate=0.3)
# for i in range(self.num_conv):
# x_im = conv2d(x_im, self.filter * np.power(2, i),kernel_size=(2,2), strides=(2,2), scope="enc_layer"+"%s" %i, activation=tf.nn.relu)
x_im = conv2d(x_im, 32,kernel_size=(3,3), strides=(2,2), scope="enc_layer0", activation=tf.nn.relu)
x_im = tf.nn.dropout(x_im, keep_prob)
x_im = conv2d(x_im, 64,kernel_size=(3,3), strides=(2,2), scope="enc_layer1", activation=tf.nn.relu)
x_im = tf.nn.dropout(x_im, keep_prob)
x_im = conv2d(x_im, 128,kernel_size=(3,3), strides=(2,2), scope="enc_layer2", activation=tf.nn.relu)
x_im = tf.nn.dropout(x_im, keep_prob)
x_im = conv2d(x_im, 256,kernel_size=(3,3), strides=(2,2), scope="enc_layer3", activation=tf.nn.relu)
x_im = tf.nn.dropout(x_im, keep_prob)
x_im = conv2d(x_im, 256,kernel_size=(3,3), strides=(2,2), scope="enc_layer4", activation=tf.nn.relu)
# x_im = conv2d(x_im, 512,kernel_size=(3,3), strides=(2,2), scope="enc_layer5", activation=tf.nn.relu)
# x_im = max_pool(x_im, kernel_size=(3,3), strides=(2,2))
h_im_encode = tf.reshape(x_im, [-1, 256])
z_im_mu = dense(h_im_encode, self.z_dim, scope="mu_layer")
z_im_log_sigma_sq = dense(h_im_encode, self.z_dim, scope = "sigma_layer")
e_im = tf.random_normal(tf.shape(z_im_mu))
z_im = z_im_mu + tf.sqrt(tf.maximum(tf.exp(z_im_log_sigma_sq), self.eps)) * e_im
# generative process
# h_decode = dense(z_im, self.intermediate_dim, activation=tf.nn.relu)
h_upsample = dense(z_im, 256, activation=tf.nn.relu)
y_im = tf.reshape(h_upsample, [-1, 1, 1, 256])
# y_im = conv2d_transpose(y_im, 512, kernel_size=(3,3), strides=(2,2), scope="dec_layer0", activation=tf.nn.relu)
y_im = conv2d_transpose(y_im, 256, kernel_size=(3,3), strides=(2,2), scope="dec_layer1", activation=tf.nn.relu)
y_im = tf.nn.dropout(y_im, keep_prob)
y_im = conv2d_transpose(y_im, 128, kernel_size=(3,3), strides=(2,2), scope="dec_layer2", activation=tf.nn.relu)
y_im = tf.nn.dropout(y_im, keep_prob)
y_im = conv2d_transpose(y_im, 64, kernel_size=(3,3), strides=(2,2), scope="dec_layer3", activation=tf.nn.relu)
y_im = tf.nn.dropout(y_im, keep_prob)
y_im= conv2d_transpose(y_im, 32, kernel_size=(3,3), strides=(2,2), scope="dec_layer4", activation=tf.nn.relu)
y_im = tf.nn.dropout(y_im, keep_prob)
y_im = conv2d_transpose(y_im, 3, kernel_size=(3,3), strides=(2,2), scope="dec_layer5", activation=tf.nn.relu)
x_im_recons = y_im
m = tf.reshape(x_im_, [-1, self.input_width*self.input_height, self.channel])
n = tf.reshape(x_im_recons, [-1, self.input_width*self.input_height, self.channel])
if self.loss_type == "cross_entropy":
if self.model != 6:
loss_recons = tf.reduce_mean(tf.reduce_sum(binary_crossentropy(self.x_, x_recons), axis=1))
loss_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_mu) + tf.exp(z_log_sigma_sq)
- z_log_sigma_sq - 1, 1))
else:
loss_im_recons = -tf.reduce_mean(tf.reduce_sum(m * tf.log(tf.maximum(n, 1e-10)) + (1-m) * tf.log(tf.maximum(1 - n, 1e-10)),1))
loss_im_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_im_mu) + tf.exp(z_im_log_sigma_sq) - z_im_log_sigma_sq - 1, 1))
loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z_im), 1))
self.loss_e_step = loss_v + loss_im_kl + loss_im_recons
if self.model == 0:
loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z), 1))
self.loss_e_step = loss_recons + loss_kl + loss_v + 2e-4*reg_loss
elif self.model == 1:
# loss_im_recons = self.input_width * self.input_height * metrics.binary_crossentropy(K.flatten(x_im_), K.flatten(x_im_recons))
# loss_im_kl = 0.5 * tf.reduce_sum(tf.square(z_im_mu) + tf.exp(z_im_log_sigma_sq) - z_im_log_sigma_sq - 1, 1)
# loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z - z_im), 1))
# self.loss_e_step = loss_recons + loss_kl + loss_v + K.mean(loss_im_recons + loss_im_kl)
loss_im_recons = -tf.reduce_mean(tf.reduce_sum(m * tf.log(tf.maximum(n, 1e-10)) + (1-m) * tf.log(tf.maximum(1 - n, 1e-10)),1))
loss_im_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_im_mu) + tf.exp(z_im_log_sigma_sq) - z_im_log_sigma_sq - 1, 1))
loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z - z_im), 1))
self.loss_e_step = loss_v + loss_im_kl + loss_im_recons + loss_kl + loss_recons
elif self.model == 3:
loss_s_recons = tf.reduce_mean(tf.reduce_sum(binary_crossentropy(self.x_s_, x_s_recons), axis=1))
loss_s_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_s_mu) + tf.exp(z_s_log_sigma_sq) - z_s_log_sigma_sq - 1, 1))
loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z - z_s), 1))
self.loss_e_step = loss_recons + loss_kl + loss_s_recons + loss_s_kl + loss_v
elif self.model == 2:
print("abc")
loss_im_recons = -tf.reduce_mean(tf.reduce_sum(m * tf.log(tf.maximum(n, 1e-10)) + (1-m) * tf.log(tf.maximum(1 - n, 1e-10)),1))
loss_im_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_im_mu) + tf.exp(z_im_log_sigma_sq) - z_im_log_sigma_sq - 1, 1))
loss_s_recons = tf.reduce_mean(tf.reduce_sum(binary_crossentropy(self.x_s_, x_s_recons), axis=1))
loss_s_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_s_mu) + tf.exp(z_s_log_sigma_sq) - z_s_log_sigma_sq - 1, 1))
loss_v = 1.0*self.params.lambda_v/self.params.lambda_r * tf.reduce_mean( tf.reduce_sum(tf.square(self.v_ - z - z_s - z_im), 1))
self.loss_e_step = loss_recons + loss_kl + loss_s_recons + loss_s_kl + loss_v + loss_im_recons + loss_im_kl
train_op = tf.train.AdamOptimizer(self.params.learning_rate).minimize(self.loss_e_step)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
# LOAD TEXT#
ckpt = os.path.join(self.ckpt_model, "cvae_%d.ckpt"%self.model)
if self.initial:
if self.model != 6:
ckpt_file = os.path.join(self.ckpt_model, "vae_text.ckpt")
text_varlist = tf.get_collection(tf.GraphKeys.VARIABLES, scope="text")
text_saver = tf.train.Saver(var_list=text_varlist)
# if init == True:
text_saver.restore(self.sess, ckpt_file)
# LOAD IMAGE##
if self.model == 1 or self.model == 2 or self.model == 6:
ckpt_file_img = os.path.join(self.ckpt_model, "vae_image.ckpt")
img_varlist = tf.get_collection(tf.GraphKeys.VARIABLES, scope="image")
img_saver = tf.train.Saver(var_list=img_varlist)
img_saver.restore(self.sess, ckpt_file_img)
# Load Structure
if self.model == 2 or self.model == 3:
ckpt_file = os.path.join(self.ckpt_model, "vae_structure.ckpt")
structure_varlist = tf.get_collection(tf.GraphKeys.VARIABLES, scope="structure")
structure_saver = tf.train.Saver(var_list=structure_varlist)
structure_saver.restore(self.sess, ckpt_file)
self.initial = False
self.saver = tf.train.Saver()
else:
self.saver = tf.train.Saver()
self.saver.restore(self.sess, ckpt)
start = time.time()
for i in range(self.params.num_iter):
idx = np.random.choice(self.num_items, self.params.batch_size, replace=False)
x_batch = x_data[idx]
v_batch = self.V[idx]
if self.model != 0:
img_batch = im_data[idx]
str_batch = str_data[idx]
_, l = self.sess.run((train_op, self.loss_e_step),
feed_dict={self.x_:x_batch, self.v_:v_batch, self.x_s_:str_batch, self.x_im_:img_batch})
else:
_, l = self.sess.run((train_op, self.loss_e_step),
feed_dict={self.x_:x_batch, self.v_:v_batch})
if i % 50 == 0:
print("epoches: %d\t loss: %f\t time: %d s"%(i, l, time.time()-start))
if self.model != 6:
self.z_mu = z_mu
self.x_recons = x_recons
if self.model == 1 or self.model == 2:
self.z_im_mu = z_im_mu
self.x_im_recons = x_im_recons
if self.model == 2 or self.model == 3:
self.z_s_mu = z_s_mu
self.x_s_recons = x_s_recons
self.saver.save(self.sess, ckpt)
return None
def pmf_estimate(self, users, items, params):
"""
users: list of list
"""
min_iter = 1
a_minus_b = params.C_a - params.C_b
converge = 1.0
likelihood_old = 0.0
likelihood = -math.exp(20)
it = 0
while ((it < params.max_iter_m and converge > 1e-6) or it < min_iter):
likelihood_old = likelihood
likelihood = 0
# update U
# VV^T for v_j that has at least one user liked
ids = np.array([len(x) for x in items]) > 0
v = self.V[ids]
VVT = np.dot(v.T, v)
XX = VVT * params.C_b + np.eye(self.z_dim) * params.lambda_u
for i in xrange(self.num_users):
item_ids = users[i]
n = len(item_ids)
if n > 0:
A = np.copy(XX)
A += np.dot(self.V[item_ids, :].T, self.V[item_ids,:])*a_minus_b
x = params.C_a * np.sum(self.V[item_ids, :], axis=0)
self.U[i, :] = scipy.linalg.solve(A, x)
likelihood += -0.5 * params.lambda_u * np.sum(self.U[i]*self.U[i])
# update V
ids = np.array([len(x) for x in users]) > 0
u = self.U[ids]
XX = np.dot(u.T, u) * params.C_b
for j in xrange(self.num_items):
user_ids = items[j]
m = len(user_ids)
if m>0 :
A = np.copy(XX)
A += np.dot(self.U[user_ids,:].T, self.U[user_ids,:])*a_minus_b
B = np.copy(A)
A += np.eye(self.z_dim) * params.lambda_v
if self.model == 1:
x = params.C_a * np.sum(self.U[user_ids, :], axis=0) + params.lambda_v * (self.exp_z[j,:] + self.exp_z_im[j,:])
elif self.model != 6:
x = params.C_a * np.sum(self.U[user_ids, :], axis=0) + params.lambda_v * self.exp_z[j,:]
else:
x = params.C_a * np.sum(self.U[user_ids, :], axis=0) + params.lambda_v * self.exp_z_im[j,:]
self.V[j, :] = scipy.linalg.solve(A, x)
likelihood += -0.5 * m * params.C_a
likelihood += params.C_a * np.sum(np.dot(self.U[user_ids, :], self.V[j,:][:, np.newaxis]),axis=0)
if self.model == 1:
likelihood += -0.5 * self.V[j,:].dot(B).dot((self.V[j,:] - self.exp_z[j,:] - self.exp_z_im[j,:])[:,np.newaxis])
ep = self.V[j,:] - self.exp_z[j,:] - self.exp_z_im[j,:]
elif self.model == 2:
likelihood += -0.5 * self.V[j,:].dot(B).dot((self.V[j,:] - self.exp_z[j,:] - self.exp_z_im[j,:] - self.exp_z_s[j,:])[:,np.newaxis])
                        ep = self.V[j,:] - self.exp_z[j,:] - self.exp_z_im[j,:] - self.exp_z_s[j,:]  # index exp_z_s like the other terms; the bare matrix was a bug
elif self.model != 6:
likelihood += -0.5 * self.V[j,:].dot(B).dot((self.V[j,:] - self.exp_z[j,:])[:,np.newaxis])
ep = self.V[j,:] - self.exp_z[j,:]
                    else:
                        likelihood += -0.5 * self.V[j,:].dot(B).dot((self.V[j,:] - self.exp_z_im[j,:])[:,np.newaxis])
                        ep = self.V[j,:] - self.exp_z_im[j,:]  # this branch never set ep, so the line below raised NameError
                    likelihood += -0.5 * params.lambda_v * np.sum(ep*ep)
else:
# m=0, this article has never been rated
A = np.copy(XX)
A += np.eye(self.z_dim) * params.lambda_v
if self.model == 1:
x = params.lambda_v * (self.exp_z[j,:] + self.exp_z_im[j,:])
elif self.model == 2:
x = params.lambda_v * (self.exp_z[j,:] + self.exp_z_im[j,:] + self.exp_z_s[j, :])
elif self.model != 6:
x = params.lambda_v * self.exp_z[j,:]
else:
x = params.lambda_v * self.exp_z_im[j,:]
self.V[j, :] = scipy.linalg.solve(A, x)
if self.model == 1:
ep = self.V[j,:] - self.exp_z[j,:]- self.exp_z_im[j,:]
elif self.model == 2:
ep = self.V[j,:] - self.exp_z[j,:]- self.exp_z_im[j,:] - self.exp_z_s[j, :]
elif self.model != 6:
ep = self.V[j,:] - self.exp_z[j,:]
else:
ep = self.V[j,:] - self.exp_z_im[j,:]
likelihood += -0.5 * params.lambda_v * np.sum(ep*ep)
# computing negative log likelihood
#likelihood += -0.5 * params.lambda_u * np.sum(self.m_U * self.m_U)
#likelihood += -0.5 * params.lambda_v * np.sum(self.m_V * self.m_V)
# split R_ij into 0 and 1
# -sum(0.5*C_ij*(R_ij - u_i^T * v_j)^2) = -sum_ij 1(R_ij=1) 0.5*C_ij +
# sum_ij 1(R_ij=1) C_ij*u_i^T * v_j - 0.5 * sum_j v_j^T * U C_i U^T * v_j
it += 1
converge = abs(1.0*(likelihood - likelihood_old)/likelihood_old)
# if self.verbose:
# if likelihood < likelihood_old:
# print("likelihood is decreasing!")
print("[iter=%04d], likelihood=%.5f, converge=%.10f" % (it, likelihood, converge))
return likelihood
def m_step(self, users, items, params):
num_users = len(users)
num_items = len(items)
print("M-step")
start =time.time()
for i in range(params.max_iter_m):
likelihood = 0
for u in range(num_users):
idx_a = np.ones(num_items) < 0
idx_a[users[u]] = True # pick those rated ids
Lambda_inv = params.C_a * np.dot(self.V[idx_a].T, self.V[idx_a]) + \
params.C_b * np.dot(self.V[~idx_a].T, self.V[~idx_a]) + \
np.eye(self.num_factors) * params.lambda_u
rx = params.C_a * np.sum(self.V[users[u], :], axis=0)
self.U[u, :] = scipy.linalg.solve(Lambda_inv, rx, check_finite=True)
likelihood += -0.5 * params.lambda_u * np.sum(self.U[u] * self.U[u])
for v in range(num_items):
idx_a = np.ones(num_users) < 0
idx_a[items[v]] = True
Lambda_inv = params.C_a * np.dot(self.U[idx_a].T, self.U[idx_a]) + \
params.C_b * np.dot(self.U[~idx_a].T, self.U[~idx_a]) + \
np.eye(self.num_factors) * params.lambda_v
if self.model == 1:
rx = params.C_a * np.sum(self.U[items[v], :], axis=0) + params.lambda_v * (self.exp_z[v, :] + self.exp_z_im[v, :])
elif self.model != 6:
rx = params.C_a * np.sum(self.U[items[v], :], axis=0) + params.lambda_v * self.exp_z[v, :]
else:
rx = params.C_a * np.sum(self.U[items[v], :], axis=0) + params.lambda_v * self.exp_z_im[v, :]
self.V[v, :] = scipy.linalg.solve(Lambda_inv, rx, check_finite=True)
print("iter: %d\t time:%d" %(i, time.time()-start))
return None
def get_exp_hidden(self, x_data, im_data, str_data):
if self.model != 6:
self.exp_z = self.sess.run(self.z_mu, feed_dict={self.x_: x_data})
else:
self.exp_z = 0
if self.model == 1 or self.model == 2 or self.model == 6:
            for i in range(0, len(im_data), self.params.batch_size):  # batch over im_data; the original range(len(im_data), batch_size) never iterated
im_batch = im_data[i:i+self.params.batch_size]
exp_z_im = self.sess.run(self.z_im_mu, feed_dict={self.x_im_: im_batch})
self.exp_z_im = np.concatenate((self.exp_z_im, exp_z_im), axis=0)
else:
# print(self.exp_z_im.shape)
self.exp_z_im = 0
if self.model == 2 or self.model == 3:
self.exp_z_s = self.sess.run(self.z_s_mu, feed_dict={self.x_s_: str_data})
else:
self.exp_z_s = 0
return self.exp_z, self.exp_z_im, self.exp_z_s
def fit(self, users, items, x_data, params, test_users, im_data=None, str_data=None, ):
start = time.time()
self.e_step(x_data, im_data, str_data)
self.exp_z, self.exp_z_im, self.exp_z_s = self.get_exp_hidden(x_data, im_data, str_data)
for i in range(params.EM_iter):
print("iter: %d"%i)
self.pmf_estimate(users, items, params)
self.e_step(x_data, im_data, str_data)
self.exp_z, self.exp_z_im, self.exp_z_s = self.get_exp_hidden(x_data, im_data, str_data)
if i%100 == 90:
file = open(os.path.join(self.ckpt_model, "result_type_0_%d.txt"%self.model), 'a')
file.write("---------iter %d--------\n"%i)
pred_all = self.predict_all()
self.predict_val(pred_all, users, test_users, file)
self.save_model(save_path_pmf=os.path.join(self.ckpt_model, "cf_vae_%d_%d.mat"%(self.model, i)))
print(time.time() - start)
file.close()
print("time: %d"%(time.time()-start))
return None
def save_model(self, save_path_pmf):
# self.saver.save(self.sess, save_path_weights)
sio.savemat(save_path_pmf, {"U":self.U, "V":self.V, "Z":self.exp_z, "Z_im":self.exp_z_im})
print "all parameters saved"
def load_model(self, load_path_pmf):
# self.saver.restore(self.sess, load_path_weights)
data = sio.loadmat(load_path_pmf)
try:
self.U = data["U"]
self.V = data["V"]
self.exp_z = data["Z"]
print "model loaded"
except:
self.U = data["m_U"]
self.V = data["m_V"]
self.exp_z = data["m_theta"]
print "model loaded"
def predict(self, pred_all, train_users, test_users, M):
# user_all = map(add, train_users, test_users)
# user_all = np.array(user_all) # item idex from 1
user_all = test_users
ground_tr_num = [len(user) for user in user_all]
pred_all = list(pred_all)
recall_avgs = []
precision_avgs = []
mapk_avgs = []
        for m in range(10, M + 10, 10):  # evaluate cutoffs 10..M using the otherwise-unused M argument; range(10, 10, 10) was empty
print "m = " + "{:>10d}".format(m) + "done"
recall_vals = []
apk_vals = []
for i in range(len(user_all)):
train = train_users[i]
top_M = list(np.argsort(-pred_all[i])[0:(m +len(train))])
for u in train:
if u in top_M:
top_M.remove(u)
top_M = top_M[:m]
if len(top_M) != m:
print(top_M, train_users[i])
                hits = set(top_M) & set(user_all[i])  # item index from 0
hits_num = len(hits)
try:
recall_val = float(hits_num) / float(ground_tr_num[i])
except:
recall_val = 1
recall_vals.append(recall_val)
# precision = float(hits_num) / float(m)
# precision_vals.append(precision)
recall_avg = np.mean(np.array(recall_vals))
# precision_avg = np.mean(np.array(precision_vals))
# # mapk = ml_metrics.mapk([list(np.argsort(-pred_all[k])) for k in range(len(pred_all)) if len(user_all[k])!= 0],
# # [u for u in user_all if len(u)!=0], m)
mapk = np.mean(np.array(apk_vals))
print recall_avg
recall_avgs.append(recall_avg)
# precision_avgs.append(precision_avg)
mapk_avgs.append(mapk)
return recall_avgs, mapk_avgs
def predict_val(self, pred_all, train_users, test_users, file=None):
user_all = test_users
ground_tr_num = [len(user) for user in user_all]
pred_all = list(pred_all)
recall_avgs = []
precision_avgs = []
mapk_avgs = []
for m in [50]:
print "m = " + "{:>10d}".format(m) + "done"
recall_vals = []
ndcg = []
hit = 0
for i in range(len(user_all)):
top_M = list(np.argsort(-pred_all[i])[0:m])
                hits = set(top_M) & set(user_all[i])  # item index from 0
hits_num = len(hits)
if hits_num > 0:
hit += 1
try:
recall_val = float(hits_num) / float(ground_tr_num[i])
except:
recall_val = 1
recall_vals.append(recall_val)
pred = np.array(pred_all[i])
score = []
for k in range(m):
if top_M[k] in hits:
score.append(1)
else:
score.append(0)
actual = self.dcg_score(score, pred[top_M], m)
best = self.dcg_score(score, score, m)
if best ==0:
ndcg.append(0)
else:
ndcg.append(float(actual)/best)
# precision = float(hits_num) / float(m)
# precision_vals.append(precision)
recall_avg = np.mean(np.array(recall_vals))
# precision_avg = np.mean(np.array(precision_vals))
# mapk = ml_metrics.mapk([list(np.argsort(-pred_all[k])) for k in range(len(pred_all)) if len(user_all[k])!= 0],
# [u for u in user_all if len(u)!=0], m)
print("recall %f, hit: %f, NDCG: %f"%(recall_avg, float(hit)/len(user_all), np.mean(ndcg)))
#print recall_avg
if file != None:
file.write("m = %d, recall = %f\t"%(m, recall_avg))
# precision_avgs.append(precision_avg)
return recall_avg
def dcg_score(self, y_true, y_score, k=5):
"""Discounted cumulative gain (DCG) at rank K.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array, shape = [n_samples, n_classes]
Predicted scores.
k : int
Rank.
Returns
-------
score : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gain = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gain / discounts)
def predict_all(self):
return np.dot(self.U, (self.V.T))
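The coordinate updates in pmf_estimate and m_step are closed-form ridge regressions: each user vector solves (C_a * V_r^T V_r + C_b * V_nr^T V_nr + lambda_u * I) u_i = C_a * (sum of rated item vectors), and the item vectors get the same system with the lambda_v pull toward the encoder output added to the right-hand side. A self-contained sketch with made-up sizes:

```python
# Sketch (mine): the per-user solve that m_step performs with scipy.
import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
V = rng.randn(100, 8)                 # 100 items, 8 latent factors
rated = [3, 17, 42]                   # items this user consumed
C_a, C_b, lam_u = 1.0, 0.01, 0.1

mask = np.zeros(len(V), dtype=bool)
mask[rated] = True
A = (C_a * V[mask].T.dot(V[mask])     # confidence C_a on rated items
     + C_b * V[~mask].T.dot(V[~mask]) # confidence C_b on unrated items
     + lam_u * np.eye(V.shape[1]))    # ridge term
u_i = scipy.linalg.solve(A, C_a * V[rated].sum(axis=0))
print(u_i.shape)                      # (8,)
```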

| /manage.py | buppter/iHome | refs/heads/master | no_license | UTF-8 | Python | 303 bytes |

from ihome import creat_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
# 创建flask应用对象
app = creat_app('development')
manager = Manager(app)
Migrate(app, db)
manager.add_command("db", MigrateCommand)
if __name__ == '__main__':
manager.run()
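Typical shell usage for this setup (my note; the command names are the standard Flask-Script/Flask-Migrate ones registered above): `python manage.py db init` once to create the migrations folder, then `python manage.py db migrate` and `python manage.py db upgrade` after each model change, and `python manage.py runserver` to start the development server.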

| /plotly/validators/choropleth/unselected/__init__.py | Jonathan-MW/plotly.py | refs/heads/master | MIT (permissive) | UTF-8 | Python | generated | 684 bytes |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='marker',
parent_name='choropleth.unselected',
**kwargs
):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Marker'),
data_docs=kwargs.pop(
'data_docs', """
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
"""
),
**kwargs
)
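These validator modules are code-generated from Plotly's figure schema; at runtime they coerce plain dicts into typed compound objects. A hypothetical end-to-end use (my example, assuming plotly is installed):

```python
# Sketch (mine): the dict under "unselected" is coerced by the generated
# MarkerValidator into a choropleth.unselected.Marker object.
import plotly.graph_objects as go

fig = go.Figure(go.Choropleth(
    locations=["USA"], z=[1.0],
    unselected={"marker": {"opacity": 0.3}},
))
print(fig.data[0].unselected.marker.opacity)   # 0.3
```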

| /sinn.py | padmavathi12345/lab1 | refs/heads/master | no_license | UTF-8 | Python | 215 bytes |

import numpy as np
import matplotlib.pyplot as plt
n = np.arange(0, 10, 1)
x1 = np.sin(2*np.pi*0.1*n)  # non-integer normalized frequency; np.sin(2*np.pi*n) is ~0 for every integer n, so the original plotted all zeros
plt.stem(n,x1)
plt.title("sin wave in discrete domain")
plt.xlabel("time")
plt.ylabel("amplitude")
plt.show()
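A note on the frequency fix above (mine): sampling sin(2*pi*f*n) at integer n makes any integer f alias to DC, which is why the file as committed stem-plotted a flat line under the title "sin wave in discrete domain".

```python
# Sketch: an integer normalized frequency aliases to zero; f = 0.1 gives
# 10 samples per period.
import numpy as np
n = np.arange(0, 10, 1)
print(np.allclose(np.sin(2*np.pi*1.0*n), 0))   # True: every sample ~ 0
print(np.round(np.sin(2*np.pi*0.1*n), 2))      # visible oscillation
```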

| /typy/google/protobuf/descriptor_pb2.py | ibelie/typy | refs/heads/master | MIT (permissive) | UTF-8 | Python | generated | 77,113 bytes |

# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/descriptor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from typy.google.protobuf import descriptor as _descriptor
from typy.google.protobuf import message as _message
from typy.google.protobuf import reflection as _reflection
from typy.google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/descriptor.proto',
package='google.protobuf',
syntax='proto2',
serialized_pb=_b('\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xf0\x04\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a,\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"\xbc\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"$\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x8c\x01\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\x87\x05\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12,\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08:\x05\x66\x61lse\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x05\x66\x61lse\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\xe6\x01\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x98\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x8d\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"z\n\rMethodOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42X\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z\ndescriptor\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.protobuf.FieldDescriptorProto.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=3, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=4, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=5, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=6, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=7, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=8, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=9, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=10, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=11, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=12, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=13, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=14, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=15, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=16, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=17, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1553,
serialized_end=1863,
)
_sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE)
_FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor(
name='Label',
full_name='google.protobuf.FieldDescriptorProto.Label',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LABEL_OPTIONAL', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_REQUIRED', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_REPEATED', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1865,
serialized_end=1932,
)
_sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL)
_FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor(
name='OptimizeMode',
full_name='google.protobuf.FileOptions.OptimizeMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SPEED', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CODE_SIZE', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LITE_RUNTIME', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3141,
serialized_end=3199,
)
_sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE)
_FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor(
name='CType',
full_name='google.protobuf.FieldOptions.CType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRING', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CORD', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING_PIECE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3747,
serialized_end=3794,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE)
_FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor(
name='JSType',
full_name='google.protobuf.FieldOptions.JSType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='JS_NORMAL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JS_STRING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JS_NUMBER', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3796,
serialized_end=3849,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE)
_FILEDESCRIPTORSET = _descriptor.Descriptor(
name='FileDescriptorSet',
full_name='google.protobuf.FileDescriptorSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=124,
)
_FILEDESCRIPTORPROTO = _descriptor.Descriptor(
name='FileDescriptorProto',
full_name='google.protobuf.FileDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3,
number=10, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4,
number=11, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=602,
)
_DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor(
name='ExtensionRange',
full_name='google.protobuf.DescriptorProto.ExtensionRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1140,
serialized_end=1184,
)
_DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor(
name='ReservedRange',
full_name='google.protobuf.DescriptorProto.ReservedRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1186,
serialized_end=1229,
)
_DESCRIPTORPROTO = _descriptor.Descriptor(
name='DescriptorProto',
full_name='google.protobuf.DescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.DescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='field', full_name='google.protobuf.DescriptorProto.field', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.DescriptorProto.options', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=605,
serialized_end=1229,
)
_FIELDDESCRIPTORPROTO = _descriptor.Descriptor(
name='FieldDescriptorProto',
full_name='google.protobuf.FieldDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDDESCRIPTORPROTO_TYPE,
_FIELDDESCRIPTORPROTO_LABEL,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1232,
serialized_end=1932,
)
_ONEOFDESCRIPTORPROTO = _descriptor.Descriptor(
name='OneofDescriptorProto',
full_name='google.protobuf.OneofDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1934,
serialized_end=1970,
)
_ENUMDESCRIPTORPROTO = _descriptor.Descriptor(
name='EnumDescriptorProto',
full_name='google.protobuf.EnumDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1973,
serialized_end=2113,
)
_ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor(
name='EnumValueDescriptorProto',
full_name='google.protobuf.EnumValueDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2115,
serialized_end=2223,
)
_SERVICEDESCRIPTORPROTO = _descriptor.Descriptor(
name='ServiceDescriptorProto',
full_name='google.protobuf.ServiceDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2226,
serialized_end=2370,
)
_METHODDESCRIPTORPROTO = _descriptor.Descriptor(
name='MethodDescriptorProto',
full_name='google.protobuf.MethodDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='client_streaming', full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2373,
serialized_end=2566,
)
_FILEOPTIONS = _descriptor.Descriptor(
name='FileOptions',
full_name='google.protobuf.FileOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3,
number=20, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4,
number=27, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5,
number=9, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9,
number=18, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=10,
number=23, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=11,
number=31, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=12,
number=36, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=13,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=14,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FILEOPTIONS_OPTIMIZEMODE,
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=2569,
serialized_end=3216,
)
_MESSAGEOPTIONS = _descriptor.Descriptor(
name='MessageOptions',
full_name='google.protobuf.MessageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=4,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=3219,
serialized_end=3449,
)
_FIELDOPTIONS = _descriptor.Descriptor(
name='FieldOptions',
full_name='google.protobuf.FieldOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.FieldOptions.packed', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2,
number=6, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=4,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weak', full_name='google.protobuf.FieldOptions.weak', index=5,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=6,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDOPTIONS_CTYPE,
_FIELDOPTIONS_JSTYPE,
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=3452,
serialized_end=3860,
)
_ENUMOPTIONS = _descriptor.Descriptor(
name='EnumOptions',
full_name='google.protobuf.EnumOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=3863,
serialized_end=4004,
)
_ENUMVALUEOPTIONS = _descriptor.Descriptor(
name='EnumValueOptions',
full_name='google.protobuf.EnumValueOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=4006,
serialized_end=4131,
)
_SERVICEOPTIONS = _descriptor.Descriptor(
name='ServiceOptions',
full_name='google.protobuf.ServiceOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0,
number=33, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=4133,
serialized_end=4256,
)
_METHODOPTIONS = _descriptor.Descriptor(
name='MethodOptions',
full_name='google.protobuf.MethodOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0,
number=33, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=1,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=4258,
serialized_end=4380,
)
_UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor(
name='NamePart',
full_name='google.protobuf.UninterpretedOption.NamePart',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4618,
serialized_end=4669,
)
_UNINTERPRETEDOPTION = _descriptor.Descriptor(
name='UninterpretedOption',
full_name='google.protobuf.UninterpretedOption',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.UninterpretedOption.name', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4383,
serialized_end=4669,
)
_SOURCECODEINFO_LOCATION = _descriptor.Descriptor(
name='Location',
full_name='google.protobuf.SourceCodeInfo.Location',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4751,
serialized_end=4885,
)
_SOURCECODEINFO = _descriptor.Descriptor(
name='SourceCodeInfo',
full_name='google.protobuf.SourceCodeInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SOURCECODEINFO_LOCATION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4672,
serialized_end=4885,
)
_GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor(
name='Annotation',
full_name='google.protobuf.GeneratedCodeInfo.Annotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4976,
serialized_end=5055,
)
_GENERATEDCODEINFO = _descriptor.Descriptor(
name='GeneratedCodeInfo',
full_name='google.protobuf.GeneratedCodeInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GENERATEDCODEINFO_ANNOTATION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4888,
serialized_end=5055,
)
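# The assignments below resolve the cross-references (message_type, enum_type,
# containing_type) that were left as None while the descriptors above were
# being constructed.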
_FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS
_FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO
_DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE
_DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS
_DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE
_FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL
_FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE
_FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS
_FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO
_FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS
_ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS
_SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO
_SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS
_METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS
_FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE
_FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS
_MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE
_FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE
_FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS
_FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS
_ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART
_SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO
_SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION
_GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO
_GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION
DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET
DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS
DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS
DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS
DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS
DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS
DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS
DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS
DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION
DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO
DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO
FileDescriptorSet = _reflection.GeneratedProtocolMessageType('FileDescriptorSet', (_message.Message,), dict(
DESCRIPTOR = _FILEDESCRIPTORSET,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet)
))
_sym_db.RegisterMessage(FileDescriptorSet)
FileDescriptorProto = _reflection.GeneratedProtocolMessageType('FileDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _FILEDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto)
))
_sym_db.RegisterMessage(FileDescriptorProto)
DescriptorProto = _reflection.GeneratedProtocolMessageType('DescriptorProto', (_message.Message,), dict(
ExtensionRange = _reflection.GeneratedProtocolMessageType('ExtensionRange', (_message.Message,), dict(
DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange)
))
,
ReservedRange = _reflection.GeneratedProtocolMessageType('ReservedRange', (_message.Message,), dict(
DESCRIPTOR = _DESCRIPTORPROTO_RESERVEDRANGE,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ReservedRange)
))
,
DESCRIPTOR = _DESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto)
))
_sym_db.RegisterMessage(DescriptorProto)
_sym_db.RegisterMessage(DescriptorProto.ExtensionRange)
_sym_db.RegisterMessage(DescriptorProto.ReservedRange)
FieldDescriptorProto = _reflection.GeneratedProtocolMessageType('FieldDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _FIELDDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto)
))
_sym_db.RegisterMessage(FieldDescriptorProto)
OneofDescriptorProto = _reflection.GeneratedProtocolMessageType('OneofDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _ONEOFDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.OneofDescriptorProto)
))
_sym_db.RegisterMessage(OneofDescriptorProto)
EnumDescriptorProto = _reflection.GeneratedProtocolMessageType('EnumDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _ENUMDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto)
))
_sym_db.RegisterMessage(EnumDescriptorProto)
EnumValueDescriptorProto = _reflection.GeneratedProtocolMessageType('EnumValueDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto)
))
_sym_db.RegisterMessage(EnumValueDescriptorProto)
ServiceDescriptorProto = _reflection.GeneratedProtocolMessageType('ServiceDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _SERVICEDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto)
))
_sym_db.RegisterMessage(ServiceDescriptorProto)
MethodDescriptorProto = _reflection.GeneratedProtocolMessageType('MethodDescriptorProto', (_message.Message,), dict(
DESCRIPTOR = _METHODDESCRIPTORPROTO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto)
))
_sym_db.RegisterMessage(MethodDescriptorProto)
FileOptions = _reflection.GeneratedProtocolMessageType('FileOptions', (_message.Message,), dict(
DESCRIPTOR = _FILEOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FileOptions)
))
_sym_db.RegisterMessage(FileOptions)
MessageOptions = _reflection.GeneratedProtocolMessageType('MessageOptions', (_message.Message,), dict(
DESCRIPTOR = _MESSAGEOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions)
))
_sym_db.RegisterMessage(MessageOptions)
FieldOptions = _reflection.GeneratedProtocolMessageType('FieldOptions', (_message.Message,), dict(
DESCRIPTOR = _FIELDOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FieldOptions)
))
_sym_db.RegisterMessage(FieldOptions)
EnumOptions = _reflection.GeneratedProtocolMessageType('EnumOptions', (_message.Message,), dict(
DESCRIPTOR = _ENUMOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions)
))
_sym_db.RegisterMessage(EnumOptions)
EnumValueOptions = _reflection.GeneratedProtocolMessageType('EnumValueOptions', (_message.Message,), dict(
DESCRIPTOR = _ENUMVALUEOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions)
))
_sym_db.RegisterMessage(EnumValueOptions)
ServiceOptions = _reflection.GeneratedProtocolMessageType('ServiceOptions', (_message.Message,), dict(
DESCRIPTOR = _SERVICEOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions)
))
_sym_db.RegisterMessage(ServiceOptions)
MethodOptions = _reflection.GeneratedProtocolMessageType('MethodOptions', (_message.Message,), dict(
DESCRIPTOR = _METHODOPTIONS,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions)
))
_sym_db.RegisterMessage(MethodOptions)
UninterpretedOption = _reflection.GeneratedProtocolMessageType('UninterpretedOption', (_message.Message,), dict(
NamePart = _reflection.GeneratedProtocolMessageType('NamePart', (_message.Message,), dict(
DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart)
))
,
DESCRIPTOR = _UNINTERPRETEDOPTION,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption)
))
_sym_db.RegisterMessage(UninterpretedOption)
_sym_db.RegisterMessage(UninterpretedOption.NamePart)
SourceCodeInfo = _reflection.GeneratedProtocolMessageType('SourceCodeInfo', (_message.Message,), dict(
Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), dict(
DESCRIPTOR = _SOURCECODEINFO_LOCATION,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo.Location)
))
,
DESCRIPTOR = _SOURCECODEINFO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo)
))
_sym_db.RegisterMessage(SourceCodeInfo)
_sym_db.RegisterMessage(SourceCodeInfo.Location)
GeneratedCodeInfo = _reflection.GeneratedProtocolMessageType('GeneratedCodeInfo', (_message.Message,), dict(
Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), dict(
DESCRIPTOR = _GENERATEDCODEINFO_ANNOTATION,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.GeneratedCodeInfo.Annotation)
))
,
DESCRIPTOR = _GENERATEDCODEINFO,
__module__ = 'google.protobuf.descriptor_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.GeneratedCodeInfo)
))
_sym_db.RegisterMessage(GeneratedCodeInfo)
_sym_db.RegisterMessage(GeneratedCodeInfo.Annotation)
# @@protoc_insertion_point(module_scope)
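# Usage sketch (illustrative only, not part of the generated output): the
# message classes registered above behave like ordinary protobuf messages,
# e.g.
#   fdp = FileDescriptorProto(name='example.proto', syntax='proto2')
#   data = fdp.SerializeToString()
#   roundtrip = FileDescriptorProto.FromString(data)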
| ["[email protected]"] | |
167145af30f614f325c70b71cb60dcb4eea9155c | 3fbf42864ad8ee4f53597a1cf5b2e5a54268f571 | /Graph-Theory/(1167)트리의 지름.py | 6855987323aa602e0c96464a12115e7e6be354b7 | [] | no_license | upskyy/Baekjoon-Online-Judge | 1542f7c99b1a34ca0fbc810aa2cefd493d783103 | 35389d54ea2c1a25b9f0ebe07d417debff2651ac | refs/heads/main | 2023-04-26T05:51:20.306414 | 2021-05-06T07:40:54 | 2021-05-06T07:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
from collections import deque
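# Tree diameter by double BFS: a BFS from any node reaches one endpoint of a
# diameter, and a second BFS from that endpoint reaches the other endpoint, so
# the longest weighted distance found then is the diameter. bfs(x, 1) returns
# the farthest node from x; bfs(x, 2) returns the farthest distance from x.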
def bfs(x, mode):
    q = deque()
    q.append(x)
    c = [-1 for _ in range(n + 1)]
    c[x] = 0
    while q:
        x = q.popleft()
        for nx, w in maps[x]:
            if c[nx] == -1:  # visit only nodes not yet reached
                c[nx] = c[x] + w
                q.append(nx)
    if mode == 1:
        return c.index(max(c))
    else:
        return max(c)
n = int(input())
maps = [[] for _ in range(n + 1)]
for _ in range(n):
    info = list(map(int, input().split()))
    for i in range((len(info) // 2) - 1):
        maps[info[0]].append([info[2 * i + 1], info[2 * i + 2]])
        maps[info[2 * i + 1]].append([info[0], info[2 * i + 2]])
print(bfs(bfs(1, 1), 2))
| ["[email protected]"] | |
e1b8a0a17a261c0269fad33d85268864a07cfa4b | 947a5af27986d7b1f734c89d236c596849612bac | /adrien/doWork.py | bed4ed6a7b66c75f3fd5a5d1e046abf2a0cd5b28 | ["MIT", "BSD-3-Clause"] | permissive | seacheo/mindUPCODE | 98a33964d6112db5a0bdd4c22dfaa0d219230972 | 204a0dc3d2bf822790a1e284f3ed20950299b034 | refs/heads/master | 2022-04-24T11:54:04.137606 | 2020-04-26T03:06:41 | 2020-04-26T03:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,449 | py |
import sys
sys.path.append("/home/sean/pench")
sys.path.append("/network/lustre/iss01/home/adrien.martel")
import os
import argparse
parser = argparse.ArgumentParser(description='Do ML')
parser.add_argument('file', type=str, help='filename')
parser.add_argument('gpu', type=int, help='which gpu')
parser.add_argument('modelNum', type=int, help='which model')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# !git clone https://github.com/vlawhern/arl-eegmodels.git
from eegmodels.EEGModels import EEGNet, ShallowConvNet, DeepConvNet
from myModels import dualLSTM, singleLSTM
import tensorflow as tf
from tensorflow import keras
# tf.enable_eager_execution()  # left disabled: the tf.Session / set_session GPU setup below requires graph mode
from threading import Thread
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import normalize
import math
import threading
import pickle
import numpy as np
from tensorflow.python.client import device_lib
from tensorflow.keras.utils import to_categorical
# from tensorflow import tensorflow.keras.backend as K
# import keras
# from tqdm.keras import TqdmCallback
print(device_lib.list_local_devices()) # list of DeviceAttributes
# %gui qt
# import mne
import matplotlib
import matplotlib.pyplot as plt
from multiprocessing import Pool, Queue
import multiprocessing
# tf.enable_eager_execution()
from collections import deque
from tensorflow.keras.backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
set_session(sess)
sam = 2560
chans = 62
numClasses = 2
file = args.file
whichModel = args.modelNum
models = [
    [EEGNet(nb_classes=numClasses, Chans=chans, Samples=sam), True, 'EEGNet-V1'],
    [ShallowConvNet(nb_classes=numClasses, Chans=chans, Samples=sam), True, 'ShallowConvNet-V1'],
    [DeepConvNet(nb_classes=numClasses, Chans=chans, Samples=sam), True, 'DeepConvNet-V1'],
    [singleLSTM(clas=numClasses, sam=sam, chans=chans), False, 'singleLSTM-V1'],
    [dualLSTM(clas=numClasses, sam=sam, chans=chans), False, 'dualLSTM-V1'],
]
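# Each entry is [model, use_transposed_input, name]: the flag selects whether
# createWork() feeds the model the transposed flipFeatures arrays (the CNN
# models) or the untransposed features arrays (the LSTMs); the expected input
# layouts are inferred from createData() below.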
folder = models[whichModel][2]
def randomize(a, b, c):
    # Generate the permutation index array.
    permutation = np.random.permutation(a.shape[0])
    # Shuffle the arrays by giving the permutation in the square brackets.
    shuffled_a = a[permutation]
    shuffled_b = b[permutation]
    shuffled_c = c[permutation]
    return shuffled_a, shuffled_b, shuffled_c
def createData(file):
    baseFolder = 'one/'
    data = pickle.load(open(baseFolder + file, 'rb'))
    sfreq = 512
    features = []
    flipFeatures = []
    labels = []
    for i in range(numClasses):
        for k in range(len(data[i])):
            labels.append(i)
            features.append(data[i][k])
            flipFeatures.append([np.transpose(data[i][k])])
    labels = np.array(labels)
    features = np.array(features)
    flipFeatures = np.array(flipFeatures)
    labels, features, flipFeatures = randomize(labels, features, flipFeatures)
    labels = to_categorical(labels, num_classes=numClasses)
    return [features, flipFeatures, labels]
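# Shape note (inferred, assuming each stored trial is (samples, chans)):
# features stacks trials as (n_trials, samples, chans) for the recurrent
# models, while flipFeatures wraps the transpose in a singleton list to give
# (n_trials, 1, chans, samples) for the channels-first CNN models.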
def createWork(file):
    # arc=inps[n][0]
    # file=inps[n][1]
    global whichModel
    features, flipFeatures, labels = createData(file)
    if models[whichModel][1]:
        train_X = np.array(flipFeatures[0:int(7 * len(labels) / 10)])
        test_X = np.array(flipFeatures[int(7 * len(labels) / 10):-1])
    else:
        train_X = np.array(features[0:int(7 * len(labels) / 10)])
        test_X = np.array(features[int(7 * len(labels) / 10):-1])
    train_y = np.array(labels[0:int(7 * len(labels) / 10)])
    test_y = np.array(labels[int(7 * len(labels) / 10):-1])
    # out.put([arc[0], train_X, test_X, train_y, test_y, file, arc[2]])
    # print(out.empty())
    return [train_X, test_X, train_y, test_y]
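# createWork() makes a 70/30 train/test split; note that the trailing :-1
# slices also leave the last shuffled trial out of the test arrays.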
dat = createWork(file)
train_X = dat[0]
test_X = dat[1]
train_y = dat[2]
test_y = dat[3]
model = models[whichModel][0]  # the Keras model object from the selected entry
# print('processed')
# sgd = keras.optimizers.SGD(learning_rate=0.015, momentum=0.0, nesterov=False)
# adam = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
print('Done getting data')
# sgd = keras.optimizers.SGD()
adam = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')
print('Compiling model')
# break
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# fit network
history = model.fit(train_X, train_y, epochs=10, batch_size=2, validation_data=(test_X, test_y), verbose=0, shuffle=True)
# plot history
print(history.history.keys())
pyplot.figure(figsize=(25,10), dpi=250)
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.plot(history.history['acc'], label='accuracy')
pyplot.plot(history.history['val_acc'], label='test accuracy')
pyplot.legend()
pyplot.savefig(folder+'/'+file + '.png')
pickle.dump(history, open(folder+'/'+file+'-hist.p', "wb"))
model.save(folder+'/'+file+'.h5')
print('done')
| ["[email protected]"] | |
d0975cee5a2b218719c01a60b4b5652605b943fa | 25df7e1dc67b7b6abf302533ccf8e816c4cc0e7c | /setup.py | 4e04c591843b0aaa9086455f9da9c55ee565f2f2 | ["MIT"] | permissive | nivlab/NivLink | c7647339b9b11b82a824798c284a9b5ec914b8a3 | c5c03f36975a10445e586499fa516e2e38feae2b | refs/heads/master | 2020-03-26T00:55:59.922528 | 2019-10-15T15:53:23 | 2019-10-15T15:53:23 | 144,344,474 | 7 | 2 | MIT | 2019-10-10T21:15:37 | 2018-08-11T00:53:51 | Python | UTF-8 | Python | false | false | 1,530 | py |
#! /usr/bin/env python
#
# Copyright (c) 2018 Niv Lab
# https://www.princeton.edu/~nivlab/
import os, sys
from setuptools import setup, find_packages
path = os.path.abspath(os.path.dirname(__file__))
## Metadata
DISTNAME = 'nivlink'
MAINTAINER = 'Sam Zorowitz'
MAINTAINER_EMAIL = '[email protected]'
DESCRIPTION = 'Niv Lab software for preprocessing eyelink eyetracking data.'
URL = 'https://www.princeton.edu/~nivlab/'
LICENSE = 'MIT'
DOWNLOAD_URL = 'http://github.com/nivlab/nivlink'
with open(os.path.join(path, 'README.rst'), encoding='utf-8') as readme_file:
    README = readme_file.read()
with open(os.path.join(path, 'requirements.txt')) as requirements_file:
    # Parse requirements.txt, ignoring any commented-out lines.
    requirements = [line for line in requirements_file.read().splitlines()
                    if not line.startswith('#')]
VERSION = None
with open(os.path.join('nivlink', '__init__.py'), 'r') as fid:
    for line in (line.strip() for line in fid):
        if line.startswith('__version__'):
            VERSION = line.split('=')[1].strip().strip('\'')
            break
if VERSION is None:
    raise RuntimeError('Could not determine version')
setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      url=URL,
      version=VERSION,
      download_url=DOWNLOAD_URL,
      long_description=README,
      packages=find_packages(exclude=['docs', 'tests']),
      install_requires=requirements,
      license=LICENSE
      )
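# A standalone sketch of the same "read __version__ without importing the
# package" idiom used above, in its common regex form (the path is the
# package's own __init__.py; nothing else here is from this setup script):
import re

def read_version(init_path=os.path.join('nivlink', '__init__.py')):
    with open(init_path) as fid:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]",
                          fid.read(), re.M)
        if match is None:
            raise RuntimeError('Could not determine version')
        return match.group(1)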
"[email protected]"
] | |
06dec5bffda4f9bce976bfa3abf34ab323768695 | c29de7ce2d91f572aeb4da56801de7a1dc034054 | /st2/experiments/cifar10/exp011.py | f2fd3d516a219c49e1c585326c4a98eaf1043f51 | [] | no_license | kzky/works | 18b8d754bfc2b1da22022926d882dfe92ea785e6 | b8708c305e52f924ea5a7071e0dfe5f2feb7a0a3 | refs/heads/master | 2021-01-10T08:04:44.831232 | 2018-03-01T15:09:47 | 2018-03-01T15:09:47 | 54,316,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla
from nnabla.contrib.context import extension_context
import numpy as np
import os
import time
import argparse
from st2.cifar10.cnn_model_011 import cnn_model_003, ce_loss, sr_loss, er_loss, \
    GradScaleContainer
from st2.cifar10.datasets import Cifar10DataReader, Separator
"""
The same script as the `st` module but with nnabla.
- ConvPool-CNN-C (Springenberg et al., 2014, Salimans&Kingma (2016))
- Stochastic Regularization
- Entropy Regularization for the outputs before CE loss and SR loss
- Gradient scaling: just consider large gradients of g_u
"""
def categorical_error(pred, label):
    """
    Compute categorical error given score vectors and labels as
    numpy.ndarray.
    """
    pred_label = pred.argmax(1)
    return (pred_label != label.flat).mean()
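# Minimal sanity check (illustrative numbers, not from the experiment):
# predictions argmax to [1, 0] against labels [1, 1] -> error rate 0.5.
_pred_demo = np.array([[0.1, 0.9], [0.8, 0.2]])
_label_demo = np.array([[1], [1]])
assert categorical_error(_pred_demo, _label_demo) == 0.5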
def main(args):
    # Settings
    device_id = args.device_id
    batch_size = 100
    batch_size_eval = 100
    n_l_train_data = 4000
    n_train_data = 50000
    n_cls = 10
    learning_rate = 1. * 1e-3
    n_epoch = 300
    act = F.relu
    iter_epoch = n_train_data // batch_size  # integer division so range() below gets an int
    n_iter = n_epoch * iter_epoch
    extension_module = args.context

    # Model
    ## supervised
    batch_size, m, h, w = batch_size, 3, 32, 32
    ctx = extension_context(extension_module, device_id=device_id)
    x_l = nn.Variable((batch_size, m, h, w))
    y_l = nn.Variable((batch_size, 1))
    pred = cnn_model_003(ctx, x_l)
    loss_ce = ce_loss(ctx, pred, y_l)
    loss_er = er_loss(ctx, pred)
    loss_supervised = loss_ce + loss_er

    ## stochastic regularization
    x_u0 = nn.Variable((batch_size, m, h, w))
    x_u1 = nn.Variable((batch_size, m, h, w))
    pred_x_u0 = cnn_model_003(ctx, x_u0)
    pred_x_u1 = cnn_model_003(ctx, x_u1)
    loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
    loss_er0 = er_loss(ctx, pred_x_u0)
    loss_er1 = er_loss(ctx, pred_x_u1)
    loss_unsupervised = loss_sr + loss_er0 + loss_er1

    ## evaluate
    batch_size_eval, m, h, w = batch_size, 3, 32, 32
    x_eval = nn.Variable((batch_size_eval, m, h, w))
    pred_eval = cnn_model_003(ctx, x_eval, test=True)

    # Solver
    with nn.context_scope(ctx):
        solver = S.Adam(alpha=learning_rate)
        solver.set_parameters(nn.get_parameters())

    # Gradient Scale Container
    gsc = GradScaleContainer(len(nn.get_parameters()))

    # Dataset
    ## separate dataset
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)
    l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
    u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")

    # data reader
    data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
                                    batch_size=batch_size,
                                    n_cls=n_cls,
                                    da=True,  # TODO: use F.image_augmentation
                                    shape=True)

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    for i in range(n_iter):
        # Get data and set it to the variables
        x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
        x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
        x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
        x_u0.d, x_u1.d = x_u0_data, x_u1_data

        # Train
        loss_supervised.forward(clear_no_need_grad=True)
        loss_unsupervised.forward(clear_no_need_grad=True)
        solver.zero_grad()
        loss_unsupervised.backward(clear_buffer=True)
        gsc.scale_grad(ctx, nn.get_parameters())
        loss_supervised.backward(clear_buffer=True)
        ## update
        solver.update()

        # Evaluate
        if (i+1) % iter_epoch == 0:
            # Get data and set it to the variables
            x_data, y_data = data_reader.get_test_batch()
            # Evaluation loop
            ve = 0.
            iter_val = 0
            for k in range(0, len(x_data), batch_size_eval):
                x_eval.d = x_data[k:k+batch_size_eval, :]
                label = y_data[k:k+batch_size_eval, :]
                pred_eval.forward(clear_buffer=True)
                ve += categorical_error(pred_eval.d, label)
                iter_val += 1
            msg = "Epoch:{},ElapsedTime:{},Acc:{:02f}".format(
                epoch,
                time.time() - st,
                (1. - ve / iter_val) * 100)
            print(msg)
            st = time.time()
            epoch += 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--device_id", "-d", type=int, default=0)
    parser.add_argument('--context', '-c', type=str,
                        default="cpu", help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
    args = parser.parse_args()
    main(args)

# ===== file: /signatures/windows/trojan_rovnix.py (repo: dashjuvi/Cuckoo-Sandbox-vbox-win7) =====
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Rovnix(Signature):
    name = "rovnix"
    description = "Rovnix Trojan"
    severity = 3
    categories = ["banker", "trojan"]
    authors = ["Mikael Keri"]
    minimum = "2.0"

    files_re = [
        ".*\\\\AppData\\\\Local\\\\Temp\\\\L[0-9]{9}",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\RSA[0-9]{9}.dll",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\CFG[0-9]{9}.dll",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\DB[0-9]{9}.dll",
    ]
    regkeys_re = [
        ".*\\\\Software\\\\Microsoft\\\\Installer\\\\Products\\\\B[0-9]{9}",
    ]
    mutexes_re = [
        ".*UACNTFS[0-9]{9}",
        ".*INSNTFS[0-9]{9}",
        ".*BDNTFS[0-9]{9}",
        ".*PL6NTFS[0-9]{9}",
        ".*PL1NTFS[0-9]{9}",
    ]
    def on_complete(self):
        for indicator in self.mutexes_re:
            for mutex in self.check_mutex(pattern=indicator, regex=True, all=True):
                self.mark_ioc("mutex", mutex)
        for indicator in self.regkeys_re:
            for regkey in self.check_key(pattern=indicator, regex=True, all=True):
                self.mark_ioc("registry", regkey)
        for indicator in self.files_re:
            for regkey in self.check_file(pattern=indicator, regex=True, all=True):
                self.mark_ioc("file", regkey)
        return self.has_marks()
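# Illustrative check (not part of the Cuckoo signature): the mutex patterns
# above match names like the hypothetical one below.
import re
assert re.match(".*UACNTFS[0-9]{9}", r"\BaseNamedObjects\UACNTFS123456789")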

# ===== file: /uiview/ui_addSPOB.py (repo: himalsaman/kmfmaintenance) =====
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_addSPOB.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import sys
from datetime import datetime
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDialog
from Control.materialsControl import decreaseSparePartsInvQty
from Control.ouboundControl import OutBCode
from models.dbUtile import Employees, Customers
from models.ouboundModel import add_outbound
from models.sparePartsModel import select_spare_parts_bycode, select_all_spare_parts
class Ui_addSPOBDialog(QDialog):
    def __init__(self, obj):
        super(Ui_addSPOBDialog, self).__init__()
        self.obj = obj
        self.setupUi(self)
    def setupUi(self, addSPOBDialog):
        self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)
        addSPOBDialog.setObjectName("addSPOBDialog")
        addSPOBDialog.resize(726, 266)
        self.label = QtWidgets.QLabel(addSPOBDialog)
        self.label.setGeometry(QtCore.QRect(10, 10, 150, 13))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(addSPOBDialog)
        self.label_2.setGeometry(QtCore.QRect(380, 16, 120, 13))
        self.label_2.setObjectName("label_2")
        self.spnameled = QtWidgets.QLineEdit(addSPOBDialog)
        self.spnameled.setEnabled(False)
        self.spnameled.setGeometry(QtCore.QRect(486, 14, 230, 20))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.spnameled.setFont(font)
        self.spnameled.setStyleSheet("color: rgb(255, 0, 0);")
        self.spnameled.setObjectName("spnameled")
        self.spcodeled = QtWidgets.QLineEdit(addSPOBDialog)
        self.spcodeled.setEnabled(False)
        self.spcodeled.setGeometry(QtCore.QRect(418, 46, 90, 20))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.spcodeled.setFont(font)
        self.spcodeled.setStyleSheet("color: rgb(255, 0, 0);")
        self.spcodeled.setObjectName("spcodeled")
        self.label_3 = QtWidgets.QLabel(addSPOBDialog)
        self.label_3.setGeometry(QtCore.QRect(383, 48, 30, 13))
        self.label_3.setObjectName("label_3")
        self.spinqtyled = QtWidgets.QLineEdit(addSPOBDialog)
        self.spinqtyled.setEnabled(False)
        self.spinqtyled.setGeometry(QtCore.QRect(596, 46, 80, 20))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.spinqtyled.setFont(font)
        self.spinqtyled.setStyleSheet("color: rgb(255, 0, 0);")
        self.spinqtyled.setObjectName("spinqtyled")
        self.label_4 = QtWidgets.QLabel(addSPOBDialog)
        self.label_4.setGeometry(QtCore.QRect(517, 49, 80, 13))
        self.label_4.setObjectName("label_4")
        self.reqqtyled_2 = QtWidgets.QLineEdit(addSPOBDialog)
        self.reqqtyled_2.setGeometry(QtCore.QRect(466, 134, 150, 20))
        self.reqqtyled_2.setObjectName("reqqtyled_2")
        self.reqqtyled = QtWidgets.QLabel(addSPOBDialog)
        self.reqqtyled.setGeometry(QtCore.QRect(478, 113, 130, 13))
        self.reqqtyled.setObjectName("reqqtyled")
        self.spgencodeled = QtWidgets.QLineEdit(addSPOBDialog)
        self.spgencodeled.setEnabled(False)
        self.spgencodeled.setGeometry(QtCore.QRect(456, 76, 160, 20))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.spgencodeled.setFont(font)
        self.spgencodeled.setStyleSheet("color: rgb(255, 0, 0);")
        self.spgencodeled.setObjectName("spgencodeled")
        self.label_6 = QtWidgets.QLabel(addSPOBDialog)
        self.label_6.setGeometry(QtCore.QRect(385, 79, 70, 13))
        self.label_6.setObjectName("label_6")
        self.line = QtWidgets.QFrame(addSPOBDialog)
        self.line.setGeometry(QtCore.QRect(380, 104, 340, 3))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.addbtn = QtWidgets.QPushButton(addSPOBDialog)
        self.addbtn.setGeometry(QtCore.QRect(465, 232, 70, 30))
        self.addbtn.setObjectName("addbtn")
        self.closebtn = QtWidgets.QPushButton(addSPOBDialog)
        self.closebtn.setGeometry(QtCore.QRect(555, 232, 70, 30))
        self.closebtn.setObjectName("closebtn")
        self.listView = QtWidgets.QListWidget(addSPOBDialog)
        self.listView.setGeometry(QtCore.QRect(8, 30, 360, 230))
        self.listView.setObjectName("listView")
        self.line_2 = QtWidgets.QFrame(addSPOBDialog)
        self.line_2.setGeometry(QtCore.QRect(374, 3, 3, 260))
        self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.label_5 = QtWidgets.QLabel(addSPOBDialog)
        self.label_5.setGeometry(QtCore.QRect(521, 168, 40, 13))
        self.label_5.setObjectName("label_5")
        self.resonled = QtWidgets.QLineEdit(addSPOBDialog)
        self.resonled.setGeometry(QtCore.QRect(387, 189, 330, 20))
        self.resonled.setObjectName("resonled")
        self.line_3 = QtWidgets.QFrame(addSPOBDialog)
        self.line_3.setGeometry(QtCore.QRect(379, 221, 340, 3))
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        for item in select_all_spare_parts():
            self.listView.addItem(item.gen_code + " - " + item.name + '({})'.format(item.code))
        self.listView.itemClicked.connect(self.Clicked)
        self.addbtn.clicked.connect(self.do_add)
        self.closebtn.clicked.connect(self.close)
        self.retranslateUi(addSPOBDialog)
        QtCore.QMetaObject.connectSlotsByName(addSPOBDialog)
    def retranslateUi(self, addSPOBDialog):
        _translate = QtCore.QCoreApplication.translate
        addSPOBDialog.setWindowTitle(_translate("addSPOBDialog", "Add Spare Part Outbound"))
        self.label.setText(_translate("addSPOBDialog", "Select Spare Part From List :"))
        self.label_2.setText(_translate("addSPOBDialog", "Selected Spare Part :"))
        self.label_3.setText(_translate("addSPOBDialog", "Code :"))
        self.label_4.setText(_translate("addSPOBDialog", "Inventory QTY:"))
        self.reqqtyled.setText(_translate("addSPOBDialog", "How much qty do you want?"))
        self.label_6.setText(_translate("addSPOBDialog", "System Code :"))
        self.addbtn.setText(_translate("addSPOBDialog", "Add"))
        self.closebtn.setText(_translate("addSPOBDialog", "Close"))
        self.label_5.setText(_translate("addSPOBDialog", "Reason"))
    def Clicked(self, item):
        self.addbtn.setEnabled(True)
        code = before(item.text(), '-')
        if select_spare_parts_bycode(code):
            rawMat = select_spare_parts_bycode(code)
            self.spnameled.setText(rawMat.name)
            self.spcodeled.setText(rawMat.code)
            self.spinqtyled.setText(str(rawMat.inv_qty))
            self.spgencodeled.setText(rawMat.gen_code)
            return rawMat
    def do_add(self):
        datetimestr = datetime.now()
        timestampstr = datetimestr.strftime('%Y-%m-%d %H:%M:%S')
        code = self.spgencodeled.text()
        rawmat = select_spare_parts_bycode(code)
        qty = self.reqqtyled_2.text()
        reas = self.resonled.text()
        if qty != '' or reas != '':
            if type(self.obj) == Employees:
                add_outbound(OutBCode(), timestampstr, reas, None, self.obj.id, None
                             , rawmat.id, None, None, qty, 1)
            if type(self.obj) == Customers:
                add_outbound(OutBCode(), timestampstr, reas, self.obj.id, None, None
                             , rawmat.id, None, None, qty, 1)
            decreaseSparePartsInvQty(rawmat, int(qty))
            self.close()
            # print(str(OutBCode()))
def before(value, a):
    # Find first part and return slice before it.
    pos_a = value.find(a)
    if pos_a == -1: return ""
    return value[0:pos_a]
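# Quick sanity check for before() (the list-entry format is illustrative):
assert before("GC001 - Bearing(SP-7)", " - ") == "GC001"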
# if __name__ == '__main__':
# app = QtWidgets.QApplication(sys.argv)
# myapp = Ui_addSPOBDialog()
# myapp.exec_()

# ===== file: /TQC+ 網頁資料擷取與分析 第1類:資料處理能力/103 勞保投保薪資分級表/PYD01_原.py (repo: eclairsameal/TQC-Python) =====
# Load the yaml and json modules
import ___
import ___
# Read the JSON file
with ___("___", encoding='utf-8-sig') as file:
    data = ___.___(___)
# Write the YAML file
with ___("___", "___", encoding="utf-8") as f:
    ___.___(data, f, default_flow_style=False, allow_unicode=True)
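# One possible completion of the exam template above (the file names are
# illustrative placeholders, not the official exam data):
# import json
# import yaml
# with open("data.json", encoding='utf-8-sig') as file:
#     data = json.load(file)
# with open("data.yaml", "w", encoding="utf-8") as f:
#     yaml.dump(data, f, default_flow_style=False, allow_unicode=True)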

# ===== file: /src/builtins/networking/LearningSelect/01_select_echo_server.py (repo: SugarP1g/LearningPython) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import select
import socket
import sys
import queue
# Create a TCP/IP socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)

# Bind the socket to the port
server_address = ('localhost', 10000)
print('starting up on {} port {}'.format(*server_address),
      file=sys.stderr)
server.bind(server_address)

# Listen for incoming connections
server.listen(5)

# Sockets from which we expect to read
inputs = [server]
# Sockets to which we expect to write
outputs = []
# Outgoing message queues, in the form (socket: Queue)
message_queues = {}

while inputs:
    # Wait until at least one socket is ready for processing.
    print('waiting for the next event', file=sys.stderr)
    readable, writable, exceptional = select.select(inputs, outputs, inputs)
    # Handle inputs
    for s in readable:
        if s is server:
            # A readable server socket is ready to accept a connection.
            connection, client_address = s.accept()
            print('  connection from', client_address, file=sys.stderr)
            connection.setblocking(0)
            inputs.append(connection)
            # Give the connection a queue for data we want to send.
            message_queues[connection] = queue.Queue()
        else:
            data = s.recv(1024)
            if data:
                # A readable client socket with data
                print('  received {!r} from {}'.format(
                    data, s.getpeername()), file=sys.stderr,
                )
                message_queues[s].put(data)
                # Add to the output list so we can respond
                if s not in outputs:
                    outputs.append(s)
            else:
                # An empty result means the connection should be closed
                print('  closing', client_address,
                      file=sys.stderr)
                # Stop listening for input on the connection
                if s in outputs:
                    outputs.remove(s)
                inputs.remove(s)
                s.close()
                # Remove this message queue
                del message_queues[s]
    # Handle outputs
    for s in writable:
        try:
            next_msg = message_queues[s].get_nowait()
        except queue.Empty:
            # No messages waiting, so stop checking for writability.
            print('  ', s.getpeername(), 'queue empty',
                  file=sys.stderr)
            outputs.remove(s)
        else:
            print('  sending {!r} to {}'.format(next_msg,
                                                s.getpeername()),
                  file=sys.stderr)
            s.send(next_msg)
    # Handle "exceptional conditions"
    for s in exceptional:
        print('exception condition on', s.getpeername(),
              file=sys.stderr)
        # Stop listening for input on the connection.
        inputs.remove(s)
        if s in outputs:
            outputs.remove(s)
        s.close()
        # Remove the message queue.
        del message_queues[s]
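# A matching client for manual testing might look like this (sketch only):
# import socket
# sock = socket.create_connection(('localhost', 10000))
# sock.sendall(b'hello')
# print(sock.recv(1024))  # the server echoes the message back
# sock.close()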

# ===== file: /class7/lc2.py (repo: javatican/migulu_python) =====
x = [1,2,3,4,5,6]
y = ['a','b','c','e','f','e']
z = []
for a in x:
    for b in y:
        z.append((a,b))
print(z)
z = [(a,b) for a in x for b in y]
print(z)

# ===== file: /source/main.py (repo: jwierzb/KINDERGARTEN-DECISSION-PREDICTION) =====
import pandas as pd
from c50.c50 import C50
# Config
summary_txt = '../result/summary.txt'
result_txt = '../result/prediction_result.txt'
result_csv = '../result/prediction_result.csv'
training_data = "../data/train.csv"
test_data = "../data/test.csv"
plot_file = "../result/tree.png"
accTest = False
trials = 1
subset = 0.9 # coefficient of subset size for acc test
# Import data
print("Loading training dataset\n")
dataFrame = pd.read_csv(training_data)
# Replace ? values with null (for rpy2)
dataFrame.replace({"?": None})
# if working on a sample of data
if accTest:
    print('Working on smaller set of data (rest goes for accuracy test)\n')
    # Hold out the tail before truncating; the original truncated first,
    # which left acc_set empty.
    acc_set = dataFrame[int(len(dataFrame)*subset):]
    dataFrame = dataFrame[:int(len(dataFrame)*subset)]
    subset = 1  # for the acc test we are already working on the smaller subset
# Divide dataframe into values and labels
Xtrain = dataFrame.drop(dataFrame.columns[len(dataFrame.columns)-1], axis=1) # Data
Ytrain = dataFrame.loc[:, dataFrame.columns[len(dataFrame.columns)-1]] # Labels
# Defining classifier
classifier = C50(Xtrain, Ytrain)
# Training classifier
print(f"Training: Training sample part = {subset}, trials = {trials}\n")
classifier.train(trials=trials, subset=subset)
# Print C50 package summary about the decision tree
print(f"Printing summary and saving to file /{summary_txt}\n")
classifier.print_summary(summary_txt)
# Defining test dataframe
print("Loading test set\n")
X = pd.read_csv(test_data)
# Save result of prediction on pd.read_csv('../data/test.csv') dataframe (just predicted values)
print(f"Saving predicted values in {result_txt}\n")
classifier.predict_and_save(X, result_txt)
# Save result of prediction with predicting rows on csvread_csv('../data/test.csv') dataframe
print(f"Saving predicted values with predicting rows in {result_csv}\n")
classifier.predict_to_csv(X, result_csv)
# Accuracy test
if accTest:
    Xtest = acc_set.drop(acc_set.columns[len(acc_set.columns)-1], axis=1)  # Data
    Xtest = Xtest.reset_index(drop=True)
    Ytest = acc_set.loc[:, acc_set.columns[len(acc_set.columns)-1]].values  # Labels
    Y = classifier.predict(Xtest)
    count = 0
    for i in range(0, len(Y)):
        if Y[i] == str(Ytest[i]):
            count += 1
print(f"Good/bad {count/(len(Y))} coefficient") | [
"[email protected]"
] | |
eab94ff6951109df883220329881304e35f63c38 | b8f11e4e49405765153adf0e0cfb63947e66a87f | /brb/__init__.py | 9ab16122ba1b0a2f058027c6afca4c736342ec71 | [
"MIT"
] | permissive | cyberjunky/binance-report-bot | 6bed327a76af84c60d07e98fd4e1f2663cdb0f52 | 04d0f06e814d934ffe3684025875731efd655171 | refs/heads/main | 2023-07-17T21:02:37.613312 | 2021-09-03T07:29:47 | 2021-09-03T07:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | import logging
import datetime as dt
import apprise
import brb.conf as conf
import os
log_level = logging.DEBUG if os.environ.get("BRB_DEBUG") is not None else logging.WARNING
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=log_level
)
logger = logging.getLogger(__name__)
notifier = None
if len(conf.APPRISE_URLS) > 0:
    notifier = apprise.Apprise()
    for url in conf.APPRISE_URLS:
        notifier.add(url)
class ErrorAppriseNotifier(logging.Handler):
    def __init__(self):
        """
        Logging Handler that sends an apprise notification when an error is raised
        """
        super().__init__()

    def emit(self, record):
        if record.levelno >= 30:  # warning, critical or error
            date = str(dt.datetime.fromtimestamp(int(record.created)))
            notifier.notify(
                body=f"```{date} - {record.name} - {record.levelname} - {record.msg}```",
            )
logger.addHandler(ErrorAppriseNotifier())
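# Minimal sketch of the same pattern with a generic handler (no Apprise
# needed; `PrintOnError` is a toy stand-in, not part of this module):
# import logging
#
# class PrintOnError(logging.Handler):
#     def emit(self, record):
#         if record.levelno >= logging.WARNING:
#             print("would notify:", self.format(record))
#
# demo_logger = logging.getLogger("brb.demo")
# demo_logger.addHandler(PrintOnError())
# demo_logger.error("something broke")  # triggers the handler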

# ===== file: /account/admin.py (repo: AzhKay/blog) =====
from django.contrib import admin
from account.models import MyUser
admin.site.register(MyUser)
| [
"[email protected]"
] | |
1370beb4af9883bbc46059d5e9d0b395c36a094f | 8fa5521679cf0cb3c558240085e605a93486cfc6 | /navigation/voxel_grid/catkin_generated/pkg.installspace.context.pc.py | 8beb527fa184e0eea8142ed377e577de45760ba2 | [
"MIT"
] | permissive | mrsd16teamd/loco_car | 3c89e1f13af7810c293f4589d959ce371e6a969a | 36e4ed685f9463ad689ca72eec80e0f05f1ad66c | refs/heads/master | 2021-05-03T23:55:45.176412 | 2020-11-24T16:41:02 | 2020-11-24T16:41:02 | 71,826,846 | 52 | 23 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/usr/local/include".split(';') if "/usr/local/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lvoxel_grid".split(';') if "-lvoxel_grid" != "" else []
PROJECT_NAME = "voxel_grid"
PROJECT_SPACE_DIR = "/usr/local"
PROJECT_VERSION = "1.13.1"
| [
"[email protected]"
] | |
f18ecf944e7bcbec951b822078d00c18162f7e3e | 161c4d331f97e6c082b5d2d7c5ecd4a5341a365d | /final_test.py | 506f6620fe5d644864914f561ff97adcfc7fe4cb | [] | no_license | 02san02/Cancer-Prediction | e4d285335936745e79f591db608c846a115e5c30 | 002e8172658a849dc0a83a6759b6aabd240a7676 | refs/heads/main | 2023-04-06T00:06:01.413625 | 2021-04-27T09:20:10 | 2021-04-27T09:20:10 | 319,929,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import argparse
# Input the following arguments
parser = argparse.ArgumentParser()
parser.add_argument("-m1", help="Path with name of json and h5 file", type=str) # Example, "data/model1", not specifying the extensions. Make sure both the files have the same name
parser.add_argument("-m2", help="Path with name of json and h5 file", type=str) # Example, "data/model2", not specifying the extensions. Make sure both the files have the same name
parser.add_argument("-t", help="Folder path where all the tiles to be tested are present", type=str)
args = parser.parse_args()
m1, m2, t = args.m1, args.m2, args.t
if not m1:
    raise ValueError("Model1 path not specified")  # raise needs an exception object, not a bare string
if not m2:
    raise ValueError("Model2 path not specified")
if not t:
    raise ValueError("Test folder path not specified")
# Function used to load the pretrained model
def load_model(path=""):
json_file = open(path+".json", 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(path+".h5")
print("Model loaded successfully")
return loaded_model
def final_evaluate(model1, model2, path="", folder_name=""):
# path is the directory where the folder_name named folder is present and folder_name has the tiles which are to be evaluated
# model1 => [normal, cancer]
# model2 => [luad, lusc]
# Generating the generator for loading the test slides
datagen = ImageDataGenerator(rescale=1/255)
test = datagen.flow_from_directory(path, target_size=(224, 224), batch_size=1, classes=[folder_name], class_mode=None, shuffle=False)
# Predicting the probability of tile for each model
test.reset()
p1 = model1.predict(test, verbose=1, max_queue_size=200, workers=200)
test.reset()
p2 = model2.predict(test, verbose=1, max_queue_size=200, workers=200)
# calculating the percentage of each classes
c1 = 0
c2 = 0
c3 = 0
c4 = 0
for i in range(p1.shape[0]):
if(p1[i][0] > p1[i][1]):
c1 += 1
else:
c2 += 1
if(p2[i][0] > p2[i][1]):
c3 += 1
else:
c4 += 1
print("The precentage of cancer is ", c2/(c1+c2))
print(["luad", "lusc"][c3 < c4], "cancer detected with", c3/(c3+c4) if c3>c4 else c4/(c3+c4), "probability")
return [c1, c2], [c3, c4]
t = t if t[-1]!="/" else t[:-1]
# Loading both the models
model1 = load_model(m1)
model2 = load_model(m2)
path = ""
for i in t.split("/")[:-1]:
path += i+"/"
# Predicting the final probabilities
print(final_evaluate(model1, model2, path, t.split("/")[-1]))
| [
"[email protected]"
] | |
fb4055ed05dc497e1fbf506797c4d8371e6725f6 | bd053d2bf5444ab8f0b8b0ff56772fa75281e38d | /qchem/tests/test_observable.py | 09b74d615188f94b288e790d8fe1d3a885eb13cd | [
"Apache-2.0"
] | permissive | johannesjmeyer/pennylane | bcb762583e95537b04a9b38756369571f957d2e5 | 8f602312baea107d5248267fb3dc1593722810e0 | refs/heads/master | 2023-07-11T18:21:31.086858 | 2021-08-14T19:21:42 | 2021-08-14T19:21:42 | 341,190,636 | 3 | 1 | Apache-2.0 | 2021-06-16T09:01:58 | 2021-02-22T12:19:10 | Python | UTF-8 | Python | false | false | 4,359 | py | import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion import FermionOperator, QubitOperator
t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", -0.5)
v = (
    FermionOperator("0^ 0^ 0 0", 0.25)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.5)
)
v1 = (
    FermionOperator("0^ 0^ 0 0", 0.25)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("0^ 2^ 2 0", 0.25)
    + FermionOperator("0^ 3^ 3 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.25)
    + FermionOperator("2^ 0^ 0 2", 0.25)
)
v2 = (
    FermionOperator("0^ 0^ 0 0", 0.5)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("0^ 2^ 2 0", 0.5)
    + FermionOperator("0^ 3^ 3 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.25)
    + FermionOperator("2^ 0^ 0 2", -0.25)
)
@pytest.mark.parametrize(
    ("fermion_ops", "init_term", "mapping", "terms_exp"),
    [
        (
            [t, v],
            1 / 4,
            "bravyi_KITAEV",
            {
                (): (0.0625 + 0j),
                ((0, "Z"),): (-0.0625 + 0j),
                ((0, "Z"), (1, "Z")): (0.4375 + 0j),
                ((1, "Z"),): (-0.1875 + 0j),
            },
        ),
        (
            [t, v],
            1 / 4,
            "JORDAN_wigner",
            {
                (): (0.0625 + 0j),
                ((0, "Z"),): (-0.0625 + 0j),
                ((1, "Z"),): (0.4375 + 0j),
                ((0, "Z"), (1, "Z")): (-0.1875 + 0j),
            },
        ),
        (
            [t],
            1 / 2,
            "JORDAN_wigner",
            {(): (0.5 + 0j), ((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
        ),
        (
            [t],
            0,
            "JORDAN_wigner",
            {((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
        ),
        (
            [v1],
            1 / 2,
            "JORDAN_wigner",
            {
                (): (0.4375 + 0j),
                ((1, "Z"),): (0.125 + 0j),
                ((0, "Z"), (1, "Z")): (-0.125 + 0j),
                ((2, "Z"),): (-0.125 + 0j),
                ((0, "Z"), (2, "Z")): (0.125 + 0j),
                ((0, "Z"),): (0.0625 + 0j),
                ((3, "Z"),): (0.0625 + 0j),
                ((0, "Z"), (3, "Z")): (-0.0625 + 0j),
            },
        ),
        (
            [v2],
            1 / 4,
            "bravyi_KITAEV",
            {
                (): (0.125 + 0j),
                ((0, "Z"), (1, "Z")): (0.125 + 0j),
                ((1, "Z"),): (-0.125 + 0j),
                ((2, "Z"),): (-0.0625 + 0j),
                ((0, "Z"), (2, "Z")): (0.0625 + 0j),
                ((1, "Z"), (2, "Z"), (3, "Z")): (0.0625 + 0j),
                ((0, "Z"), (1, "Z"), (2, "Z"), (3, "Z")): (-0.0625 + 0j),
                ((0, "Z"),): (0.125 + 0j),
            },
        ),
    ],
)
def test_observable(fermion_ops, init_term, mapping, terms_exp, custom_wires, monkeypatch):
    r"""Tests the correctness of the 'observable' function used to build many-body observables.
    The parametrized inputs `terms_exp` are the `.terms` attribute of the corresponding
    `QubitOperator`. The equality checking is implemented in the `qchem` module itself
    as it could be something useful to the users as well.
    """
    res_obs = qchem.observable(
        fermion_ops, init_term=init_term, mapping=mapping, wires=custom_wires
    )
    qubit_op = QubitOperator()
    monkeypatch.setattr(qubit_op, "terms", terms_exp)
    assert qchem._qubit_operators_equivalent(qubit_op, res_obs, wires=custom_wires)
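# For orientation: a QubitOperator's .terms maps Pauli words to coefficients,
# e.g. QubitOperator("Z0", 0.5).terms == {((0, "Z"),): 0.5} (illustrative).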
msg1 = "Elements in the lists are expected to be of type 'FermionOperator'"
msg2 = "Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'"
@pytest.mark.parametrize(
    ("fermion_ops", "mapping", "msg_match"),
    [
        ([FermionOperator("0^ 0", 0.5), "notFermionOperator"], "JORDAN_wigner", msg1),
        ([FermionOperator("0^ 0", 0.5)], "no_valid_transformation", msg2),
    ],
)
def test_exceptions_observable(fermion_ops, mapping, msg_match):
    """Test that the 'observable' function throws an exception if any element
    in the list 'fermion_ops' is not a FermionOperator object or if the
    fermionic-to-qubit transformation is not properly defined."""
    with pytest.raises(TypeError, match=msg_match):
        qchem.observable(fermion_ops, mapping=mapping)

# ===== file: /vnpy/app/data_recorder/ui/widget.py (repo: dxcv/jiangyx_vnpy) =====
from datetime import datetime
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets
from vnpy.trader.event import EVENT_CONTRACT
from ..engine import (
    APP_NAME,
    EVENT_RECORDER_LOG,
    EVENT_RECORDER_UPDATE
)
class RecorderManager(QtWidgets.QWidget):
    """"""

    signal_log = QtCore.pyqtSignal(Event)
    signal_update = QtCore.pyqtSignal(Event)
    signal_contract = QtCore.pyqtSignal(Event)

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        super().__init__()
        self.main_engine = main_engine
        self.event_engine = event_engine
        # APP_NAME = "DataRecorder": fetch the data recorder engine from the main engine
        self.recorder_engine = main_engine.get_engine(APP_NAME)
        self.init_ui()
        self.register_event()
        self.recorder_engine.put_event()
    def init_ui(self):
        """"""
        self.setWindowTitle("行情记录")
        self.resize(1000, 600)
        # Create widgets
        self.symbol_line = QtWidgets.QLineEdit()
        self.symbol_line.setFixedHeight(
            self.symbol_line.sizeHint().height() * 2)
        contracts = self.main_engine.get_all_contracts()
        self.vt_symbols = [contract.vt_symbol for contract in contracts]
        self.symbol_completer = QtWidgets.QCompleter(self.vt_symbols)
        self.symbol_completer.setFilterMode(QtCore.Qt.MatchContains)
        self.symbol_completer.setCompletionMode(
            self.symbol_completer.PopupCompletion)
        self.symbol_line.setCompleter(self.symbol_completer)
        add_bar_button = QtWidgets.QPushButton("添加")
        add_bar_button.clicked.connect(self.add_bar_recording)
        remove_bar_button = QtWidgets.QPushButton("移除")
        remove_bar_button.clicked.connect(self.remove_bar_recording)
        add_tick_button = QtWidgets.QPushButton("添加")
        add_tick_button.clicked.connect(self.add_tick_recording)
        remove_tick_button = QtWidgets.QPushButton("移除")
        remove_tick_button.clicked.connect(self.remove_tick_recording)
        self.bar_recording_edit = QtWidgets.QTextEdit()
        self.bar_recording_edit.setReadOnly(True)
        self.tick_recording_edit = QtWidgets.QTextEdit()
        self.tick_recording_edit.setReadOnly(True)
        self.log_edit = QtWidgets.QTextEdit()
        self.log_edit.setReadOnly(True)
        # Set layout
        grid = QtWidgets.QGridLayout()
        grid.addWidget(QtWidgets.QLabel("K线记录"), 0, 0)
        grid.addWidget(add_bar_button, 0, 1)
        grid.addWidget(remove_bar_button, 0, 2)
        grid.addWidget(QtWidgets.QLabel("Tick记录"), 1, 0)
        grid.addWidget(add_tick_button, 1, 1)
        grid.addWidget(remove_tick_button, 1, 2)
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(QtWidgets.QLabel("本地代码"))
        hbox.addWidget(self.symbol_line)
        hbox.addWidget(QtWidgets.QLabel(" "))
        hbox.addLayout(grid)
        hbox.addStretch()
        grid2 = QtWidgets.QGridLayout()
        grid2.addWidget(QtWidgets.QLabel("K线记录列表"), 0, 0)
        grid2.addWidget(QtWidgets.QLabel("Tick记录列表"), 0, 1)
        grid2.addWidget(self.bar_recording_edit, 1, 0)
        grid2.addWidget(self.tick_recording_edit, 1, 1)
        grid2.addWidget(self.log_edit, 2, 0, 1, 2)
        vbox = QtWidgets.QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addLayout(grid2)
        self.setLayout(vbox)
    def register_event(self):
        """"""
        self.signal_log.connect(self.process_log_event)
        self.signal_contract.connect(self.process_contract_event)
        self.signal_update.connect(self.process_update_event)
        self.event_engine.register(EVENT_CONTRACT, self.signal_contract.emit)
        self.event_engine.register(
            EVENT_RECORDER_LOG, self.signal_log.emit)
        self.event_engine.register(
            EVENT_RECORDER_UPDATE, self.signal_update.emit)
    def process_log_event(self, event: Event):
        """"""
        timestamp = datetime.now().strftime("%H:%M:%S")
        msg = f"{timestamp}\t{event.data}"
        self.log_edit.append(msg)

    def process_update_event(self, event: Event):
        """"""
        data = event.data
        self.bar_recording_edit.clear()
        bar_text = "\n".join(data["bar"])
        self.bar_recording_edit.setText(bar_text)
        self.tick_recording_edit.clear()
        tick_text = "\n".join(data["tick"])
        self.tick_recording_edit.setText(tick_text)
    def process_contract_event(self, event: Event):
        """"""
        contract = event.data
        self.vt_symbols.append(contract.vt_symbol)
        model = self.symbol_completer.model()
        model.setStringList(self.vt_symbols)
    def add_bar_recording(self):
        """"""
        vt_symbol = self.symbol_line.text()
        self.recorder_engine.add_bar_recording(vt_symbol)

    def add_tick_recording(self):
        """"""
        vt_symbol = self.symbol_line.text()
        self.recorder_engine.add_tick_recording(vt_symbol)

    def remove_bar_recording(self):
        """"""
        vt_symbol = self.symbol_line.text()
        self.recorder_engine.remove_bar_recording(vt_symbol)

    def remove_tick_recording(self):
        """"""
        vt_symbol = self.symbol_line.text()
        self.recorder_engine.remove_tick_recording(vt_symbol)

# ===== file: /UniversalSongBarnManager/KrncUsbManager/ffmpeg_filter.py (repo: engineerjoe440/KRNCApps) =====
"""
#######################################################################################
Universal Song Barn (USB) Manager - Tkinter App (powered by PySimpleGUI)
(c) Stanley Solutions - 2020
By: Joe Stanley
#######################################################################################
"""
FILTER_STRING = "{FFMPEG_EXE} -i {IN_PATH} {FILTER} {OUT_PATH}"
BUILTIN_FILTERS = {
    "Dirty Compand": """ -filter_complex "compand=attacks=0:points=-80/-900|-45/-15|-27/-9|0/-7|20/-7:gain=5" """,
    "Light Compand": """ -filter:a "compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2" """,
    "Heavy Compand": """ -filter:a "compand=0|0:1|1:-90/-900|-70/-70|-30/-9|0/-3:6:0:0:0" """,
    "Dynamic Normalization": """ -filter:a "dynaudnorm" """,
}
# Define Function to Format Command
def format_ffmpeg_command(in_path: str, out_path: str, filter: str,
                          ffmpeg_binary: str = "ffmpeg"):
    """Format the FFMPEG Filter Command."""
    def sanitize_input(param_string, wrap=True):
        # Only accept the first portion of any multi-command string
        param_string = param_string.split('&&')[0]
        param_string = param_string.split(';')[0]
        if wrap:
            # Wrap with Quotes
            if not param_string.startswith('"'):
                param_string = '"{}'.format(param_string)
            if not param_string.endswith('"'):
                param_string = '{}"'.format(param_string)
        return param_string
    # Sanitize Each of the Parameters
    in_path = sanitize_input(in_path)
    out_path = sanitize_input(out_path)
    filter = sanitize_input(filter, wrap=False)
    ffmpeg_binary = sanitize_input(ffmpeg_binary)
    # Format the Full Command String
    # NOTE: the keyword must match the {FFMPEG_EXE} placeholder in FILTER_STRING;
    # the original passed FFMPEG_BIN, which raises KeyError at format time.
    return FILTER_STRING.format(
        FFMPEG_EXE=ffmpeg_binary,
        IN_PATH=in_path,
        FILTER=filter,
        OUT_PATH=out_path,
    )
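# Example (paths illustrative): quoting is applied automatically, e.g.
# format_ffmpeg_command("in.mp3", "out.mp3", BUILTIN_FILTERS["Dynamic Normalization"])
# -> ffmpeg -i "in.mp3"  -filter:a "dynaudnorm"  "out.mp3"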

# ===== file: /admin.py (repo: akshat131/assignmnet3) =====
from django.contrib import admin
from .models import Tool
admin.site.register(Tool)
# Register your models here.

# ===== file: /PythonProjects/myscrapy/douban_movie_top250/douban_movie_top250/run.py (repo: enjoqy/PythonProjects) =====
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from scrapy.cmdline import execute
# The `name` parameter is the spider's name.
name = 'douban_movie_top250'
cmd = 'scrapy crawl {0} -o douban01.csv'.format(name)
execute(cmd.split())

# ===== file: /day1/c2f.py (repo: denisvrdoljak/AetnaCVS_Python_AUG2020) =====
#!/usr/bin/env python
# coding: utf-8
print("This is a C' to F' converter.")
temp_in_celsius = input("Please enter a temperature, in C\n--> ")
temp_in_fahrenheit = float(temp_in_celsius) *9/5 + 32
print("The temperature in Fahrenheit is: {:.2f}".format(temp_in_fahrenheit))

# ===== file: /uri/uri_python/iniciante/p1038.py (repo: GabrielEstevam/icpc_contest_training) =====
valor = input().split(" ")
codigo = int(valor[0])
quantidade = int(valor[1])
preco = [4, 4.5, 5, 2, 1.5]
print("Total: R$ %.2f" % (quantidade*preco[codigo-1]))

# ===== file: /Yichuan/mutation.py (repo: Cpaulyz) =====
import random
def mutation(pop, p_mutation):
    popSize = len(pop)
    chromSize = len(pop[0])
    mutation_pop = []
    for i in range(popSize):
        if random.random() < p_mutation:
            mPoint = random.randint(0, chromSize - 1)
            if pop[i][mPoint] == 1:
                pop[i][mPoint] = 0
            else:
                pop[i][mPoint] = 1  # was `== 1`, a no-op comparison; assignment intended
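# Quick demo of the in-place bit-flip mutation (illustrative parameters):
# random.seed(0)
# population = [[0, 1, 0, 1] for _ in range(4)]
# mutation(population, p_mutation=0.5)
# print(population)  # some bits flipped, population mutated in place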

# ===== file: /hash.py (repo: dzittin/hash-and-linked-list-modules) =====
import linked_list as llist
from collections import Counter  # Needed for diagnostics in the bottom part
class my_hash:
    def __init__(self, ht_size=100):
        """Create a hash table object"""
        self.ht_size = int(ht_size * 1.0)
        if self.ht_size < 10:
            self.ht_size = 10
        self.hash_table = [None for i in range(0, self.ht_size)]
        self.count = 0  # Keys in table

    def hash_gen(self, key):
        """Generate a hash table index"""
        return hash(key) % self.ht_size

    def find_key_value(self, key):
        """Find the value associated with key"""
        lst = self.hash_table[self.hash_gen(key)]
        if lst != None:
            return lst.find_value(key)
        return None

    def add(self, key, value):
        """Add unique key. Prohibit duplicate keys"""
        """A key must be hashable. A value can be any data type"""
        h_index = self.hash_gen(key)
        if self.hash_table[h_index] == None:  # No list at this index
            self.hash_table[h_index] = llist.linked_list(key, value)
            self.count += 1
        else:
            if self.find_key_value(key) == None:
                self.hash_table[h_index].add_node(key, value)
                self.count += 1
            else:
                return None  # Key already in table
        return not None

    def get_ht_count(self):
        """Return count of keys in table"""
        return self.count

    def get_node(self, key):
        """Access an individual key node"""
        """in order to change a value"""
        a_list = self.hash_table[self.hash_gen(key)]
        if a_list != None:
            return a_list.find_node(key)

    def change_value(self, key, value):
        node = self.get_node(key)
        if node != None:
            node.value = value
            return not None
        return None

    def increment_value(self, key, incrval=1):
        """If the node value is not type int throw value exception"""
        node = self.get_node(key)
        if node == None:
            return None
        if node != None:
            if type(node.value) == int:
                node.value += incrval
            else:
                s = "Key='{}', value '{}' cannot be incremented.".format(node.key, node.value)
                raise ValueError(s)

    def delete_key(self, key):
        """Delete a found key"""
        """If key is deleted, return 'not None'"""
        a_list = self.hash_table[self.hash_gen(key)]
        if a_list.head == None or a_list.find_node(key) == None:
            return None
        a_list.delete_node(key)
        self.count -= 1
        return not None

    def grow_ht(self):
        """Grow the table if there are too many collisions"""
        """Tables whose size is at least 80% of the key count"""
        """tend to perform well where at least 90% of the collisions"""
        """are in lists whose length is 3 or less"""
        pass
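# Minimal usage sketch (depends on the companion linked_list module):
# ht = my_hash(50)
# ht.add("alpha", 1)
# ht.increment_value("alpha")
# print(ht.find_key_value("alpha"))  # -> 2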
# #Uncomment from here to bottom to visualize hash table linked list lengths.
# def dump_table_contents(self, full_dump):
# """Dump the contents of a hash table"""
# """For debugging and analysis"""
# counter = Counter()
# for llnkedlst in self.hash_table:
# if llnkedlst != None:
# lst = llnkedlst.print_list()
# list_len = len(lst)
# counter[len(lst)] += 1
# if full_dump:
# print("{} {}".format(list_len, lst))
# cnt_list = [i for i in counter.items()]
# cnt_list.sort()
# return cnt_list
# import random
# def really_ugly_word(char_list):
# """Generate random string lengths to test hashing"""
# s = ""
# word_len = random.randrange(1, 14 + 1)
# for w in range(1, word_len + 1):
# c = random.choice(char_list)
# if c.isprintable() == True and c.isspace() == False:
# s += c
# if len(s) < 1 or len(s) > 14:
# print("Length ", len(s), s)
# return s
# def main():
# n = 1000
# ht = my_hash(int(n))
# chars = [chr(i) for i in range(32, 127)] #Printables
# i = 0
# while i < n:
# w = really_ugly_word(chars)
# if ht.find_key_value(w) == None: # No dupllicates
# ht.add(w, i)
# i += 1
# dump_whole_table = False
# freqs = ht.dump_table_contents(dump_whole_table)
# entries = 0
# lst = []
# for i in freqs:
# stars = "*" * (80 if i[1] > 80 else i[1])
# stars = stars + ("..." if i[1] > 80 else "")
# lst.append(("{:6d} {} ({})".format(i[0], stars, i[1]), i[1]))
# entries += i[0] * i[1]
# print("++++ {} entries\n".format(entries))
# print("Collision list length(len) and frequencies of a given length")
# print("{:>8s} {:>8s} {:^12s} {}".format("%", "cum%", "len", "frequency"))
# print("-" * 50)
# i = 1
# cum = 0
# for f in lst:
# percent = i * f[1] / entries
# cum += percent
# lst = f[0].split()
# print("{:8.1%} {:8.1%} {:^12s} {:s}".format(percent, cum, lst[0], lst[1] + lst[2]))
# i += 1
# if __name__ == "__main__":
#     main()

# ===== file: /Basics/transcription.py (repo: Grassporridge/Courses) =====
# code for DNA -> RNA transcription
# Accepts coding strand and template strand
# (str) -> (str)
def transcription(seq, coding_strand=True):
    seq = seq.upper()
    if coding_strand:
        rna = ['U' if nuc == 'T' else '' if nuc == ' ' or nuc == '\n' else nuc for nuc in seq]
    else:
        trans_map = {
            'A':'U', 'T':'A', 'G':'C', 'C':'G', '-':'-'
        }
        rna = [trans_map[nuc] if nuc in trans_map.keys() else '' if nuc == ' ' or nuc == '\n'
               else nuc for nuc in seq]
    if not(set(rna).issubset({'A','U','G','C','-',''})):
        raise ValueError("String isn't fully DNA sequence")
    rna = "".join(rna)
    return(rna)
"""
#test case 1
DNA = 'ATGCATgtca\n agtctagc'
RNA = transcription(DNA)
EXP_OP = 'AUGCAUGUCAAGUCUAGC'
assert RNA == EXP_OP, "base case doesn't work"
#test case 2
DNA = 'ATGCATgtca\n agtctagc'
RNA = transcription(DNA, coding_strand=False)
EXP_OP = 'UACGUACAGUUCAGAUCG'
assert RNA == EXP_OP, "Template strand case doesn't work"
#test case 3 (error check)
DNA = 'ATgagtca shatcgagtcagtacg'
try:
RNA = transcription(DNA)
except ValueError:
print("Error check works")
print("All test cases cleared")
"""

# ===== file: /todoapp/api/views.py (repo: madariyaprakash/TODOAPP) =====
from django.shortcuts import render
from api.models import Task
from rest_framework.decorators import api_view
from rest_framework.response import Response
from api.serializers import TaskSerializer
from rest_framework import status
from django.http.response import JsonResponse
@api_view(['GET'])
def api_overview(request):
    api_urls = {
        'Task List':'/task-list/',
        'Create':'/task-create/',
        'Update':'/task-update/<str:pk>/',
        'Detail':'/task-detail/<str:pk>/',
        'Delete':'/task-delete/<str:pk>/'
    }
    return Response(api_urls)
@api_view(['GET'])
def task_list(request):
    tasks = Task.objects.all()
    serializer = TaskSerializer(tasks, many=True)
    return JsonResponse(serializer.data, safe=False)
@api_view(['GET'])
def task_detail(request, pk):
    try:
        task = Task.objects.get(id=pk)
        serializer = TaskSerializer(task, many=False)
        return JsonResponse(serializer.data, safe=False)
    except:
        return JsonResponse('Record Not Found', safe=False)
@api_view(['POST'])
def task_update(request, pk):
    try:
        task = Task.objects.get(id=pk)
        serializer = TaskSerializer(instance=task, data=request.data)
        if serializer.is_valid():
            serializer.save()
        return JsonResponse("Updated Successfully!", safe=False)
    except:
        return JsonResponse("Failed to update!", safe=False)
@api_view(['POST'])
def task_create(request):
    serializer = TaskSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return JsonResponse("Created Successfully!", safe=False)
    return JsonResponse("Failed to create!", safe=False)
@api_view(['POST'])
def task_delete(request, pk):
    try:
        task = Task.objects.get(id=pk)
        task.delete()
        return JsonResponse("Deleted Successfully!", safe=False)
    except:
        return JsonResponse("Failed to delete!", safe=False)
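# These views are typically wired to the URLs listed in api_overview via a
# urls.py module (illustrative sketch, not part of this file):
# urlpatterns = [
#     path('task-list/', views.task_list),
#     path('task-detail/<str:pk>/', views.task_detail),
#     path('task-update/<str:pk>/', views.task_update),
# ]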
"[email protected]"
] | |
d4f8eae64ade0e39fab8925266724f9e0dd6a137 | f3fb71d50f38f5720358b6e822405f5725ac784d | /dynamic/views.py | 6db7703e5c0bddaea54eb5181822b0962718be4e | [] | no_license | easonfg/self_report | 58ddd1f25d52f99f92d95597644667eaab1304f9 | 38a9a144795091351f53ff085989a99ddefde70f | refs/heads/master | 2021-01-10T12:30:03.199216 | 2016-01-06T23:45:27 | 2016-01-06T23:45:27 | 49,168,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | from django.shortcuts import render
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django import forms
from django.shortcuts import redirect
from django.shortcuts import render, get_object_or_404
from .models import Post
from django.utils import timezone
#from .forms import PostForm
#from .forms import MyForm
from django.shortcuts import render
from django.http import HttpResponseRedirect
#from .forms import NameForm, ContactForm, PostForm, js_form
from .forms import js_form
def dynamic_js(request):
if request.method == 'POST':
form = js_form(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return HttpResponseRedirect('/thanks/')
else:
form = js_form()
return render(request, 'test/index.html', {'form': form})
| [
"[email protected]"
] | |
3cab93f0ef0922018403a2652ca9cfee2e7bff9d | 9e4fe71775ca1fc4fd7320d4d65c034c50e3e251 | /tests/config/test_config_snips.py | ba5c71275abcd8c67b9f582df74cc0123fa1409f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | koenvervloesem/snipskit | 468eed362aba9f7013d437141d9e7f4348c08be4 | 442aa70d423f8371de5f3557b8e2fc6a7c6e19bf | refs/heads/master | 2020-04-28T12:43:21.412408 | 2019-12-06T23:46:36 | 2019-12-06T23:46:36 | 175,284,635 | 12 | 4 | MIT | 2019-05-01T08:03:48 | 2019-03-12T19:40:38 | Python | UTF-8 | Python | false | false | 2,507 | py | """Tests for the `snipskit.config.SnipsConfig` class."""
import pytest
from snipskit.config import SnipsConfig
from snipskit.exceptions import SnipsConfigNotFoundError
from toml import TomlDecodeError
def test_snips_config_default(fs):
    """Test whether a default `SnipsConfig` object is initialized correctly."""
    config_file = '/usr/local/etc/snips.toml'
    fs.create_file(config_file,
                   contents='[snips-hotword]\n'
                            'audio = ["+@mqtt"]\n')
    snips_config = SnipsConfig()
    assert snips_config.filename == config_file
    assert snips_config['snips-hotword']['audio'] == ["+@mqtt"]
def test_snips_config_with_filename(fs):
    """Test whether a `SnipsConfig` object is initialized correctly with a
    filename argument."""
    config_file = '/usr/local/etc/snips.toml'
    fs.create_file(config_file,
                   contents='[snips-hotword]\n'
                            'audio = ["+@mqtt"]\n')
    snips_config = SnipsConfig(config_file)
    assert snips_config.filename == config_file
    assert snips_config['snips-hotword']['audio'] == ["+@mqtt"]
def test_snips_config_key_not_found(fs):
    """Test whether accessing a key that doesn't exist in a `SnipsConfig`
    object raises a `KeyError`.
    """
    config_file = '/usr/local/etc/snips.toml'
    fs.create_file(config_file,
                   contents='[snips-hotword]\n'
                            'audio = ["+@mqtt"]\n')
    snips_config = SnipsConfig()
    with pytest.raises(KeyError):
        snips_config['snips-hotword']['model']
def test_snips_config_broken_toml(fs):
    """Test whether a `SnipsConfig` object raises `TomlDecodeError` when a
    broken TOML file is read.
    """
    config_file = '/etc/snips.toml'
    fs.create_file(config_file,
                   contents='[snips-hotword\n'
                            'audio = ["+@mqtt"]\n')
    with pytest.raises(TomlDecodeError):
        snips_config = SnipsConfig()
def test_snips_config_file_not_found(fs):
    """Test whether a `SnipsConfig` object raises `FileNotFoundError` when the
    specified file doesn't exist.
    """
    with pytest.raises(FileNotFoundError):
        snips_config = SnipsConfig('/etc/snips.toml')
def test_snips_config_no_config_file(fs):
    """Test whether a `SnipsConfig` object raises `SnipsConfigNotFoundError`
    when there's no snips.toml found in the search path.
    """
    with pytest.raises(SnipsConfigNotFoundError):
        snips_config = SnipsConfig()

# ===== file: /loanchain/.../eth_tester/utils/secp256k1.py (repo: adithyabsk/loanchain) =====
"""
Functions lifted from https://github.com/vbuterin/pybitcointools
"""
from eth_utils import (
    big_endian_to_int,
    int_to_big_endian,
    is_bytes,
    pad_left,
)
from eth_tester.constants import (
    SECPK1_G,
    SECPK1_N,
)
from .jacobian import (
    fast_multiply,
)
def _pad32(value):
    return pad_left(value, 32, b'\x00')
def _encode_raw_public_key(raw_public_key):
    left, right = raw_public_key
    return b''.join((
        _pad32(int_to_big_endian(left)),
        _pad32(int_to_big_endian(right)),
    ))
def private_key_to_public_key(private_key):
    if not is_bytes(private_key) or len(private_key) != 32:
        raise TypeError("`private_key` must be of type `bytes` and of length 32")
    private_key_as_num = big_endian_to_int(private_key)
    if private_key_as_num >= SECPK1_N:
        raise Exception("Invalid privkey")
    raw_public_key = fast_multiply(SECPK1_G, private_key_as_num)
    public_key = _encode_raw_public_key(raw_public_key)
    return public_key
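# Example (illustrative key): the result is the 64-byte uncompressed point
# (two 32-byte big-endian coordinates concatenated).
# pub = private_key_to_public_key(b'\x01' * 32)
# assert len(pub) == 64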

# ===== file: /modules/i2clibraries/i2c_itg3205.py (repo: lotek93/quadrocopter) =====
import math
from i2clibraries import i2c
from time import *
class i2c_itg3205:
WhoAmI = 0x0
SampleRateDivider = 0x15
DLPFAndFullScale = 0x16
InterruptConfig = 0x17
InterruptStatus = 0x1A
TempDataRegisterMSB = 0x1B
TempDataRegisterLSB = 0x1C
GyroXDataRegisterMSB = 0x1D
GyroXDataRegisterLSB = 0x1E
GyroYDataRegisterMSB = 0x1F
GyroYDataRegisterLSB = 0x20
GyroZDataRegisterMSB = 0x21
GyroZDataRegisterLSB = 0x22
PowerManagement = 0x3E
# DLPF, Full Scale Setting
FullScale_2000_sec = 0x18 # must be set at reset
    DLPF_256_8 = 0x00  # Consult datasheet for explanation
DLPF_188_1 = 0x01
DLPF_98_1 = 0x02
DLPF_42_1 = 0x03
DLPF_20_1 = 0x04
DLPF_10_1 = 0x05
DLPF_5_1 = 0x06
# Power Management Options
    PM_H_Reset = 0x80 # Reset device and internal registers to power-up-default settings
PM_Sleep = 0x40 # Enables low power sleep mode
PM_Standby_X = 0x20 # Put Gyro X in standby mode
PM_Standby_Y = 0x10 # Put Gyro Y in standby mode
PM_Standby_Z = 0x08 # Put Gyro Z in standby mode
PM_Clock_Internal = 0x00 # Use internal oscillator
PM_Clock_X_Gyro = 0x01
PM_Clock_Y_Gyro = 0x02
PM_Clock_Z_Gyro = 0x03
PM_Clock_Ext_32_768 = 0x04
PM_Clock_Ext_19_2 = 0x05
# Interrupt Configuration
IC_IntPinActiveLow = 0x80
IC_IntPinOpen = 0x40
IC_LatchUntilIntCleared = 0x20
IC_LatchClearAnyRegRead = 0x10
IC_IntOnDeviceReady = 0x04
IC_IntOnDataReady = 0x01
# Address will always be either 0x68 (104) or 0x69 (105)
def __init__(self, port, addr=0x69):
self.bus = i2c.i2c(port, addr)
self.setPowerManagement(0x00)
self.setSampleRateDivider(0x07)
# self.setSampleRateDivider(0x0)
self.setDLPFAndFullScale(self.FullScale_2000_sec, self.DLPF_188_1)
# self.setDLPFAndFullScale(self.FullScale_2000_sec, self.DLPF_256_8)
self.setInterrupt(self.IC_LatchUntilIntCleared, self.IC_IntOnDeviceReady, self.IC_IntOnDataReady)
def setPowerManagement(self, *function_set):
self.setOption(self.PowerManagement, *function_set)
def setSampleRateDivider(self, divider):
self.setOption(self.SampleRateDivider, divider)
def setDLPFAndFullScale(self, *function_set):
self.setOption(self.DLPFAndFullScale, *function_set)
def setInterrupt(self, *function_set):
self.setOption(self.InterruptConfig, *function_set)
def setOption(self, register, *function_set):
options = 0x00
for function in function_set:
options = options | function
self.bus.write_byte(register, options)
# Adds to existing options of register
def addOption(self, register, *function_set):
options = self.bus.read_byte(register)
for function in function_set:
options = options | function
self.bus.write_byte(register, options)
# Removes options of register
def removeOption(self, register, *function_set):
options = self.bus.read_byte(register)
for function in function_set:
options = options & (function ^ 0b11111111)
self.bus.write_byte(register, options)
def getWhoAmI(self):
whoami = self.bus.read_byte(self.WhoAmI)
return whoami
def getDieTemperature(self):
temp = self.bus.read_s16int(self.TempDataRegisterMSB)
temp = round(35 + (temp + 13200) / 280, 2)
return temp
def getInterruptStatus(self):
        (_, _, _, _, _, itgready, _, dataready) = self.getOptions(self.InterruptStatus)
return (itgready, dataready)
def getOptions(self, register):
options_bin = self.bus.read_byte(register)
options = [False, False, False, False, False, False, False, False]
for i in range(8):
if options_bin & (0x01 << i):
options[7 - i] = True
return options
def getAxes(self):
gyro_x = self.bus.read_s16int(self.GyroXDataRegisterMSB)
gyro_y = self.bus.read_s16int(self.GyroYDataRegisterMSB)
gyro_z = self.bus.read_s16int(self.GyroZDataRegisterMSB)
return (gyro_x, gyro_y, gyro_z)
def getDegPerSecAxes(self):
(gyro_x, gyro_y, gyro_z) = self.getAxes()
return (gyro_x / 14.375, gyro_y / 14.375, gyro_z / 14.375) | [
"[email protected]"
] | |
703935a75fe4bce80e7ed211280e8d8618c1a11e | f4af8ad91f3bfa07dbf1ff77be34e52b075a72fb | /python_work/appium_work4/page/black_handle.py | 24a2f6e0fad49ecce2d5d74fd583c5fe938ff3b3 | [] | no_license | nuannanxiaofeige/HogwartsLG5 | 70237171452a19b85be5536c015974f755319de2 | 10e84fe4f45772f74839af91bf30fbfdfb7e7b84 | refs/heads/master | 2023-03-16T12:37:55.585440 | 2021-02-28T08:08:28 | 2021-02-28T08:08:28 | 322,071,179 | 0 | 0 | null | 2020-12-16T18:40:32 | 2020-12-16T18:40:32 | null | UTF-8 | Python | false | false | 868 | py | # -*- coding:utf-8 -*-
import yaml
def black_handle(fun):
def run (*args,**kwargs):
instance=args[0]
with open("../data/blacklist.yaml","r",encoding="utf-8") as f:
black_lists= yaml.load(f)
# 捕获异常
try:
return fun(*args,**kwargs)
except Exception as e:
# 遍历黑名单
for black in black_lists:
# 如果发现黑名单中的元素存在
eles = instance.driver.find_elements(*black)
# 对黑名单的元素进行处理
if len(eles) > 0:
# 通过点击的方式关闭
eles[0].click()
# 再次查找
return fun(*args,**kwargs)
raise e
return run
| [
"[email protected]"
] | |
8056393586b0457cafedcfb0b3a25a9483302185 | 268c458151b99f6ad1c462a773fc51979d954992 | /Dental_hygiene_RPG_V4.py | d22eda7b9b52aa87626cee3c1d84d6e9560c2ada | [] | no_license | JarvisWarnockOnslow/12DTC-Iterative-Project | f008f8d51f05b3eec9bee5c917d4427a93252c8f | b7da1698a6904da805005008c24e7a0feb867b91 | refs/heads/master | 2020-06-24T09:52:31.339278 | 2019-09-26T19:43:26 | 2019-09-26T19:43:26 | 198,932,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,359 | py | ##
# Dental_hygiene_RPG.py
# Author: Jarvis Warnock
# A game to help promote dental hygiene in the form of an RPG
# Created: 03/07/19
def enemy_turn(player_health):
player_health = player_health
choice = random.randint(1,4)
chance = random.randint(1,4)
if choice == 1:
player_health -= 10
print("The evil wizard used plaque (-10hp)\n")
elif choice == 2:
player_health -= 15
print("The evil wizard used unhealthy food (-15hp)\n")
elif choice == 3:
if chance > 2 and chance <= 4:
player_health -= 20
print("The evil wizard used bleeding gums (-20hp)\n")
else:
print("The evil wizard tried to give you bleeding gums, and failed\n")
elif choice == 4:
if chance == 4:
player_health -= 25
print("The evil wizard used Holes in your teeth (-25hp)\n")
else:
print("The evil wizard tried to put holes in your teeth, and failed\n")
time.sleep(1)
return player_health
def player_turn(player_health, enemy_health):
player_health = player_health
repeat = True
while repeat == True:
turn_option = input("""What would you like to do?:
(A)Attack
(H)Heal
""").title()
# If the player wants to attack
if turn_option == 'A':
chance = random.randint(1,4)
attacks = input("""What attack would you like to use?
(1)Flossing - 10hp - 75% chance
(2)Brushing - 5hp - 100% chance
(3)Eat Healthy - 20hp - 25% chance
(4)Mouthwash - 15hp - 50% chance
""")
#Flossing attack
if attacks == '1':
if chance > 0 and chance < 4:
enemy_health -= 10
print("Your flossing worked (-10hp)\n")
elif chance == 4:
enemy_health -= 15
print("Your flossing worked and it was very effective (-15hp)\n")
else:
print("Your flossing did not work\n")
repeat = False
#Brushing attack
elif attacks == '2':
if chance == 4:
enemy_health -= 10
print("Your brushing worked and it was very effective (-10hp)\n")
else:
enemy_health -= 5
print("Your brushing worked (-5hp)\n")
repeat = False
#Eating healthy attack
elif attacks == '3':
if chance == 4:
enemy_health -= 20
print("Eating healthy worked (-20hp)\n")
else:
print("Eating healthy did not work\n")
repeat = False
#Mouthwash attack
elif attacks == '4':
if chance > 2 and chance <= 4:
enemy_health -= 15
print("Using mouthwash worked (-15hp)\n")
elif chance == 4:
enemy_health -= 20
print("Using mouthwash worked and it was very effective (-20hp)\n")
else:
print("Using mouthwash did not work\n")
repeat = False
else:
print("Please enter a valid option\n")
# If the player wants to heal
elif turn_option == 'H':
heal = input("""How would you like to heal?
(1)Get a new toothbrush - 15hp - 50% chance
(2)Go to the dentist - 5hp - 100% chance
""")
if heal == '2':
print("You went to the dentist (+5hp)\n")
player_health += 5
repeat = False
elif heal == '1':
chance = random.randint(1,2)
if chance == 1:
player_health += 15
print("The toothbrush worked (+15hp)\n")
else:
print("The tootbrush did not help\n")
repeat = False
else:
print("Please enter a valid option\n")
time.sleep(1)
return(player_health, enemy_health)
def main():
option = ""
# Tells the user a small description of what the game is about
print("In this game you will be tasked with stopping an evil wizard who is trying to bring bad dental hygiene upon the world. Use these special powers I am giving you to stop him!")
while option != "Quit":
option = input("Would you like to: (P) Play, (H) How to Play, or (Q) Quit\n").title()
if option == "P":
enemy_health = 10
player_health = 100
points = 0
enemy_no = 1
highscore = 0
while player_health > 0:
print("Your health: {}".format(player_health))
print("Enemy health: {}\n".format(enemy_health))
print("Enemy Number: {}".format(enemy_no))
print("Points: {}".format(points))
print("Highscore: {}".format(highscore))
print("<<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>>\n")
player_health, enemy_health = player_turn(player_health, enemy_health)
player_health = enemy_turn(player_health)
if enemy_health <= 0:
enemy_health = 10 + (enemy_no * 5)
enemy_no += 1
points += 10 * enemy_health
if player_health <= 0:
print("You have been defeated by the evil dental forces. Your score was {}. Why not play again?".format(points))
time.sleep(1)
print("Battles like this happen everyday within our lives. Make sure that you brush your teeth, floss, eat health foods, and go to the dentist to make sure that you have a healthy life.")
if highscore > score:
highscore = score
time.sleep(5)
elif option == "H":
print("""How to Play:
            This is a turn-based game where you will choose an action to perform, such as attack or heal.
            You will have to defeat the horde of evil minions who are trying to spread bad dental hygiene.
            The enemies will get harder as you go, but the harder the enemy, the more points you get, so get out there and try to get a highscore!""")
time.sleep(2)
elif option == "Q":
while option == "Q":
check = "N"
# Confirms if the user want to quit the program
while check != "Yes":
check = input("Are you sure that you want to quit the program? (Y/N)").upper()
if check == "N":
option = ""
check = "Yes"
elif check == "Y":
check = "Yes"
option = "Quit"
else:
print("Please enter 'Y' or 'N'")
else:
print("Please enter a valid option")
import random
import time
main()
| [
"[email protected]"
] | |
e05b8353c40489490a1f5c9706da4cc8ba65d448 | d3e306c55ddc38b31a9500d673f1223927564935 | /tutorial-simpy/car_example1.py | 1f3e4375cba38006835500f116745aa5af5f5d3d | [] | no_license | italocampos/simulacao-discreta | 292a38c81005bc82ed02e389f745b38ab2cd4e18 | 2b0099b9ef5f52451ac655b23174829e519b35a9 | refs/heads/master | 2020-03-21T13:32:42.486927 | 2019-09-11T00:47:35 | 2019-09-11T00:47:35 | 138,611,519 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # First one simulation
import simpy
def carro(ambiente):
while True:
print('Estacionando em {}'.format(ambiente.now))
duracao_estacionamento = 5
yield ambiente.timeout(duracao_estacionamento)
print('Dirigindo em {}'.format(ambiente.now))
duracao_direcao = 2
yield ambiente.timeout(duracao_direcao)
ambiente = simpy.Environment()
processo = carro(ambiente)
ambiente.process(processo)
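# Expected trace (sketch): the car alternates 5 time units parked and 2 time
# units driving, so the prints fire at t = 0, 5, 7, 12 and 14 before the run
# stops at t = 15.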
ambiente.run(until = 15) | [
"[email protected]"
] | |
98e5bb02b2f1e5c29f9b110dae3b25cd10b004f1 | d75703c2083dfc508c5608c4c35167b67d1a4308 | /2nd Chapter/graphTwo.py | 13107201655ba9be1fc0423142010b1927106346 | [] | no_license | vubon/Python-core | e8159763d281152a1b64da3a0534899fd3def2b5 | a415ef3c6159f0c85afa3240a762a00b2c68bd02 | refs/heads/master | 2020-07-03T17:08:10.091827 | 2016-12-09T19:26:51 | 2016-12-09T19:26:51 | 67,540,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import networkx as nx
G=nx.Graph()
G.add_node("A")
G.add_node("B")
G.add_none("C")
G.add_edge("A","B")
G.add_edge("B", "C")
G.add_edge("C", "A")
print("Nodes: " + str(G.nodes()))
print("Edges: " + str(G.edge()))
| [
"[email protected]"
] | |
b4761b8e2a9dacd815fee80f5f9b586f897665ce | 6f596c280e6c9ec28c1fcd3be7555672c918c5be | /maintainers/scripts/hydra-eval-failures.py | 1b5df32c452ffab9ab9287760614ad81b28ae9ed | [
"MIT"
] | permissive | terretta/nixpkgs | 63f6cde5ee8ca37e52b05bbf25abd971778d493d | eff30adc0daefb09690c0af5f65093a91df56487 | refs/heads/master | 2023-06-25T05:48:49.921782 | 2017-02-09T22:41:55 | 2017-02-09T22:41:55 | 81,501,724 | 0 | 0 | NOASSERTION | 2023-06-09T23:49:45 | 2017-02-09T22:34:34 | Nix | UTF-8 | Python | false | false | 2,578 | py | #!/usr/bin/env nix-shell
#!nix-shell -i python -p pythonFull pythonPackages.requests pythonPackages.pyquery pythonPackages.click
# To use, just execute this script with --help to display help.
import subprocess
import json
import click
import requests
from pyquery import PyQuery as pq
maintainers_json = subprocess.check_output([
'nix-instantiate',
'lib/maintainers.nix',
'--eval',
'--json'])
maintainers = json.loads(maintainers_json)
MAINTAINERS = {v: k for k, v in maintainers.iteritems()}
def get_response_text(url):
return pq(requests.get(url).text) # IO
EVAL_FILE = {
'nixos': 'nixos/release.nix',
'nixpkgs': 'pkgs/top-level/release.nix',
}
def get_maintainers(attr_name):
nixname = attr_name.split('.')
meta_json = subprocess.check_output([
'nix-instantiate',
'--eval',
'--strict',
'-A',
'.'.join(nixname[1:]) + '.meta',
EVAL_FILE[nixname[0]],
'--json'])
meta = json.loads(meta_json)
if meta.get('maintainers'):
return [MAINTAINERS[name] for name in meta['maintainers'] if MAINTAINERS.get(name)]
@click.command()
@click.option(
'--jobset',
default="nixos/release-16.09",
help='Hydra project like nixos/release-16.09')
def cli(jobset):
"""
Given a Hydra project, inspect latest evaluation
and print a summary of failed builds
"""
url = "http://hydra.nixos.org/jobset/{}".format(jobset)
# get the last evaluation
click.echo(click.style(
'Getting latest evaluation for {}'.format(url), fg='green'))
d = get_response_text(url)
evaluations = d('#tabs-evaluations').find('a[class="row-link"]')
latest_eval_url = evaluations[0].get('href')
# parse last evaluation page
click.echo(click.style(
'Parsing evaluation {}'.format(latest_eval_url), fg='green'))
d = get_response_text(latest_eval_url + '?full=1')
# TODO: aborted evaluations
# TODO: dependency failed without propagated builds
for tr in d('img[alt="Failed"]').parents('tr'):
a = pq(tr)('a')[1]
print "- [ ] [{}]({})".format(a.text, a.get('href'))
maintainers = get_maintainers(a.text)
if maintainers:
print " - maintainers: {}".format(", ".join(map(lambda u: '@' + u, maintainers)))
# TODO: print last three persons that touched this file
# TODO: pinpoint the diff that broke this build, or maybe it's transient or maybe it never worked?
if __name__ == "__main__":
try:
cli()
except:
import pdb;pdb.post_mortem()
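# Example invocation (sketch):
#   ./hydra-eval-failures.py --jobset nixos/release-16.09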
| [
"[email protected]"
] | |
fdbfe0ad25905e87c1a06d548c6f77ce523e6ecd | d956346d5bc05ffac76a0e973d49c32cde3bf5d5 | /clinicalSearch/migrations/0001_initial.py | b5ebd66ffe461ddc3571c850a1a05f3f1b121909 | [] | no_license | ShahidTariq/SearchClinicalTrials | e58df4515804cc69d24ab3667e68814996e4db09 | e1f111ea1c75cdba6053645e93a4f9cae7e89de8 | refs/heads/master | 2021-05-08T23:23:51.709651 | 2018-02-08T15:03:00 | 2018-02-08T15:03:00 | 119,706,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,169 | py | # Generated by Django 2.0.1 on 2018-02-06 06:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ClinicalStudy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('org_study_id', models.CharField(max_length=100)),
('nct_id', models.CharField(max_length=100)),
('official_title', models.CharField(max_length=100)),
('lead_sponsor_agency', models.CharField(max_length=100)),
('lead_sponsor_agency_class', models.CharField(max_length=100)),
('source', models.CharField(max_length=500)),
('brief_summary', models.TextField()),
('detail_description', models.TextField()),
('overall_status', models.CharField(max_length=100)),
('start_date', models.CharField(max_length=100)),
('completion_date', models.CharField(max_length=100)),
('study_type', models.CharField(max_length=100)),
('no_of_arms', models.IntegerField(default=0)),
('no_of_enrollment', models.IntegerField(default=0)),
('enrollment_type', models.CharField(max_length=100)),
('eligibility_study_pop', models.TextField()),
('eligibility_sampling_method', models.CharField(max_length=100)),
('eligibility_criteria', models.TextField()),
('eligibility_gender', models.CharField(max_length=100)),
('eligibility_min_age', models.CharField(max_length=100)),
('eligibility_max_age', models.CharField(max_length=100)),
('overall_official_name', models.CharField(max_length=100)),
('overall_official_role', models.CharField(max_length=100)),
('overall_official_affiliation', models.CharField(max_length=100)),
('result_first_posted_date', models.CharField(max_length=100)),
('last_updated_date', models.CharField(max_length=100)),
('verification_date', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Condition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condition_name', models.CharField(max_length=200)),
('clinicalStudyId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinicalSearch.ClinicalStudy')),
],
),
migrations.CreateModel(
name='Intervention',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('intervention_name', models.CharField(max_length=200)),
('intervention_type', models.CharField(max_length=200)),
('intervention_description', models.CharField(max_length=200)),
('clinicalStudyId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinicalSearch.ClinicalStudy')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location_name', models.CharField(max_length=100)),
('location_status', models.CharField(max_length=100)),
('location_city', models.CharField(max_length=100)),
('location_state', models.CharField(max_length=100)),
('location_zip', models.CharField(max_length=100)),
('location_country', models.CharField(max_length=100)),
('clinicalStudyId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinicalSearch.ClinicalStudy')),
],
),
migrations.CreateModel(
name='Mesh',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mesh_name', models.CharField(max_length=200)),
('clinicalStudyId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinicalSearch.ClinicalStudy')),
],
),
migrations.CreateModel(
name='Outcome',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('outcome_type', models.CharField(max_length=200)),
('measure', models.TextField()),
('timeFrame', models.TextField()),
('description', models.TextField()),
('clinicalStudyId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinicalSearch.ClinicalStudy')),
],
),
]
| [
"[email protected]"
] | |
354596a7e215dbda43d8b2a0e5becc1707e1fa44 | e3946d91dc5fe71989c2f4b6390232865fcb5d1b | /fjord/flags/tests/test_tasks.py | cc907a579b61d1e71e3621331f63e2dfa138d835 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | zeusintuivo/fjord | 61b632fd6df0e1b3508e628fe4f682a937cc0244 | 3bd227004d369df1fdc39f06acff12ebc8f0fe34 | refs/heads/master | 2021-01-16T18:28:52.564638 | 2014-09-24T21:02:51 | 2014-09-24T21:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | from mock import patch
from nose.tools import eq_
# These tests require that tasks be imported so that the post_save
# signal is connected. Don't remove this.
import fjord.flags.tasks # noqa
from fjord.base.tests import TestCase
from fjord.feedback.tests import ResponseFactory
from fjord.flags.spicedham_utils import get_spicedham, tokenize
class TestClassifyTask(TestCase):
def test_classify_task(self):
"""flags should be created if classifier returns True"""
with patch('fjord.flags.tasks.classify') as classify_mock:
classify_mock.return_value = True
# This creates the response and saves it which kicks off
# the classifier task. It should be classified as abuse.
resp1 = ResponseFactory(locale=u'en-US', description=u'ou812')
eq_(classify_mock.call_count, 1)
eq_(sorted([f.name for f in resp1.flag_set.all()]),
['abuse'])
def test_classify_false_task(self):
"""flags shouldn't be created if classifier returns False"""
with patch('fjord.flags.tasks.classify') as classify_mock:
classify_mock.return_value = False
# This creates the response and saves it which kicks off
# the classifier task. It should not be classified as
# abuse.
resp1 = ResponseFactory(locale=u'en-US', description=u'ou812')
eq_(classify_mock.call_count, 1)
eq_([f.name for f in resp1.flag_set.all()], [])
def test_ignore_non_english(self):
"""non-en-US responses should be ignored"""
with patch('fjord.flags.tasks.classify') as classify_mock:
# This response is not en-US, so classify should never get
# called.
resp1 = ResponseFactory(locale=u'es', description=u'ou812')
eq_(classify_mock.called, False)
eq_([f.name for f in resp1.flag_set.all()], [])
class TestClassification(TestCase):
def train(self, descriptions, is_abuse=True):
# Note: This is probably a cached Spicedham object.
sham = get_spicedham()
for desc in descriptions:
sham.train(tokenize(desc), match=is_abuse)
def test_abuse(self):
self.train([
'gross gross is gross gross gross browser',
'gross icky gross gross browser',
'gross is mcgrossy gross',
'omg worst gross',
'browser worst'
], is_abuse=True)
self.train([
'Firefox is super!',
'Great browser!',
'Super fast!',
'Not gross!',
'super not gross!'
], is_abuse=False)
# This creates the response and saves it which kicks off
# the classifier task. It should be classified as abuse.
resp = ResponseFactory(
locale=u'en-US', description=u'browser is gross!')
eq_(sorted([f.name for f in resp.flag_set.all()]),
['abuse'])
| [
"[email protected]"
] | |
9483d78b53d74a577fa037cb76a08eada7ce6534 | 642e36c81ecb29d6d083180ae4e74345fb335ebe | /py_scripts/junk/vis_2_level_with_atlas_oxford copy_2.py | 2289dc527787095b02d8d57f1d02cb9c189f4f5a | [] | no_license | Olu93/project_basic_fmri_analysis | d0216093fe7841cce1d04e5cd68e819e3688cf90 | e48cc87eaeb1ee04f94ce9643d6945ba9da9c328 | refs/heads/master | 2023-07-13T00:13:15.950044 | 2021-08-17T15:57:38 | 2021-08-17T15:57:38 | 397,305,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,971 | py | from nilearn.masking import apply_mask
from py_scripts.helper_func import shorten_labels
from typing import OrderedDict
from matplotlib.colors import LinearSegmentedColormap, BASE_COLORS
from nilearn import plotting
import nilearn
from nilearn import image
import matplotlib.pyplot as plt
import pathlib as path
from pprint import pprint
from nilearn import datasets
from nilearn.datasets.atlas import fetch_atlas_smith_2009
from nilearn.datasets.struct import load_mni152_template
from nilearn.image.image import threshold_img
from nilearn.image.resampling import resample_to_img
from nilearn.plotting import find_cuts
from nilearn.plotting.displays import MosaicSlicer, TiledSlicer
from nilearn.plotting.find_cuts import find_cut_slices, find_xyz_cut_coords
from nilearn.regions.region_extractor import RegionExtractor, connected_label_regions, connected_regions
from nilearn.input_data import NiftiLabelsMasker, NiftiMapsMasker, NiftiMasker
import numpy as np
# from nilearn.regions import connected_regions
# atlas_data_aal = datasets.fetch_atlas_aal()
# atlas_data_msdl = datasets.fetch_atlas_msdl()
# atlas_data_icbm = datasets.fetch_icbm152_2009()
# atlas_data_allen = datasets.fetch_atlas_allen_2011()
# atlas_data_seitzman = datasets.fetch_coords_seitzman_2018()
# atlas_data_smith = datasets.fetch_atlas_smith_2009()
# atlas_data_schaefer = datasets.fetch_atlas_schaefer_2018()
atlas_data_oxford_4d = datasets.fetch_atlas_harvard_oxford('cort-prob-1mm')
atlas_data_yeo = datasets.fetch_atlas_yeo_2011()
atlas_data_talairach = datasets.fetch_atlas_talairach('gyrus')
atlas_data_oxford = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr0-1mm')
atlas_labels = atlas_data_oxford_4d.labels[1:]
print(len(atlas_labels))
selected_indices = np.arange(0, 48)
data_load = image.load_img(atlas_data_oxford_4d.maps)
data_load = image.index_img(data_load, selected_indices)
coordinates = plotting.find_probabilistic_atlas_cut_coords(data_load)
selected_indices_level_2 = [6, 16, 30] + [17, 18, 25] + list(range(41, 46))
atlas_mapping = {
oidx: {
"label_str": shorten_labels(atlas_labels[oidx]),
"old_label_num": oidx,
"label_num": idx,
"coord": coord,
"contour": image.index_img(data_load, oidx),
"mask": image.get_data(data_load) == oidx,
# "contour": data_load,
}
for idx, (oidx, coord) in enumerate(zip(selected_indices, coordinates))
}
template = load_mni152_template()
sub_file_path = path.Path("./global_results/dispersion").absolute()
target_path = path.Path("./figures").absolute()
# mask = RegionExtractor(image.index_img(data_load, [6, 16])).fit()
# masker = NiftiLabelsMasker(data_load).fit()
# report = masker.generate_report()
# plotting.show()
# download some example data
# haxby_dataset = datasets.fetch_haxby()
subset_num = 1
all_normed_anatomical_images = list(sub_file_path.rglob('./**/spmT_0001.nii'))
all_movement_images = image.load_img(str(list(sub_file_path.rglob('./movement_all/spmT_0001.nii'))[0]))
tmp = list(path.Path("./global_results/").rglob('./**/mean_*.nii'))[0]
mean_anatomical_image = {
"fp": tmp.as_posix(),
"type": "t1",
"name": tmp.name,
"image": image.load_img(str(tmp)),
}
all_color_maps = [(k, type(v)) for k, v in plotting.cm.__dict__.items()
if isinstance(v, LinearSegmentedColormap) and (k.startswith("blue") or k.startswith("purple"))][::2]
all_base_colors = [(k, v) for k, v in BASE_COLORS.items() if k not in ["b", "w"]]
num_files = len(atlas_mapping)
num_maps = 8
trh = 4.3
image_type = "image_tresh"
dataset = {
fn.as_posix(): {
"fp": fn.absolute(),
"type": fn.absolute().parent.name,
"name": fn.absolute().name,
"image": image.load_img(str(fn)),
"image_tresh": threshold_img(image.load_img(str(fn)), threshold=trh, copy=False),
# "image_masked": mask.transform(image.load_img(str(fn)))
# "image_masked": image.get_data(image.load_img(str(fn))) == 6
}
for fn in all_normed_anatomical_images[::subset_num] if not fn.is_dir()
}
against_rest = True
feet_suffix = ("_vs_rest" if against_rest else "")
lh_suffix = ("_vs_rest" if against_rest else "")
rh_suffix = ("_vs_rest" if against_rest else "")
tongue_suffix = ("_vs_rest" if against_rest else "")
movement_all_suffix = "movement_all"
dataset = {
v["type"]: v
for k, v in dataset.items() if v["type"] in [
# "feet" + feet_suffix, # 6, 16, 25, 30
# "lh" + lh_suffix, # 6 16-19
# "rh" + rh_suffix, # 6 16-19
"tongue" + tongue_suffix, # 6, 16, (41-45 - weak)
# "movement_all",
# "resting",
]
}
dataset = {
k: dict([("color_map_name", all_color_maps[idx][0]), ("color_map", all_color_maps[idx][1]),
("color_name", all_base_colors[idx][0]), ("color", all_base_colors[idx][1])] + list(v.items()))
for idx, (k, v) in enumerate(dataset.items())
}
# all_in_one = image.mean_img([v["image_masked"] for _, v in dataset.items()])
# regions_percentile_img, index = connected_regions(all_in_one)
for img_num, ns_file in enumerate(dataset.values()):
fig, axes = plt.subplots(nrows=(num_files // num_maps), ncols=num_maps)
fig.set_size_inches((5 * num_maps, 5 * (num_files // num_maps)))
faxes = axes.flatten()
str_arrangement = 'tiled'
cut_coords = TiledSlicer.find_cut_coords(ns_file[image_type])
for ax, col_num, (dict_idx, atlas_entry) in zip(faxes, range(num_files), atlas_mapping.items()):
label_str, old_label_idx, label_idx, coords, atlas_image, atlas_mask = atlas_entry.values()
title_str = f"{old_label_idx}: {label_str}"
curr_type = ns_file["type"].replace("_vs_rest", "")
print(title_str)
# coords = cut_coords[:, idx % cut_coords.shape[1]]
coords = cut_coords
display = plotting.plot_stat_map(
apply_mask(ns_file[image_type], image.new_img_like(ns_file[image_type], atlas_mask)),
bg_img=mean_anatomical_image["image"],
# view_type='contours',
colorbar=0,
black_bg=1,
threshold=3,
axes=ax,
display_mode=str_arrangement,
cut_coords=coords,
)
display.title(
title_str,
y=1.1,
color='white',
bgcolor='black',
fontsize='xx-large',
)
display.add_contours(
atlas_image,
filled=False,
colors='r',
levels=[5],
# cmap=ns_file["color_map_name"],
)
print(display.savefig(target_path / "misc" / f"level_2_results_atlas_oxford_trh{int(trh)}_{curr_type}.png"))
display.close()
pprint({k for k in dataset})
# plotting.show()
# display = plotting.plot_epi(ordered_dict["realigned"]["image"], black_bg=1, axes=ax, title="Step: Realignment", display_mode=str_arrangement, cut_coords=cut_coords)
# # display.savefig(target_path / "misc" / "sub01_realigned.png") | [
"[email protected]"
] | |
ad7e109cbb860c1a968abf076a0898461d6728c6 | e7efb2e90059684e3c731870e0a1e9d0a4a24b24 | /hype_user/migrations/0004_userfb_likes.py | 9d33d674771260ff48f9c20c950a04f8cf3ec545 | [] | no_license | bjersey/hype_server | c5f01fd027e2a86164603571c72a29d9439937ac | 22e5229d29dd5809bbc357e3ae43fa3f6082e193 | refs/heads/master | 2021-03-27T19:03:45.912683 | 2016-07-05T02:48:16 | 2016-07-05T02:48:16 | 45,125,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('hype_user', '0003_auto_20160130_2215'),
]
operations = [
migrations.AddField(
model_name='userfb',
name='likes',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=128, null=True, blank=True), size=2), blank=True),
),
]
| [
"[email protected]"
] | |
f7c3de6fccedb1b8b120d1c4aca0f8fc5ae561ef | 9b4597846ef13179c7fde4bd4dac0ef3dda7f0c5 | /Object-Oriented-Programming-Python/open.py | 75731c6cb8c03d2a5b6f6eaff3143736fe84b13e | [] | no_license | SladetBask-Kasper/Object-Oriented-Programming-Python | 4d97f0b08ec7640e4e543845f485de9677ba4b5c | 110e0d75792d44bb1b772e860d349a76d2fc27fd | refs/heads/master | 2020-04-18T02:34:09.776319 | 2019-01-24T09:17:49 | 2019-01-24T09:17:49 | 66,933,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from main import main
quitz = main()
if quitz == 1:
exit()
elif quitz == 0:
exit()
else:
exit() | [
"[email protected]"
] | |
9c89d9ea5e84c2da0b5f7793fb4db88b23a82db7 | fa3b05e49e002936bd7ce7788baf3ae953e222f2 | /rst2txt/__init__.py | f749f6e00729c2cc65ae250e8a020020a8ed5757 | [
"BSD-2-Clause"
] | permissive | stephenfin/rst2txt | 3199e7f6a3e06fdde9788c858ad8d194463802bb | db1465a90325d2a0800f5a9efdc4149f18f06aef | refs/heads/master | 2021-06-13T07:47:27.478699 | 2021-04-01T07:35:40 | 2021-04-01T07:35:40 | 157,217,523 | 3 | 1 | BSD-2-Clause | 2021-04-01T07:35:41 | 2018-11-12T13:26:14 | Python | UTF-8 | Python | false | false | 920 | py | # -*- coding: utf-8 -*-
"""
rst2txt.__main__
~~~~~~~~~~~~~~~~
A minimal front end to the Docutils Publisher, producing plain text.
:copyright: Copyright 2018, Stephen Finucane <[email protected]>.
:license: BSD, see LICENSE for details.
"""
import locale
locale.setlocale(locale.LC_ALL, '') # noqa
from docutils.core import default_description
from docutils.core import publish_cmdline
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from rst2txt.writer import Writer
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
def main():
description = ('Generates plain text documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer=Writer(), writer_name='txt',
description=description)
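# Console usage sketch (docutils front ends take the source and destination
# paths from argv): rst2txt README.rst README.txt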
| [
"[email protected]"
] | |
e85a1c9fb91d56d5b78d4975cbff29cedf831c18 | 07ca730745c62c2c2286dacb1b86d4d89977f4cb | /test_polyfit.py | 224d36c6cfeff1b5ff7fa725d763d64a62014f17 | [] | no_license | RishiCSE89/PredictiveCaching | 9f8358ac56aa67ba822e17e7c5fa3192d4cfdc57 | b57e0736d9c0c1140d196a1b0151388aedc98bd7 | refs/heads/master | 2020-04-03T18:00:40.416878 | 2018-10-30T23:07:11 | 2018-10-30T23:07:11 | 155,467,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | import poly_fit_pluggable as pfit
s = [1,23,45,67,89,102]
print(s)
print(pfit.main(t_series=s,deg=3))
| [
"[email protected]"
] | |
24560ebd97ad7eefe326c5d0338c674f3f273d51 | cd834357f612378a725af830fe9536a6150cb6b1 | /utils.py | aec90d2c1d2dec71cb1c3990b2c326cafc6cb10e | [] | no_license | mgrenander/reproducing-paulus-xiong-socher | fb41782bbb2aa16f12f3c62dc4ebf5831a012769 | 1784d2ad4072e873130f7cdcf9ad5861e13f0607 | refs/heads/master | 2020-03-11T06:19:47.702456 | 2018-06-08T20:59:12 | 2018-06-08T20:59:12 | 129,827,118 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | import os
from tqdm import tqdm
import spacy
from datetime import datetime
from torch.autograd import Variable
import torch
spacy_en = spacy.load('en')
base_path = "data"
base_read_path = "data/finished_files"
max_input_len = 800
max_output_len = 100
decoder_hidden_size = 400
def convert_to_tsv(dataset):
art_path = os.path.join(base_read_path, "article", dataset)
ref_path = os.path.join(base_read_path, "reference", dataset)
# Remove previous version
open(os.path.join(base_path, dataset + ".tsv"), 'w').close()
f = open(os.path.join(base_path, dataset + ".tsv"), 'a', encoding='utf-8')
for i in tqdm(range(len(os.listdir(art_path)))):
article_name = str(i) + "_" + dataset + "_art.txt"
ref_name = str(i) + "_" + dataset + "_ref.txt"
article = open(os.path.join(art_path, article_name), encoding='utf-8')
reference = open(os.path.join(ref_path, ref_name), encoding='utf-8')
f.write(article.read() + "\t" + reference.read() + "\n")
f.close()
def tokenizer_in(text):
"""Tokenizer. Note we limit to top 800 tokens, as per Paulus et al."""
return [tok.text for tok in spacy_en(text)[:max_input_len]]
def tokenizer_out(text):
"""Tokenizer. Note we limit to top 100 tokens"""
return [tok.text for tok in spacy_en(text)[:max_output_len]]
def get_time_diff(curr_time):
return (datetime.now() - curr_time).seconds / 60.0, datetime.now()
# Attention weight combination methods
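# (Sketch of the intra-temporal attention idea from Paulus et al.: at decoding
# step t the raw score is exponentiated and divided by the sum of the
# exponentiated scores from all previous steps, which penalizes re-attending
# to input positions that already received attention.)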
def normalize_with_pen(scores):
    ret_scores = Variable(torch.zeros(scores.size()))
ret_scores[0] = torch.exp(scores[0])
for t in range(1, scores.size(0)):
        norm_const = torch.sum(torch.exp(scores[:t]))
ret_scores[t] = torch.div(torch.exp(scores[t]), norm_const)
return ret_scores
def get_enc_context_vector(scores, hidden_states):
pen_scores = normalize_with_pen(scores)
norm_constant = torch.sum(pen_scores, dim=0)
attn_weights = torch.div(pen_scores, norm_constant)
context_vector = torch.sum(torch.matmul(attn_weights, hidden_states), dim=0)
return context_vector, attn_weights
def get_dec_context_vector(scores, hidden_states):
if hidden_states is None:
return Variable(torch.zeros(scores.size(1), decoder_hidden_size))
attn_weights = normalize_with_pen(scores)
context_vector = torch.sum(torch.matmul(attn_weights, hidden_states), dim=0)
return context_vector | [
"[email protected]"
] | |
e059f1040835b3ab861f83ca8d12ceb3ee980696 | 4cfe65557710defbba8496f3194299b54996d58b | /EncoderDecoderAgent/GRU/Encoder.py | d4b6f6b466024ff30eaec8f11e1bc5aad3717b4d | [
"MIT"
] | permissive | MehranTaghian/DQN-Trading | 176235663004e878c776e52c6c264add8d801275 | 1cca1122cc3eac862f8a2af69b979fddfbe6bd1d | refs/heads/master | 2023-08-11T17:23:57.831656 | 2021-10-09T05:50:21 | 2021-10-09T05:50:21 | 400,635,399 | 100 | 37 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import torch
import torch.nn as nn
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, device):
"""
:param input_size: 5 which is OHLC + trend
"""
super(EncoderRNN, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.gru = nn.GRU(input_size, hidden_size)
# self.lstm = nn.LSTM(input_size, hidden_size)
def forward(self, x):
"""
:param x: if the input x is a batch, its size is of the form [window_size, batch_size, input_size]
thus, the output of GRU would be of shape [window_size, batch_size, hidden_size].
e.g. output[:, 0, :] is the output sequence of the first element in the batch.
The hidden is of the shape [1, batch_size, hidden_size]
"""
if len(x.shape) < 3:
x = x.unsqueeze(1)
hidden = self.initHidden(x.shape[1])
output, hidden = self.gru(x, hidden)
# output, hidden = self.gru(x)
# cell = self.initHidden(x.shape[1])
# output, (hidden, cell) = self.lstm(x, (hidden, cell))
return output, hidden
def initHidden(self, batch_size):
return torch.zeros(1, batch_size, self.hidden_size, device=self.device)
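# Minimal usage sketch (assumes CPU; shapes follow the forward() docstring):
#
#     enc = EncoderRNN(input_size=5, hidden_size=8, device=torch.device('cpu'))
#     x = torch.randn(10, 3, 5)    # [window_size, batch_size, input_size]
#     out, h = enc(x)              # out: [10, 3, 8], h: [1, 3, 8]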
| [
"[email protected]"
] | |
8818d2dee291bc650cf6823284771fcddef0856d | 2ea548f2ff55d5d144580bbf91ec25b307a77e10 | /analysis/models.py | 12e5b1de7b777c60488816d0c7cce3a7bc004486 | [] | no_license | philippHorn/tiltscore | 7183286f67b0598f31ad9c2e116d825528123431 | dc03b314ffd52cad9344897942ee38cee8fd1874 | refs/heads/master | 2022-12-11T02:33:11.008729 | 2018-05-06T10:41:20 | 2018-05-06T10:41:20 | 132,249,170 | 0 | 0 | null | 2022-12-08T00:59:58 | 2018-05-05T13:11:58 | JavaScript | UTF-8 | Python | false | false | 300 | py | from django.db import models
from riot.models import Summoner
class Calculation(models.Model):
summoner = models.ForeignKey(Summoner, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
count = models.IntegerField(default=0)
finished = models.BooleanField()
| [
"[email protected]"
] | |
b81fc0b1aed940c4085205694c61915ef05eb2df | f6447a01364882f6bc8ab259d86e9e945c3f2356 | /jenkins_jobs/modules/project_folder.py | ac6e6bdddc22e128dfe17342bc962db3d330393f | [
"Apache-2.0"
] | permissive | gooddata/jenkins-job-builder | 4e0909cab5ada4114f2396a1335300c4e8590500 | a7aac18ca142c2a3323577458522d1e28827fccd | refs/heads/gd-patches-2.0.0.0b3-1-g0b5ed034 | 2022-12-13T23:31:33.900167 | 2022-03-18T10:58:05 | 2022-03-18T10:58:05 | 44,802,046 | 0 | 12 | Apache-2.0 | 2022-11-30T12:59:15 | 2015-10-23T08:57:51 | Python | UTF-8 | Python | false | false | 1,955 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The folder Project module handles creating Jenkins folder projects.
You may specify ``folder`` in the ``project-type`` attribute of
the :ref:`Job` definition.
Requires the Jenkins :jenkins-wiki:`CloudBees Folder Plugin
<CloudBees+Folder+Plugin>`.
Job example:
.. literalinclude::
/../../tests/yamlparser/fixtures/project_folder_template001.yaml
Job template example:
.. literalinclude::
/../../tests/yamlparser/fixtures/project_folder_template002.yaml
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
class Folder(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
xml_parent = XML.Element('com.cloudbees.hudson.plugins.folder.Folder',
plugin="cloudbees-folder")
XML.SubElement(xml_parent, 'actions')
attributes = {"class": "com.cloudbees.hudson.plugins.folder."
"icons.StockFolderIcon"}
XML.SubElement(xml_parent, 'icon', attrib=attributes)
XML.SubElement(xml_parent, 'views')
attributes = {"class": "hudson.views.DefaultViewsTabBar"}
XML.SubElement(xml_parent, 'viewsTabBar', attrib=attributes)
XML.SubElement(xml_parent, 'primaryView').text = 'All'
XML.SubElement(xml_parent, 'healthMetrics')
return xml_parent
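# Illustrative job definition for this project type (sketch; the complete
# examples live in the fixture files referenced in the module docstring):
#
#     - job:
#         name: example-folder
#         project-type: folder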
| [
"[email protected]"
] | |
ef30903e305f3318091cd214dc77eb24811ecfe1 | 29afb48dd73bfd988864218aa5bf9e0acf219d0f | /src/sandBox/analysisFunctionDevelopment/build.py | 7cc590661b6ef651c8eed7e978bccd15b8d86a23 | [] | no_license | alx5246/srnnetTraining | b18623ae2b2bc80476dc273af615ba7d784443d4 | 416a0f7f30ac55b094120abef370c7c4c166fdc8 | refs/heads/master | 2022-11-04T14:42:50.082172 | 2016-11-19T20:07:02 | 2016-11-19T20:07:02 | 59,118,134 | 1 | 1 | null | 2022-10-12T10:10:49 | 2016-05-18T13:29:42 | Python | UTF-8 | Python | false | false | 1,016 | py | import os
import subprocess
if __name__ == '__main__':
wd = os.path.dirname(os.path.realpath(__file__))
# fileName = raw_input("Enter file name to run:")
fileName = "controller.py"
visualStudioVcVarsAllPath = "C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\vcvarsall.bat"
argsAndApp="(\"%s\" amd64 > nul) && python \"%s\"" % (visualStudioVcVarsAllPath,
os.path.join(wd, fileName))
# argsAndApp="(\"%s\" amd64 > nul)" % visualStudioVcVarsAllPath
print("RUNNING COMMAND: %s" % argsAndApp)
childProcess = subprocess.Popen(argsAndApp, cwd=wd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, bufsize=1)
for line in iter(childProcess.stdout.readline, b''):
print(str(line.rstrip()))
childProcess.communicate()
if childProcess.returncode != 0:
print("\n\nFAILURE: return code %s" % childProcess.returncode)
else:
print("\n\nSUCCESS") | [
"[email protected]"
] | |
623e17c320df0d74ffbe22456d181278faf49b14 | 7810c4043fdf5fd849922af6df99c8aae5b86392 | /python/atm_program.py | 1b29517e9e139f12918e0a471626dfb8b4daa4ac | [] | no_license | bellaananda/python_progate | 99b4dd65059481a2a43b649f2ddf54a8ec108816 | 1c8f22708c2c49efafd12b82b3064d531666ca7f | refs/heads/master | 2022-12-15T03:46:38.523422 | 2020-09-16T06:48:36 | 2020-09-16T06:48:36 | 295,947,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | import random
import datetime
from customer import Customer
atm = Customer(0)  # assumption: Customer expects an account id; 0 is a placeholder (the original passed the id() builtin by mistake)
while True:
id = int(input("\n Masukkan pin anda: "))
trial = 0
while (id != int(atm.cekPin()) and trial < 3):
id = int(input("Pin salah. Silakan masukkan lagi : "))
trial += 1
if trial == 3:
print("Error. Silakan ambil kartu dan coba lagi.")
exit()
while True:
print("\n\t ------------------------------ \n")
print("\t SELAMAT DATANG DI APLIKASI ATM")
print("\t\t 1 - Cek Saldo \n\t\t 2- Debet \n\t\t 3 - Simpan \n\t\t 4 - Ganti Pin \n\t\t 5 - Keluar")
pilihmenu = int(input("\t Silakan pilih menu : "))
if pilihmenu == 1:
print("\t Selamat Datang di Menu Cek Saldo")
print("\t Saldo anda sekarang : Rp. " + str(atm.cekBalance()))
elif pilihmenu == 2:
print("\t Selamat Datang di Menu Debet")
nominal = float(input("\t Silakan masukkan nominal saldo : "))
verifikasi_debet = input("\t Konfirmasi : Anda akan melakukan debet dengan nominal Rp. " + str(nominal) + " ? y/t" + " ")
if verifikasi_debet == "y":
print("\t Saldo awal anda adalah : Rp. " + str(atm.cekBalance()))
else:
break
if nominal < atm.cekBalance():
atm.debetBalance(nominal)
print("\t Transaksi debet berhasil!")
print("\t Saldo anda sekarang adalah : Rp. " + str(atm.cekBalance()))
else:
print("\t Maaf. Saldo anda tidak cukup untuk melakukan debet.")
print("\t Silakan lakukan penambahan nominal saldo.")
elif pilihmenu == 3:
print("\t Selamat Datang di Menu Simpan")
nominal = float(input("\t Silakan masukkan nominal saldo : "))
verifikasi_simpan = input("\t Konfirmasi : Anda akan melakukan penyimpanan dengan nominal Rp. " + str(nominal) + " ? y/t" + " ")
if verifikasi_simpan == "y":
atm.simpanBalance(nominal)
print("\t Saldo anda sekarang adalah : Rp. " + str(atm.cekBalance()))
else:
break
elif pilihmenu == 4:
print("\t Selamat Datang di Menu Ganti Pin")
verifikasi_pin = int(input("\t Silakan masukkan pin anda : "))
if verifikasi_pin != int(atm.cekPin()):
print("\t Pin anda salah. Silakan masukkan pin : ")
pin_baru = int(input("\t Silakan masukkan pin baru : "))
print("\t Pin anda berhasil diganti!")
verifikasi_pinbaru = int(input("\t Coba masukkan pin baru anda : "))
if verifikasi_pinbaru == pin_baru:
print("\t Selamat, pin baru anda berhasil!")
else:
print("\t Maaf, pin baru anda salah!")
elif pilihmenu == 5:
print("\t Selamat Datang di Menu Keluar")
print("\t Resi tercetak otomatis saat anda keluar. \n\t Harap simpan sebagai bukti transaksi.")
print("\t No. Record: ", random.randint(100000, 1000000))
print("\t Tanggal: ", datetime.datetime.now())
print("\t Saldo akhir: ", atm.cekBalance())
print("\t Terima kasih dan sampai jumpa!")
exit() | [
"[email protected]"
] | |
58ee8fa2946ceeab6382b00f21d4c439fc798613 | b31ff20af39eb96f5c78a3e41d4a7727a32bc309 | /collection/list/examples/list/list_comprehension/exercise2.py | 16501aaabe64db47d5a4e04fa83ac2ab25aa876f | [] | no_license | abhi15sep/Python-Course | 42b74c2f3f016c960edcc091808066f7d1411054 | 482bd7fdb32df54d97d1e6dd76fc807bcab70e9a | refs/heads/master | 2020-04-27T20:28:25.448692 | 2019-08-04T07:00:12 | 2019-08-04T07:00:12 | 174,659,260 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | #1. Given two lists [1,2,3,4] and [3,4,5,6], create a variable called answer, which is a new list that is the intersection of the two. Your output should be [3,4] . Hint: use the in operator to test whether an element is in a list. For example: 5 in [1,5,2] is True. 3 in [1,5,2] is False.
#2. Given a list of words ["Elie", "Tim", "Matt"] answer2, which is a new list with each word reversed and in lower case (use a slice to do the reversal!) Your output should be ['eile', 'mit', 'ttam']
#Using list comprehensions(the more Pythonic way):
answer = [val for val in [1,2,3,4] if val in [3,4,5,6]]
#the slice [::-1] is a quick way to reverse a string
answer2 = [val[::-1].lower() for val in ["Elie", "Tim", "Matt"]]
#Without list comprehensions, things are a bit longer:
answer = []
for x in [1,2,3,4]:
if x in [3,4,5,6]:
answer.append(x)
answer2 = []
for name in ["Elie", "Tim", "Matt"]:
answer2.append(name[::-1].lower())
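# Alternative sketch for the intersection: a set makes the membership test
# O(1), though a plain set intersection would not guarantee ordering.
# answer_set = [x for x in [1,2,3,4] if x in {3,4,5,6}]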
| [
"[email protected]"
] | |
888e6408ce09b9e8e18645e386d0ea6ec6f0ec83 | 96c0a8c5301f97d8ec5193ead7f58768ee17040a | /openn/xml/identifier.py | 2b1e386bb5633580a9d86015e1a0af37457a2c47 | [] | no_license | KislakCenter/openn | 7a6b2de5e03ff6663bb321cfd16d5809ab58e151 | 8b410f08713b778d6dd0e5d60cff558f20c27a26 | refs/heads/master | 2022-12-15T23:07:17.902112 | 2020-09-03T16:28:19 | 2020-09-03T16:28:19 | 294,202,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | # -*- coding: utf-8 -*-
from lxml import etree
from openn.xml.xml_whatsit import XMLWhatsit
class Identifier(XMLWhatsit):
def __init__(self,node,ns):
self.xml = node
self.ns = ns
def is_url(self,t):
return t.strip().startswith('http')
@property
def id_type(self):
return self._get_attr('.', 'type')
@property
def text(self):
t = self._get_text('./t:idno')
if self.is_url(t):
return t
elif self.element_name() == 'msIdentifier':
return t
else:
return "%s: %s" % (self.id_type, t)
def element_name(self):
return self.xml.xpath('name(.)')
def tostring(self):
return etree.tostring(self.xml, pretty_print=True)
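# Usage sketch (assumption: `node` is an lxml element and `ns` maps the 't'
# prefix used in the XPath queries, e.g. to the TEI namespace):
#
#     ident = Identifier(node, {'t': 'http://www.tei-c.org/ns/1.0'})
#     print(ident.text)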
| [
"[email protected]"
] | |
01bc56bdb45eeb7f88b23db1a8bbefc460de1985 | 0229ce181421a7781864421fe5251e724472ea53 | /educative.io/XOR/Two Single Numbers (medium).py | 91b0b5e2c1fdaf19d1a4a94e624e175bc124e8eb | [] | no_license | sudonitin/dsa | 012e68c5a49e4b52faef1e4582d1e1a29ae7890c | 7c3694dcd1e4b79571a163a9646373de0e0ebe58 | refs/heads/master | 2023-04-26T09:03:05.780294 | 2021-05-08T16:10:59 | 2021-05-08T16:10:59 | 244,941,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | def two_nums_hashmap(arr):
hash_map ={}
for i in arr:
if i not in hash_map:
hash_map[i] = 0
else:
del hash_map[i]
return list(hash_map.keys())
# referred solution
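# Idea: XOR-ing the whole array leaves xor_result = num1 ^ num2, since every
# duplicate cancels itself. Any set bit of xor_result (the rightmost is used
# below) differs between num1 and num2, so partitioning the array by that bit
# puts the two unique numbers in separate groups, and XOR-ing each group
# cancels its duplicates.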
def two_nums_xor(arr):
xor_result = 0
for i in range(len(arr)):
xor_result ^= arr[i]
# print(xor_result)
right_most_one_bit_in_xor_result = 1
while right_most_one_bit_in_xor_result & xor_result == 0:
right_most_one_bit_in_xor_result <<= 1
num1, num2 = 0, 0
for i in arr:
if right_most_one_bit_in_xor_result & i != 0:
num1 ^= i
else:
num2 ^= i
return [num1, num2]
# print("------Hashmap------")
# print(two_nums_hashmap([1, 4, 2, 1, 3, 5, 6, 2, 3, 5])) #[4, 6]
# print(two_nums_hashmap([2, 1, 3, 2])) #[1, 3]
print("------XOR------")
print(two_nums_xor([1, 4, 2, 1, 3, 5, 6, 2, 3, 5])) #[4, 6]
print(two_nums_xor([2, 1, 3, 2])) #[1, 3] | [
"[email protected]"
] | |
2c4f87b94aa0d96dd697d8229a9c6e151d976104 | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot2_WS/build/tf2_eigen/catkin_generated/pkg.develspace.context.pc.py | f5c5a1a6d5db48c50693d36ed16e7a9aa2ab7b74 | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot2_WS/src/geometry2/tf2_eigen/include;/usr/include/eigen3".split(';') if "/xavier_ssd/TrekBot/TrekBot2_WS/src/geometry2/tf2_eigen/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_eigen"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/tf2_eigen"
PROJECT_VERSION = "0.6.3"
| [
"[email protected]"
] | |
79629d6a6edfb929201ce161f3a3ee1f7b75b510 | 83c5481333c3f2d73b3080cf01788cae8b785e7e | /main.py | 312cbf415c306f91175d18caa247bfcbd4b311fd | [] | no_license | DanangCode/hyperlocative | 3cd96f555b9bf38643890b70ba6b38ac43718078 | 90be2336ee752e3c99ccae7c2473aa5cca389edc | refs/heads/master | 2021-08-27T15:10:47.634210 | 2011-05-12T06:01:35 | 2011-05-12T06:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
def get(self):
self.response.out.write(self.request.headers["User-Agent"])
def main():
application = webapp.WSGIApplication([('/', MainHandler)],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
122e13140dd941b7f9130029967e01235509b7a4 | 63aa1f0ead23c3f8d18847cca61613b40e34e5dd | /groups/migrations/0003_auto_20181221_1758.py | c33f31b9ed21d96c219039afc6c7533d82e5384c | [] | no_license | meheck/Social-Network | 3e151def207cd2c1548445065e36d501ed71523c | ac5182fd39a1f8a6ed95a4adedbe4a5a47d10011 | refs/heads/master | 2022-11-19T02:12:15.113717 | 2020-07-11T07:59:01 | 2020-07-11T07:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 2.1.4 on 2018-12-21 17:58
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('groups', '0002_auto_20181221_1517'),
]
operations = [
migrations.RemoveField(
model_name='group',
name='members',
),
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(related_name='Memberships', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
cc920b16c95ac819b236fa84be0b4223fe58683a | 96602eeaa034e3e7b36df4ed10fba9bc9c9ed5c8 | /01-15/day08-2/文件操作.py | 57723e900a692f4a8c9c3817e4781677663b0e4e | [] | no_license | microease/Old-boy-Python-knight-project-1 | f4b12fe6f46bd159c6dc8151b1d28c6520042441 | dc32749e29cc63b44849d40af345d4bb7817d624 | refs/heads/master | 2020-09-20T18:00:34.821769 | 2019-12-11T14:47:44 | 2019-12-11T14:47:44 | 224,553,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # coding:utf-8
# File Name: 文件操作
# Description :
# Author : micro
# Date: 2019/12/3
f = open("./测试.txt", encoding="utf-8", mode="w")
| [
"[email protected]"
] | |
a6c261ac3e0a64a23198f2e0e53d7ffc9ee18efe | 17628aa356d636a173230309140381d091c694a3 | /BetNow/creatematch/migrations/0015_auto_20190713_1228.py | 33f275ad9c702b3cb2a24362c803ce4ee8bd424f | [] | no_license | sudeepth457/Bet24 | 247ea7865cfaaf3e7faf59f0e7969ca679cc44c6 | 493624982f0ee0b494aa752625b7b2baef3888e0 | refs/heads/master | 2022-02-24T05:18:24.267030 | 2022-02-15T05:58:01 | 2022-02-15T05:58:01 | 201,655,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # Generated by Django 2.1.7 on 2019-07-13 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('creatematch', '0014_auto_20190713_1222'),
]
operations = [
migrations.RemoveField(
model_name='voterdetails',
name='id',
),
migrations.AlterField(
model_name='voterdetails',
name='match',
field=models.CharField(default=0, max_length=50, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='voterdetails',
name='userid',
field=models.CharField(default=0, max_length=100),
),
]
| [
"[email protected]"
] | |
06fbc63ad5da39878b6e54f87c00b44717291c0a | 4e8399b2fa87ef498a26d89896bccfeb3cb9c49e | /bot/api/core/result.py | 63c0f7e5345b7e0f34aab351168affb1ab4ef41b | [] | no_license | Deibrony/Easy_Recycle_Telegram_Bot | 0e2c7f173b7ffd67a96bdd3708630dce66d2e7fa | 3a0cc98c162a1496c755988c6939a2921a08a8ba | refs/heads/main | 2023-08-19T06:16:49.795874 | 2021-10-28T18:36:37 | 2021-10-28T18:36:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | from api.static import PH
import os
from api.static import TRY_AGAIN
def result_out():
    '''Return the list of classes that were detected in the photo.'''
os.system(f'{PH}darknet.exe detector test {PH}cfg/coco.data {PH}cfg/custom-yolov4-detector.cfg {PH}72custom-yolov4-detector_best.weights C:/Easy_Recycle/data/prediction.jpg -thresh 0.8 -dont-show -ext_output < {PH}data/train.txt > {PH}result.txt')
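    # (darknet prints load/startup messages before its detections; the slice
    # below assumes those occupy the first 12 lines of result.txt)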
with open(f'{PH}result.txt') as f:
text = f.read()
text = text.split('\n')
text = list(filter(None, text))
if len(text) > 12:
text_1 = text[12:]
all_results = [i.split(':')[0] for i in text_1]
return all_results
else:
return(TRY_AGAIN)
def result_out_plastic():
    '''Return the list of classes that were detected in the photo of plastic.'''
with open(f'{PH}result_plastic.txt') as f:
text = f.read()
text = text.split('\n')
text = list(filter(None, text))
if len(text) > 7:
text_2 = text[7:]
plastic_results = [i.split(':')[0] for i in text_2]
return plastic_results | [
"[email protected]"
] | |
f6d365330a0e3c5ee7ccb9c6a8f92bdf3f1e5d2c | 80363ed5994e79c1c9eda176a03c3cdee2ac0065 | /venv/bin/django-admin.py | 7a8c7c3e83b4fb3876ccca8cf305ab999e9daf7f | [] | no_license | Peterfilho/hella | 9fec02c1b6bb613491c2d6a905b28ed80a00c1cb | 6ac0e82501f4b3e7db761ccc6098ad5bff32dadd | refs/heads/master | 2023-01-09T04:07:13.047992 | 2020-11-07T22:42:11 | 2020-11-07T22:42:11 | 310,075,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!/home/peterson/projects/hella/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
442c97a997c2cded002ffc2c1a95639104e036f9 | bcdb0d74add595fd07125508c789249b5512fa5f | /ArithmeticOperators.py | 5c0a5f762e1d1bbf232354dcaef328ddd5690903 | [] | no_license | milomacphail/pythonPractice | 0fe8a59d761cfc1dc27594cfd402491b032f2e4c | 60b5b72f7c76e1bcfe858cbdb000c3dab82801b2 | refs/heads/master | 2020-06-09T08:40:46.518595 | 2020-02-21T01:22:22 | 2020-02-21T01:22:22 | 193,410,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | if __name__ == '__main__':
a = int(input())
b = int(input())
    print(a + b)
    print(a - b)
    print(a * b)
| [
"[email protected]"
] | |
d7321a50eac4ad2fe6b9d8f5b2ce5efcf21ef166 | eed5362fb9e273cabd903f3b1507332bf419a9d5 | /venv/bin/pip3 | 8545745b5e1e7e67c744ea0b91c100774bb832e0 | [] | no_license | douglas1850/PythonWebCrawler | 9bcb5d3ddb30b7b6d515253eaa7da02a319336e2 | d9b6355e2082f13a43062a678644ecfb860baf50 | refs/heads/master | 2020-03-29T09:04:21.102107 | 2018-09-27T03:26:21 | 2018-09-27T03:26:21 | 149,739,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | #!/Users/douglasomeara/PycharmProjects/WebScraper/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
434c071f28cd3fba9ca00651048d3da61c4ae8ec | b6aea04af7ae51b2df4130535643fee4af581a52 | /Windows/main.py | 59ebdc6a27dc24959f47bd358334704eba451948 | [] | no_license | Andreivilla/bot-insta-python | 5cc8b96b68ad958142b4f3bfa48eabb11085dcb0 | 00165962ff0da18b0b34f9ba7964f899ec46658c | refs/heads/master | 2023-03-03T09:51:31.093222 | 2020-06-09T23:25:56 | 2020-06-09T23:25:56 | 258,543,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from InstagramBot import InstagramBot
from util_json import util_json
import time
# enter the bot's information
#user = str(input('User: '))
#password = str(input('Password: '))
# profile
# photo
user = 'Marvin_Robot63'
password = '36461023'
#login
bot_driver = InstagramBot()
bot_driver.login(user, password)
# follow by profile
#id_perfil = str(input('Profile id: '))
id_perfil = 'caiobotturapro'
for i in range(10):
    # NOTE: the loop body was missing in the original file; a follow action on
    # `id_perfil` (via an InstagramBot method, assumed) would go here.
    pass
| [
"[email protected]"
] | |
c902fadf9d3369b7d4eba86635cce91b47bcf542 | 6de7c23a72e03672fee1b05b7383c0d652ec16b7 | /mtcnn/first_stage.py | 01d58e74167a83dd3d76f8959316329f487ff794 | [] | no_license | Jasionkit/awesome-face-detection | 56bf96920d9bab363069c35b2d3a4256f6513621 | 31b8fb973902585e1cbe6c87c0701e38ec223832 | refs/heads/master | 2022-03-22T16:06:49.935390 | 2019-11-20T06:33:31 | 2019-11-20T06:33:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | import torch
from torch.autograd import Variable
import math
import cv2
import numpy as np
from .box_utils import nms, _preprocess
def run_first_stage(image, net, scale, threshold):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
image: an instance of PIL.Image.
net: an instance of pytorch's nn.Module, P-Net.
scale: a float number,
scale width and height of the image by this number.
threshold: a float number,
threshold on the probability of a face when generating
bounding boxes from predictions of the net.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
# scale the image and convert it to a float array
width = image.shape[1]
height = image.shape[0]
sw, sh = math.ceil(width*scale), math.ceil(height*scale)
img = cv2.resize(image, (sw, sh))
img = np.asarray(img, 'float32')
img = torch.FloatTensor(_preprocess(img))
output = net(img)
probs = output[1].data.numpy()[0, 1, :, :]
offsets = output[0].data.numpy()
# probs: probability of a face at each sliding window
# offsets: transformations to true bounding boxes
boxes = _generate_bboxes(probs, offsets, scale, threshold)
if len(boxes) == 0:
return None
keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
return boxes[keep]
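# Usage sketch (assumption -- `pnet` would be built from this package's model
# definitions, which are not part of this file): P-Net is typically run over an
# image pyramid, and the per-scale candidates are merged later in the pipeline.
#
#   candidates = []
#   for scale in (0.6, 0.42, 0.3):  # hypothetical pyramid scales
#       boxes = run_first_stage(image, pnet, scale, threshold=0.6)
#       if boxes is not None:
#           candidates.append(boxes)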
def _generate_bboxes(probs, offsets, scale, threshold):
"""Generate bounding boxes at places
where there is probably a face.
Arguments:
probs: a float numpy array of shape [n, m].
offsets: a float numpy array of shape [1, 4, n, m].
scale: a float number,
width and height of the image were scaled by this number.
threshold: a float number.
Returns:
a float numpy array of shape [n_boxes, 9]
"""
# applying P-Net is equivalent, in some sense, to
# moving 12x12 window with stride 2
stride = 2
cell_size = 12
# indices of boxes where there is probably a face
inds = np.where(probs > threshold)
if inds[0].size == 0:
return np.array([])
# transformations of bounding boxes
tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
# they are defined as:
# w = x2 - x1 + 1
# h = y2 - y1 + 1
# x1_true = x1 + tx1*w
# x2_true = x2 + tx2*w
# y1_true = y1 + ty1*h
# y2_true = y2 + ty2*h
offsets = np.array([tx1, ty1, tx2, ty2])
score = probs[inds[0], inds[1]]
# P-Net is applied to scaled images
# so we need to rescale bounding boxes back
bounding_boxes = np.vstack([
np.round((stride*inds[1] + 1.0)/scale),
np.round((stride*inds[0] + 1.0)/scale),
np.round((stride*inds[1] + 1.0 + cell_size)/scale),
np.round((stride*inds[0] + 1.0 + cell_size)/scale),
score, offsets
])
    # the +1 appears to mirror the original MATLAB implementation's 1-based
    # pixel coordinates (assumption; the source comment asked "why one is added?")
return bounding_boxes.T
| [
"[email protected]"
] | |
aeafcc6aa2038481d97caa38fb955fbd19cd7a46 | 45c901fd5e8b3db79810b918382374e93f79fd3b | /config/__init__.py | c07d889d8b4aaf0114fb4476901ad132e2d4fab6 | [] | no_license | gushui250/nh_77 | 47a1e5987d2a5615d3b131681c05c614892d5b63 | 8f301b5a61ae4cf694bdd33c8c401b5021edd91a | refs/heads/master | 2023-07-11T21:37:12.890318 | 2021-08-12T06:25:15 | 2021-08-12T06:25:15 | 383,658,449 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :__init__.py.py
# @Time :2021/1/29 17:54
# @Author :shui
| [
"[email protected]"
] | |
dda962378b9ceac412f4051fd1f2c9a245824947 | 96863d5522a69f253e4c94bd7a68fbe2eed673d3 | /src/Room.py | 61c53fa3a92aebedc1f86d412ae34b4183c820ea | [] | no_license | Oblaize/Office_Space_Allocation | 1f9eecccafbd683d4627c27e053e2623e362e69f | edd48fa8c161155ddb11b4828f05f8d2af97ea99 | refs/heads/master | 2021-01-20T01:10:59.672370 | 2017-04-28T07:06:47 | 2017-04-28T07:06:47 | 89,230,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | class Room:
max_space = None
room_type = None
occupants = []
def set_max_space(self, value):
if isinstance(value, int):
self.max_space = value
else:
raise ValueError('max space value can only be an integer')
class Office(Room):
def __init__(self, name):
self.name = name
self.room_type = 'office'
self.max_space = 6
self.occupants = []
class LivingSpace(Room):
def __init__(self, name):
self.name = name
self.room_type = 'livingspace'
self.max_space = 4
self.occupants = [] | [
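# Usage sketch (assumption): rooms are created by name and people are appended
# to `occupants` until `max_space` is reached, e.g.
#   office = Office('valhalla')          # hypothetical room name
#   office.occupants.append('J. Doe')    # offices hold up to 6 occupants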
"Blaize Ottizy"
] | Blaize Ottizy |
333c1a97110f0fbf12b144d2c29ae0e2be6c79a3 | 74f8f4b88944aa9dea9a3e87a9841568ae0672db | /mini_batch_iter.py | 7b6ac24888d347e9ffd69623f9747fc7149a92a3 | [
"MIT"
] | permissive | feifan456/deep_parsimonious | 01c6cadb3f2614b2a421158f1709d1d83c0f9ba3 | 0c84654f9963eb3532b3d291087881f1baa063ad | refs/heads/master | 2022-03-27T10:54:14.255449 | 2019-12-25T00:19:26 | 2019-12-25T00:19:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | import numpy as np
class MiniBatchIterator:
def __init__(self, idx_start, bat_size, num_sample, train_phase=True, is_permute=True):
self._bat_size = bat_size
self._idx_start = idx_start
self._num_sample = num_sample
self._train_phase = train_phase
self._is_permute = is_permute
if self._is_permute:
self._idx_sample = np.random.permutation(self._num_sample)
else:
self._idx_sample = np.array(range(self._num_sample))
@property
def idx_start(self):
return self._idx_start
@property
def bat_size(self):
return self._bat_size
@property
def num_sample(self):
return self._num_sample
@property
def train_phase(self):
return self._train_phase
@property
def is_permute(self):
return self._is_permute
def get_batch(self):
""" Get indices of a mini-batch """
if self._idx_start + self._bat_size > self._num_sample:
if self._train_phase:
idx_out = self._idx_sample[self._idx_start:]
if self._is_permute:
self._idx_sample = np.random.permutation(self._num_sample)
count = self._bat_size - (self._num_sample - self._idx_start)
idx_out = np.concatenate((idx_out, self._idx_sample[: count]))
self._idx_start = count
else:
idx_out = self._idx_sample[self._idx_start:]
self._idx_start = 0
else:
idx_out = self._idx_sample[
self._idx_start: self._idx_start + self._bat_size]
self._idx_start = (self._idx_start +
self._bat_size) % self._num_sample
return idx_out
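    # Example (assumption): with num_sample=5 and bat_size=2, successive calls
    # yield index batches of size 2; in training mode the permutation is
    # refreshed at the epoch boundary and the batch wraps into the new epoch.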
def reset_iterator(self, idx_start=0):
if idx_start < 0:
raise ValueError('Sample index should be non-negative!')
self._idx_start = idx_start
# unit test
if __name__ == '__main__':
myIter = MiniBatchIterator(
idx_start=0, bat_size=256, num_sample=5994, train_phase=True, is_permute=True)
    for i in range(25):
        idx = myIter.get_batch()
        print(idx)
| [
"[email protected]"
] | |
cdaddbe4548ee42770d48b813be4bce96f6d0541 | af3714908fa47f956ce02b0436e4f88d9c922041 | /SuccessPredictionBudgetNB/src/rating/PedictRatingUsingGenre.py | d7d078837d338ea7f80a5af9d27125d7a208ff49 | [] | no_license | tanviborkar/MovieSuccessPrediction | 0c4d93a9ab78391852bb6082053aa65327a237f3 | d174b14d317007121873532a5289d7dfab7017c4 | refs/heads/master | 2021-01-19T23:42:11.610272 | 2017-04-23T03:55:40 | 2017-04-23T03:55:40 | 89,012,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | '''
Created on Apr 2, 2017
@author: Tanvi Borkar
'''
from __future__ import division
import csv
from src.rating.Genre import Genre
class PredictRatingUsingGenre(object):
maxFbLikes = 0.0
maxProfit = 0.0
maxIMDBScore = 0.0
genreList = None
def calculateRating(self, movieGenreList):
self.genreList = []
self.loadGenreData(movieGenreList)
sumIMDBScore = 0.0
for genre in self.genreList:
sumIMDBScore = sumIMDBScore + genre.getGenreIMDBScore()
return sumIMDBScore / len(self.genreList)
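    # Worked example (assumption): a movie tagged with two genres whose summary
    # IMDB scores are 6.5 and 7.5 gets their mean, (6.5 + 7.5) / 2 = 7.0.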
'''
count = 1
sumFbLikes = 0.0
sumProfit = 0.0
sumIMDBScore = 0.0
for actor in self.actorList:
if count == 1:
sumFbLikes = sumFbLikes + (0.6 *(actor.getActorFbLikes() / self.maxFbLikes))
sumProfit = sumProfit + (0.6 * (actor.getActorTotalProfits() / self.maxProfit))
sumIMDBScore = sumIMDBScore + (0.6 * (actor.getActorIMDBScore() / self.maxIMDBScore))
count = count + 1
elif count == 2:
sumFbLikes = sumFbLikes + (0.3 *(actor.getActorFbLikes() / self.maxFbLikes))
sumProfit = sumProfit + (0.3 * (actor.getActorTotalProfits() / self.maxProfit))
sumIMDBScore = sumIMDBScore + (0.3 * (actor.getActorIMDBScore() / self.maxIMDBScore))
count = count + 1
elif count == 3:
sumFbLikes = sumFbLikes + (0.1 *(actor.getActorFbLikes() / self.maxFbLikes))
sumProfit = sumProfit + (0.1 * (actor.getActorTotalProfits() / self.maxProfit))
sumIMDBScore = sumIMDBScore + (0.1 * (actor.getActorIMDBScore() / self.maxIMDBScore))
count = count + 1
sumFbLikes = sumFbLikes / 3
sumProfit = sumProfit / 3
sumIMDBScore = sumIMDBScore / 3
print('Computed Score: '+ str(sumIMDBScore))
#computedScore = (0.3 * sumFbLikes) + (0.7 * sumIMDBScore)
#print('Computed Score: '+ str(computedScore))
#computedScore = (0.25 * sumFbLikes) + (0.35 * sumProfit) + (0.4 * sumIMDBScore)
#print('Computed Score: '+ str(computedScore))
'''
def loadGenreData(self, genreList):
with open('rating/genre_summary.csv', 'rt') as csvfile:
columnNames = ['genre_name', 'no_of_fb_likes', 'total_profit', 'total_imdb_score']
reader = csv.DictReader(csvfile, columnNames)
for row in reader:
if(row['genre_name'] != 'genre_name'):
if(row['genre_name'] in genreList):
genreObj = Genre()
genreObj.setGenre(row['genre_name'])
genreObj.setGenreFbLikes(float(row['no_of_fb_likes']))
genreObj.setGenreTotalProfits(float(row['total_profit']))
genreObj.setGenreIMDBScore(float(row['total_imdb_score']))
self.genreList.append(genreObj)
'''
if(self.maxFbLikes<float(row['no_of_fb_likes'])):
self.maxFbLikes = float(row['no_of_fb_likes'])
if(float(row['total_profit'])<0.0):
if(self.maxProfit< (float(row['total_profit']) * -1)):
self.maxProfit = float(row['total_profit']) * -1
else:
if(self.maxProfit< float(row['total_profit'])):
self.maxProfit = float(row['total_profit'])
if(self.maxIMDBScore<float(row['total_imdb_score'])):
self.maxIMDBScore = float(row['total_imdb_score'])
''' | [
"[email protected]"
] | |
1b9306276df1e8016f7d5311a7b3451ba0f93698 | eade42bf8060f4d692724ab31275a13efac7e490 | /les3_5.py | b7e28263226270b2408f951848b07dd68864f788 | [] | no_license | Paradox-1337/less3 | 62ea2aa76b9a9382a23860781ac6f3e7c854d590 | ef8f3b0a409d8edb0c977e65f1c9867c16f86a84 | refs/heads/master | 2021-01-26T07:25:41.447000 | 2020-02-27T11:52:24 | 2020-02-27T11:52:24 | 243,364,600 | 0 | 0 | null | 2020-02-27T11:52:26 | 2020-02-26T21:00:02 | null | UTF-8 | Python | false | false | 659 | py | def my_func():
sum_result = 0
exit_code = False
    while not exit_code:
        number = input("Enter numbers to sum. To exit the program, enter '№' or '#' ").split()
result = 0
for element in range(len(number)):
if number[element] == '#' or number[element] == '№':
exit_code = True
break
else:
result = result + int(number[element])
sum_result = sum_result + result
    print("Resulting total: ", sum_result)
my_func()
| [
"[email protected]"
] | |
deba1ddba9647dd082d686355a5d99132d352633 | 99a56bc3baf9629123a07d0dacfe50ab6fee1268 | /millychocolates.py | 817ca936c26e4454fc23b51347b5d2f3e1b04ed4 | [] | no_license | varenya/algorithms | 43dc3ea7e9b87525314a738eaaa5ec58fc5e5b2d | 6afd0c61be3c696487ddeac6317c5462d0208a7b | refs/heads/master | 2020-12-29T02:20:01.821094 | 2017-02-28T15:32:45 | 2017-02-28T15:32:45 | 52,890,735 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | T = int(raw_input())
N,K = map(int,raw_input().strip().split())
for n in xrange(N):
    # NOTE: the per-iteration logic was never written in the original file;
    # `pass` keeps this partial Python 2 snippet syntactically valid.
    pass
| [
"[email protected]"
] | |
bdcefd0fa9216be85610f15c0c246f81cbd616ed | 4ecdaa0d6bbe9f55cc343ac16ee7239021e5645a | /berg/metadata_util.py | 7d1d6da725ecceab132ea4ae570294622c2ef6f5 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | isabella232/cloud-berg | 8351161d91607fdd9cb2b4e0078f1820313335a5 | 41e38e44d4e7572a230e5eae204a19204cbc2edd | refs/heads/master | 2022-01-13T16:31:07.107085 | 2018-11-17T21:23:52 | 2018-11-17T21:23:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import sys
from berg import logger, gsutil
from berg.configuration import config
from colorama import Fore
def local_path(name):
return os.path.join(config.local_berg_root, 'jobs',
"%s_job_metadata.json" % name)
def gcs_path(name):
return os.path.join(config.gcs_berg_root, 'jobs',
"%s_job_metadata.json" % name)
def parse_local(name, permissive=False):
try:
with open(local_path(name), 'r') as f:
return json.loads(f.read())
except FileNotFoundError as e:
if permissive:
return {}
else:
raise e
def save_to_local_path(metadata, name):
path = local_path(name)
os.makedirs(os.path.dirname(path), exist_ok=True)
logger.debug("Wrote metadata to %s" % path)
with open(path, 'w') as f:
f.write(json.dumps(metadata, indent=4))
def upload_to_gcs(name):
gsutil.cp(local_path(name), gcs_path(name))
def upload_copy_to_gcs_results_dir(name, results_dir):
dest = os.path.join(config.gcs_results_root, results_dir,
"berg_job_metadata.json")
gsutil.cp(local_path(name), dest)
def fetch_and_parse(name):
path = local_path(name)
os.makedirs(os.path.dirname(path), exist_ok=True)
gsutil.cp(gcs_path(name), path)
return parse_local(name)
def sketchy_guess_at_results_dir_from_cmd(cmd):
"""Try to figure out the results_dir from cmd, if we fail, return '<none>' """
    match = re.search(r'berg_results/(\S*)', cmd)
if match is not None and len(match.groups()) == 1:
return match[1].strip()
else:
print(Fore.RED + "Could not guess the results_dir from the command. "
"Please specify it explicitly with --results-dir")
sys.exit(1)
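# Example (hypothetical command string):
#   sketchy_guess_at_results_dir_from_cmd("python train.py berg_results/exp1")
# returns "exp1"; with no "berg_results/..." token the process exits instead.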
| [
"[email protected]"
] | |
8228c39d2a5bcb4fe820da980edae8c242bcdc15 | b0110e27e3162e2092259dd299481de1dafb4ea8 | /space/orbit-old.py | a9d6b5bdbb7833e0ad7d7a2d6a00c44d44099913 | [
"MIT"
] | permissive | mobarski/sandbox | f9be203bf7015f6df70badd605a40172b63a90f8 | f9054fb3252488208e503a87efba5df74fc70538 | refs/heads/master | 2023-05-29T14:51:00.125028 | 2023-05-14T21:02:38 | 2023-05-14T21:02:38 | 86,854,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | from util import *
from math import *
## CONSTANTS #########################################
# gravitational constant
G = 6.673e-11
# gravitational acceleration at earth surface
g = 9.81
# stefan-boltzmann constant
sigma = 5.67e-8
GM=namespace('Gravitational constant times body mass [m^3/s^2]')
GM.sun = 1.327e20
GM.earth = 3.986e14
GM.moon = 4.903e12
GM.mercury = 2.094e13
GM.venus = 3.249e14
GM.mars = 4.269e13
GM.jupiter = 1.267e17
mu=GM
S=namespace('Solar irradiation constant [W/m2]')
S.earth = 1.361e3
M=namespace('Mass [kg]')
M.moon = 7.348e22
M.earth = 5.973e24
M.mars = 6.417e23
AU = 1.496e11
DAU=namespace('Distance to central body [AU]')
DAU.mercury = 0.387
DAU.venus = 0.723
DAU.earth = 1.0
DAU.mars = 1.524
DAU.jupiter = 5.204
D=namespace('Distance to central body [m]')
D.moon = 3.844e8
D.mercury = DAU.mercury * AU
D.venus = DAU.venus * AU
D.earth = AU
D.mars = DAU.mars * AU
D.jupiter = DAU.jupiter * AU
R=namespace('Radius (equatorial) [m]')
R.sun = 6.955e8
R.earth = 6.378e6
R.moon = 1.738e6
R.mars = 3.396e6
SRP=namespace('Sidereal Rotation Period [s]')
SRP.earth = to_seconds(23,56,4)
SRP.mars = to_seconds(24,37,22)
## FORMULAS #########################################
class formula:
def __init__(self,hint,info='',**formulas):
self.hint=hint
self.info=info
self.formula=formulas
def copy(self):
f = formula(self.hint,**self.formula)
f.__dict__=self.__dict__.copy()
return f
def expand(self,x):
f = self.formula[x]
while '{' in f:
f = f.replace('{','({').replace('}','})').format(**self.__dict__)
return f
def __getattr__(self,x):
return eval(self.expand(x))
##
v_circ = formula("circular orbit velocity",
v='({u}/{r})**0.5',
u='{r}*{v}**2',
r='{u}/{v}**2')
v_elip = formula("elipse orbit velocity",
v='(2*{u}/{r}-{u}/{a})**0.5',
u='{r}*{v}**2',
r='{u}/{v}**2')
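# Note: 'v_elip' encodes the vis-viva equation, v = sqrt(mu*(2/r - 1/a)); for a
# circular orbit (a == r) it reduces to the 'v_circ' expression sqrt(mu/r).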
delta_v = formula("",
    dv='g*{isp}*log({mi}/{mf})')  # braces added: isp/mi/mf are instance attributes, not module names
f_grav= formula("gravitational force",
fg="{u}*{m}/{r}**2")
f_c = formula("centrifugal force",
fc="{v}**2/{r}")
a_grav= formula("gravitational acceleration",
a="{u}/{r}**2")
e_pot = formula("",
ep="-{u}*{m}/{r}")
e_kin = formula("",
ek="v**2*m/2")
t = formula("orbital period",
t="2*pi*({a}**3/{u})**0.5")
ecc = formula('eccentricity',
e='({ra}-{rp})/({ra}+{rp})')
n = formula('mean motion',
n="({u}/{a}**3)**0.5")
# TODO conflict with equatorial radius
# R=namespace('specific gas constant [J/(kg*K)]')
# R.air = 287
orbit=m=model()
m.v_doc = "velocity [m/s]"
m.v_fun = lambda m: (2*m.u/m.r - m.u/m.a)**0.5
m.a_doc = "semi-major axis [m]"
m.a_fun = lambda m: m.r
m.r_doc = "distance to the center of the central body [m]"
m.u_doc = "G*M of the central body"
grav=m=model()
m.a_doc = "gravitational acceleration"
m.a_fun = lambda m: m.u/m.r**2
m.u_doc = "G*M of the central body"
m.r_doc = "distance to the center of the central body [m]"
if __name__=="__main__":
a_grav.u='GM.earth'
a_grav.r='R.earth+100e3'
print(a_grav.a)
grav.u=GM.earth
grav.r=R.earth+100e3
print(grav.a)
f= formula("gravitational acceleration")
f.formula['a']="{u}/{r}**2"
f.u=GM.earth
f.r=R.earth+100e3
print(f.a)
| [
"[email protected]"
] | |
a282527427ed10a1b26ef595c206ce720b6cbe54 | c7b067c6fee136fbd6cb52fea06acd6f301d1a46 | /create_db_tables.py | 3eaec0c1cc8fba7f017c22dacdff950ab5bee0bb | [] | no_license | KaranKamath/netmaidScraper | d3de9b8b879da08cd6b432fea65ba61c1cef105f | a4b38e834d7d2ac27a8af44a8f6c88606ffa43f0 | refs/heads/master | 2020-05-17T14:41:26.890868 | 2014-10-22T08:11:03 | 2014-10-22T08:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | import sqlite3
conn = sqlite3.connect('scraper.db')
c = conn.cursor()
c.execute('''CREATE TABLE maids(
urlID INTEGER PRIMARY KEY,
ref_code TEXT,
name TEXT,
type TEXT,
base_salary TEXT,
rest_day_preference TEXT,
maid_agency TEXT,
nationality TEXT,
date_of_birth TEXT,
place_of_birth TEXT,
siblings TEXT,
height TEXT,
weight TEXT,
religion TEXT,
marital_status TEXT,
children TEXT,
education TEXT,
language_skill TEXT,
pref_cares_for_children TEXT,
pref_cares_for_elderly TEXT,
pref_cares_for_disabled TEXT,
pref_housework TEXT,
pref_cooking TEXT,
other_handles_pork TEXT,
other_eats_pork TEXT,
other_handles_beef TEXT,
other_cares_for_dog_or_cat TEXT,
other_gardening TEXT,
other_sewing TEXT,
other_washes_car TEXT,
other_works_off_days_for_compensation TEXT,
working_experience TEXT,
maid_introduction TEXT,
img_path TEXT,
init_date TEXT,
as_of_date TEXT,
expired_date TEXT)''');
conn.commit()
conn.close()
| [
"[email protected]"
] | |
cf1092031a01aaba0eb887802f2bebe8950445e1 | f7324876f381706c4fd7486ddb925317841e6bf6 | /testset_expts.py | c2a777036522cfba0e5918b94cb414fab9885431 | [] | no_license | bradleyrp/amx-extras | 21ebdb93acf919498ed674e1347c7d71913f3bd1 | 8f188c47de0bed0c34a73ee289b6a870956af354 | refs/heads/master | 2021-01-11T15:18:26.263637 | 2018-04-27T17:10:15 | 2018-04-27T17:10:15 | 80,324,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,712 | py | {
'table':{
#####
####
###
##
#
#---mimic a user copy command for realistic test sets
'tags':['aamd_cgmd','tag_support','tested_2017.09.18'],
'quick':"""
import amx,shutil
shutil.copyfile(amx.settings.ready,amx.settings.store)
""",
},
'bilayer_288_demo':{
#####
####
###
##
#
#---a legacy test used to make a small bilayer for the structure-repo
'tags':['cgmd','tested_2017.09.14'],
'prelude':'make go lipidome clean && make clean sure',
'metarun':[
{'step':'bilayer','do':'bilayer_control_cgmd','settings':"""
#---this demo generates a small coarse-grained bilayer for use in a test set
#---this test was used to generate @structure-repo/bilayers-cgmd/bilayer-cgmd-288.gro
step: bilayer
monolayer top: 144
composition top: {'DOPC':0.64,'DOPS':0.16,'POP2':0.2}
composition bottom: {'POPC':1.0}
thickness: 18
"""},
{'quick':'table','settings':"""
ready: s01-bilayer/md.part0001.gro
store: inputs/bilayer-cgmd-288.gro
"""},
]},
###---DEVELOPMENT NOTES
'comment_extras_testset':{'comment':"""
TESTSETS:
1. testset_bilayer_protein_free: attach helix0 structure from @structure-repo to a new, free bilayer
requires 9.4min and is generally stable thanks to npt-bilayer equilibration bilayer_protein_adhesion
may be less stable on dramatically different system sizes (recommend multiply step for large systems)
2. testset_bilayer_protein_flat: attach helix0 structure from @structure-repo to a new, flat bilayer
equivalent to testset_bilayer_protein_free with added restraints
note that users should use script-continue.sh to run the simulation until satisfactory binding
restraints can be released with "make go bilayer_release" which was tested with the enth_demo experiment
3. testset_ultra1: a combination testset that includes items above. useful only for validating automacs
NOTES:
-- the testsets are somewhat slower than other examples (e.g. enth_demo) because they make new bilayers
-- the "table" step simulates a user who made one simulation and copied the result to inputs for another
-- no test sets to date (2017.09.18) work without a pre-made, *complete* protein structure
-- users who adapt these methods should be careful to check their topology and protein placement
"""},
'testset_bilayer_protein_free':{
#####
####
###
##
#
'tags':['cgmd','tested_2017.09.20','note_structure_repo_protein'],
'metarun':[
{'step':'bilayer','do':'bilayer_control_cgmd','settings':"""
step: bilayer
monolayer top: 90
monolayer bottom: 90
composition top: {'DOPC':0.64,'DOPS':0.16,'POP2':0.2}
composition bottom: {'POPC':1.0}
"""},
{'quick':'table','settings':"""
ready: s01-bilayer/md.part0001.gro
store: inputs/bilayer-cgmd-small.gro
"""},
{'step':'protein','do':'martinize','settings':"""
start structure: inputs/structure-repo/proteins/helix0.pdb
"""},
{'step':'adhere','do':'bilayer_protein_adhesion','settings':"""
force field: martini_upright_alt
sources: ['@martini/auto_ff/martini_upright_alt.ff']
placement method: banana
group up: resid 19
group down: resid 7
group origin: resid 7
bilayer structure: inputs/bilayer-cgmd-small.gro
protein_lattice:|{
'nrows':1,'ncols':1,
'lattice_type':'square',
'space_scale':20,
'total_proteins':1,
'protein_shift_up':1.0,}
"""},
]},
'testset_bilayer_protein_flat':{
#####
####
###
##
#
'tags':['cgmd','tested_2017.09.20','note_structure_repo_protein'],
'prelude':"make go lipidome clean && make clean sure",
'metarun':[
{'step':'bilayer','do':'bilayer_control_flat','settings':"""
step: bilayer
monolayer top: 90
monolayer bottom: 90
composition top: {'DOPC':0.64,'DOPS':0.16,'POP2':0.2}
composition bottom: {'POPC':1.0}
"""},
{'quick':'table','settings':"""
ready: s01-bilayer/md.part0001.gro
store: inputs/bilayer-cgmd-small.gro
"""},
{'step':'protein','do':'martinize','settings':"""
start structure: inputs/structure-repo/proteins/helix0.pdb
"""},
{'step':'adhere','do':'bilayer_protein_adhesion','settings':"""
force field: martini_upright_alt
sources: ['@martini/auto_ff/martini_upright_alt.ff']
placement method: banana
group up: resid 19
group down: resid 7
group origin: resid 7
bilayer structure: inputs/bilayer-cgmd-small.gro
protein_lattice:|{
'nrows':1,'ncols':1,
'lattice_type':'square',
'space_scale':20,
'total_proteins':1,
'protein_shift_up':1.0,}
#---EQUILIBRATION
equilibration: ['npt-bilayer-short','npt-bilayer']
mdp specs:|{
'group':'cgmd',
'mdps':{
'input-em-steep-in.mdp':['minimize'],
'input-md-npt-bilayer-short-eq-in.mdp':[{'restrain':'posre-com-only',
'pressure':'npt-semiisotropic-weak',
'nsteps':500000,'groups':'protein','temperature':'protein','dt':0.001}],
'input-md-npt-bilayer-eq-in.mdp':[{'restrain':'posre-com-only',
'pressure':'npt-semiisotropic-weak',
'nsteps':500000,'groups':'protein','temperature':'protein','dt':0.01}],
'input-md-in.mdp':[{'restrain':'posre-com-only','pressure':'npt-semiisotropic-weak',
'nsteps':500000,'groups':'protein','temperature':'protein'}],},}
"""},
]},
'testset_ultra1':{
#####
####
###
##
#
#---a combination testset
'tags':['cgmd','dev'],
'metarun':[
{'step':'bilayer','do':'bilayer_control_cgmd','settings':"""
step: bilayer
monolayer top: 90
composition top: {'DOPC':0.64,'DOPS':0.16,'POP2':0.2}
composition bottom: {'POPC':1.0}
"""},
{'quick':'table','settings':"""
ready: s01-bilayer/md.part0001.gro
store: inputs/bilayer-cgmd-small.gro
"""},
{'step':'bilayer','do':'bilayer_control_flat','settings':"""
step: bilayer
monolayer top: 90
composition top: {'DOPC':0.64,'DOPS':0.16,'POP2':0.2}
composition bottom: {'POPC':1.0}
"""},
{'quick':'table','settings':"""
ready: s01-bilayer/md.part0001.gro
store: inputs/bilayer-cgmd-small-flat.gro
"""},
{'step':'protein','do':'martinize','settings':"""
start structure: inputs/helix0.pdb
"""},
{'step':'adhere','do':'bilayer_protein_adhesion','settings':"""
force field: martini-sources
sources: ['@martini/martini-sources.ff']
placement method: banana
group up: resid 19
group down: resid 7
group origin: resid 7
bilayer structure: inputs/bilayer-cgmd-small.gro
protein_lattice:|{
'nrows':1,'ncols':1,
'lattice_type':'square',
'space_scale':20,
'total_proteins':1,
'protein_shift_up':1.0,}
"""},
{'step':'protein','do':'martinize','settings':"""
start structure: inputs/helix0.pdb
"""},
{'step':'adhere','do':'bilayer_protein_adhesion','settings':"""
force field: martini_upright_alt
sources: ['@martini/auto_ff/martini_upright_alt.ff']
placement method: banana
group up: resid 19
group down: resid 7
group origin: resid 7
bilayer structure: inputs/bilayer-cgmd-small-flat.gro
protein_lattice:|{
'nrows':1,'ncols':1,
'lattice_type':'square',
'space_scale':20,
'total_proteins':1,
'protein_shift_up':1.0,
}
"""},
]},
}
| [
"[email protected]"
] | |
16e55bfaa85d3bafcfc0208ade4760ba28fbb0e5 | 546d07b3f78313895af9bfd70a480eab92bcba68 | /appMain/migrations/0013_auto_20210926_1953.py | 789a1a4e0642f876cc759525fa127955b12d7f6a | [] | no_license | NU25113/KingIceManDjango | 4c1c08319ac70a6e3ad9ca241eb0214dc477a813 | e6b2e6b26869052ad5d9b2d55b023f8c56149988 | refs/heads/master | 2023-08-17T13:27:47.378855 | 2021-09-29T14:35:21 | 2021-09-29T14:35:21 | 411,663,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 3.1.6 on 2021-09-26 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appMain', '0012_income'),
]
operations = [
migrations.RemoveField(
model_name='agency_rent',
name='total',
),
migrations.AlterField(
model_name='income',
name='total_price',
field=models.IntegerField(blank=True, default=1, null=True),
),
]
| [
"[email protected]"
] | |
c302bd0f7915622567d722cecc72a0fa8d7a454e | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/AddTwoNumbers_v2.py | d543f46cb1cd4d03c07c46e63c81d2e789227d84 | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param {ListNode} l1
# @param {ListNode} l2
# @return {ListNode}
def addTwoNumbers(self, l1, l2):
if l1 is None:
return l2
if l2 is None:
return l1
lr = p = ListNode(0)
carry = 0
while l1 or l2 or carry:
a = l1.val if l1 else 0
b = l2.val if l2 else 0
r = a + b + carry
carry = r // 10
p.val = r % 10
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
if l1 or l2 or carry:
p.next = ListNode(0)
p = p.next
return lr
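# Worked example: adding the single-node lists (5) and (5) gives 5 + 5 = 10,
# so the result is the list 0 -> 1 (digit 0 first, then the carry as a node).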
def print_list(list_head):
print_l(list_head)
print '\n'
def print_l(list_head):
if list_head:
print list_head.val,
print_l(list_head.next)
if __name__ == '__main__':
l1a = ListNode(5)
l1 = l1a
l2a = ListNode(5)
l2 = l2a
s = Solution()
lr = s.addTwoNumbers(l1, l2)
print_list(l1)
print_list(l2)
print_list(lr)
| [
"[email protected]"
] | |
6bfb1f17c3ccc3769a46c7ec0e1de921b8d9f251 | 6e4e6b64c035881f1cff39db616b0a80e1568c51 | /ARC015/q1.py | 8625a990d5f7a2e9da866e9e6ec2cfe7063a5c0c | [] | no_license | Lischero/Atcoder | f7471a85ee553e3ae791e3e5670468aea1fa53cc | f674d6a20a56eebdafa6d50d5d2d0f4030e5eace | refs/heads/master | 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | # -*- coding:utf-8 -*-
n = int(input())
print((9/5*n)+32)
| [
"[email protected]"
] | |
0e0f29f3ce10a11abd47d59d581fd88e0dd5c314 | c4681fb41c49e4b8a7a9e2bb74e1cc59d91eb386 | /src/paymo_functions.py | abd4ae33319e495b671edcd0d38af110cfd4218b | [] | no_license | k4trina/Digital-Wallet | 8984c88ac84cdd8705866e08e76e5875dc4e85f8 | e62d9261c0e8c0dd0bfc7297b934c4d58bb9648d | refs/heads/master | 2020-07-28T12:36:29.998835 | 2016-11-10T19:24:38 | 2016-11-10T19:24:38 | 73,410,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,431 | py | # paymo_functions.py
# Insight Data Engineering Coding Challenge, 2016 November
# Katrina Sitkovits, [email protected]
#
# This file contains helper functions for Features 1, 2, 3, and additional Feature 4
#
# Get sender and receiver IDs from payment line
# ID1 = sender (tx), ID2 = receiver (rx)
def get_IDs ( line ):
index = 0
tx = 0
rx = 0
for segment in line.split(', '):
# ignore timestamp (index==0)
if (index == 1):
tx = segment
elif (index == 2):
rx = segment
break # ignore payment amount and user message
index += 1
return tx, rx
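# Example (record format assumed from the scheme described further below):
#   "2016-11-02 09:38:53, 49466, 6989, 23.74, thanks!" -> tx='49466', rx='6989'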
# Update graph connections based on incoming payment records from stream
# Assume all new connections in the graph are marked valid
def update_graph ( graph, tx, rx ):
if tx in graph:
if rx not in graph[tx]: graph[tx].append(rx)
else:
graph[tx] = [rx]
if rx in graph:
if tx not in graph[rx]: graph[rx].append(tx)
else:
graph[rx] = [tx]
return
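# Example: calling update_graph(g, '1', '2') on an empty dict g leaves
# g == {'1': ['2'], '2': ['1']} -- an undirected adjacency-list edge.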
# Construct initial network graph of users/friends from batch file
def construct_initial_graph ( graph, batch_payment_file ):
batch_payment_input = open(batch_payment_file, "r")
firstline = True
for line in batch_payment_input:
if firstline:
firstline = False
continue
tx, rx = get_IDs(line)
update_graph(graph, tx, rx)
batch_payment_input.close()
# Depth-first search traverses graph starting from vertex node,
# and visits all children that haven't previously been visited
# Returns trusted if node is found within max number of search degree levels
def dfs (graph, vertex, rx, level, visited, max_search_degree):
trusted = False
visited.add(vertex)
vertex_children = set(graph[vertex])-visited
if rx in vertex_children:
trusted = True
else:
if level < max_search_degree-1:
for child in vertex_children:
trusted = dfs(graph,child,rx,level+1,visited,max_search_degree)
if trusted: break
return trusted
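# Example: with max_search_degree=1 only the sender's direct friends are
# trusted; with 2, friends-of-friends also pass, and so on per extra level.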
# Stream payments from stream file
def stream_payments(graph, stream_payment_file, output_file, max_levels):
stream_payment_input = open(stream_payment_file, "r")
output_verified = open(output_file, "w")
firstline = True
for line in stream_payment_input:
if firstline:
firstline = False
continue
tx, rx = get_IDs(line)
# Perform depth-first search to find receiver up to max_levels of degree separation
if dfs(graph, tx,rx,0,set(),max_levels): output_verified.write("trusted\n")
else: output_verified.write("unverified\n")
# Update network graph since the payment is assumed to be verified once completed
update_graph(graph,tx,rx)
stream_payment_input.close()
output_verified.close()
# RSA-encrypted payment stream from sender to PayMo
# This function implements a more secure version of the stream_payments() function above.
# PayMo first generates both a public and private key using the RSA encryption scheme.
# PayMo shares the public key with all verified users in the graph/network.
# PayMo does not reveal the private key to anyone else.
# When a sender performs a payment, we assume that the payment stream/record
# {timestamp, ID1, ID2, amount, message} is encrypted by the sender using PayMo's public key.
# PayMo decrypts each incoming payment record in the stream using our private key.
# We then verify if the transaction is trusted as in Feature 3.
# This function requires the Python developer package, and the following libraries: crypto and pycrypto
# $ sudo apt-get install python-dev
# $ sudo apt install python-pip
# $ pip install crypto
# $ pip install pycrypto
from Crypto.PublicKey import RSA
from Crypto import Random
def encrypted_stream_payments(graph, stream_payment_file, output_file, max_levels):
# RSA preamble -- PayMo side
key = RSA.generate(2048, Random.new().read) # create RSA key object
my_private_key = key.exportKey('PEM') # generate PayMo's private key
paymo_private_RSA_obj = RSA.importKey(my_private_key) # PayMo's private key object
my_public_key = key.publickey().exportKey('PEM') # generate PayMo's public key
stream_payment_input = open(stream_payment_file, "r")
output_verified = open(output_file, "w")
firstline = True
for line in stream_payment_input:
if firstline:
firstline = False
continue
# Assume that each senders first encrypts the stream payment string with PayMo public key
user_public_RSA_obj = RSA.importKey(my_public_key) # each user creates an RSA object using PayMo's public key
msg_plaintext = str(line)
        msg_encrypted = user_public_RSA_obj.encrypt(msg_plaintext, 0)
        # Assume that the payment/message transmission occurs here
        # PayMo decrypts each incoming stream payment with our own private key
        msg_decrypted = paymo_private_RSA_obj.decrypt(msg_encrypted)
# Perform the same remaining steps on the PayMo side
tx, rx = get_IDs(msg_decrypted)
if dfs(graph, tx,rx,0,set(),max_levels): output_verified.write("trusted\n")
else: output_verified.write("unverified\n")
update_graph(graph,tx,rx)
stream_payment_input.close()
output_verified.close()
| [
"[email protected]"
] | |
80ee460716517889a3de4b4e7bd6a07c04c2dc6b | 8791b5ee6e690e0a0d40ae7c95f2f87ba10da72d | /SPP/guidgrabber/bin/start_ravello_session.py | 8554d8075ab3dd00d1dae57d4d679b08dc40c63d | [] | no_license | thomas-crowe/GUIDGrabber | 18191d87d0b65780b6b7f4011b75ff123efbd6f5 | 73c01e665002f0b549b8a67c2e304251e7f35950 | refs/heads/master | 2020-06-06T02:01:00.649531 | 2019-06-18T20:22:23 | 2019-06-18T20:22:23 | 192,605,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,928 | py | #!/usr/bin/python3
import argparse
import requests
import urllib.parse
import time
import ravello_sdk
from common import *
import re
from requests.auth import HTTPBasicAuth
parser = argparse.ArgumentParser(description="Get Services From CloudForms")
parser.add_argument('--cfurl', help='CloudForms Appliance URL', required=True)
parser.add_argument('--cfuser', help='CloudForms Appliance User', required=True)
parser.add_argument('--cfpass', help='CloudForms Appliance Password', required=True)
parser.add_argument('--ufilter', help='User To Filter Searches To', required=True, default=None)
parser.add_argument('--session', help='Session', required=True, default=None)
parser.add_argument('--insecure', help='Use Insecure SSL Cert', action="store_false")
parser.add_argument('--labcode', help='Lab Code', required=True)
parser.add_argument('--group', help='Group Count for Batch Deletions', type=int, default=10)
parser.add_argument('--sleep', help='Sleep secs between groups', type=int, default=300)
parser.add_argument('--ha', help='primary|secondary|none', default='none', choices=['primary','secondary','none'])
args = parser.parse_args()
cfurl = args.cfurl
cfuser = args.cfuser
cfpass = args.cfpass
userFilter = args.ufilter
session = args.session
sslVerify = args.insecure
labCode = args.labcode
group = args.group
sleept = args.sleep
ha = args.ha
def start(app,app_time,client):
status = application_state(app)
    app_name = app['name']  # keep as str: with the Python 3 shebang, .encode('utf-8') would format as b'...'
if status == 'STARTED':
        print('Application {0} is already in {1} state, no action needed'.format(app_name,status))
exp = {'expirationFromNowSeconds': 60*app_time}
client.set_application_expiration(app['id'], exp)
print('Setting expiration time of application {0} to {1} minutes'.format(app_name,app_time))
elif 'STARTING' in status or 'STOPPING' in status:
print('Application {0} action in progress, not making any change'.format(app_name))
elif 'STOPPED' in status:
if app_time != 0:
exp = {'expirationFromNowSeconds': 60*app_time}
client.set_application_expiration(app['id'], exp)
print('Setting expiration time of application {0} to {1} minutes'.format(app_name,app_time))
client.start_application(app['id'])
print('Starting application {}'.format(app_name))
else:
log.error('Application {0} is in unknown state {1}, canceling START command'.format(app_name,status))
print('Application {0} is in unknown state {1}, canceling START command'.format(app_name,status))
return False
return True
def gettok():
response = requests.get(cfurl + "/api/auth", auth=HTTPBasicAuth(cfuser, cfpass), verify=sslVerify)
data = response.json()
return data['auth_token']
def apicall(token, url, op, inp = None ):
#print("CFURL: " + cfurl)
#print("URL: " + url)
if url.startswith('http'):
eurl = url
else:
eurl = cfurl + url
head = {'Content-Type': 'application/json', 'X-Auth-Token': token, 'accept': 'application/json;version=2'}
if op == "get":
response = requests.get(eurl, headers=head, verify=sslVerify)
elif op == "post":
response = requests.post(eurl, headers=head, verify=sslVerify, data = inp)
#print("RESPONSE: " + response.text)
obj = response.json()
return obj.get('resources')
token = gettok()
surl = "/api/services?attributes=tags%2Ccustom_attributes&expand=resources"
if userFilter:
url = "/api/users?expand=resources&filter%5B%5D=userid='" + userFilter + "'"
#print("DEBUG: " + url)
users = apicall(token, url, "get", inp = None )
#print("DEBUG users: " + str(users))
if not users:
print(("ERROR: No such user " + userFilter))
exit ()
else:
userID = str(users[0]['id'])
surl = surl + "&filter%5B%5D=evm_owner_id='" + userID + "'"
services = apicall(token, surl, "get", inp = None )
appIDs = []
for svc in services:
lc = ""
ses = ""
appid = ""
for cab in svc['custom_attributes']:
if cab['name'] == 'labCode':
lc = cab['value']
elif cab['name'] == 'session':
ses = cab['value']
elif cab['name'] == 'applicationid':
appid = cab['value']
elif cab['name'] == 'HA':
thisha = cab['value']
if ha != "none":
if ses == session and lc == labCode and thisha == ha:
appIDs.append(appid)
else:
if ses == session and lc == labCode:
#print(svc['name'])
#print(svc['href'])
appIDs.append(appid)
#Connect to Ravello
username,password = get_credentials()
client = connect(username, password)
if not client:
    exit(1)
x = 1
for appID in appIDs:
app = client.get_application(appID)
app_time = 480
start(app,app_time,client)
if x >= group:
print('Sleeping')
time.sleep(sleept)
x = 0
x = x + 1
| [
"[email protected]"
] | |
1ec1f56bd18f8a82568356fe621e4593be8a368a | 2565970a2461fec97c0b0972eed161d9bd9e268f | /test_finetuning.py | 74bcbec628e6f36adb5ccd96b187b105430735d5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sskram/nn-toolbox | ee40e2d0f8792444a6d46bd477ffc69b144691a1 | b998d61800311d788bf3c4c5f517f1fd6d9c2e66 | refs/heads/master | 2020-07-01T02:55:22.318592 | 2019-07-21T06:46:07 | 2019-07-21T06:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,678 | py | import torchvision
from torch.nn import *
from torchvision.datasets import ImageFolder, CIFAR10
from torchvision.models import resnet18
from torchvision.transforms import *
from torch.optim import *
from torch.optim.lr_scheduler import CosineAnnealingLR
from nntoolbox.optim import AdamW
from torch.utils.data import random_split
# from adabound import AdaBound
from nntoolbox.vision.components import *
from nntoolbox.vision.learner import SupervisedImageLearner
from nntoolbox.utils import load_model, LRFinder, get_first_batch, get_device
from nntoolbox.callbacks import *
from nntoolbox.metrics import Accuracy, Loss
from nntoolbox.vision.transforms import Cutout
from nntoolbox.vision.models import ImageClassifier, EnsembleImageClassifier
from nntoolbox.losses import SmoothedCrossEntropy
from nntoolbox.init import lsuv_init
import math
torch.backends.cudnn.benchmark=True
pretrained_model = resnet18()
# print(modules)
from nntoolbox.utils import cut_model, get_trainable_parameters
feature, head = cut_model(pretrained_model)
for param in feature.parameters():
param.requires_grad = False
model = nn.Sequential(
feature,
FeedforwardBlock(
in_channels=512,
out_features=10,
pool_output_size=2,
hidden_layer_sizes=(256, 128)
)
)
# print(model._modules['0']._modules[str(0)])
from typing import List
def unfreeze(module: Sequential, optimizer: Optimizer, unfreeze_from: int, unfreeze_to: int):
"""
Unfreeze a model from ind
:param module:
:param optimizer
:param unfreeze_from:
:param unfreeze_to:
:return:
"""
for ind in range(len(module)):
submodule = module._modules[str(ind)]
if ind < unfreeze_from:
for param in submodule.parameters():
param.requires_grad = False
elif ind < unfreeze_to:
for param in submodule.parameters():
param.requires_grad = True
optimizer.add_param_group({'params': submodule.parameters()})
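# Usage sketch (assumption): for a Sequential feature extractor `feat` and an
# optimizer `opt`, unfreeze(feat, opt, 4, 6) thaws children 4 and 5, keeps
# children 0-3 frozen, and registers the thawed parameters with `opt`.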
class GradualUnfreezing(Callback):
def __init__(self, freeze_inds: List[int], unfreeze_every: int):
self._freeze_inds = freeze_inds
self._unfreeze_every = unfreeze_every
# def on_train_begin(self):
# self._freeze_inds = [len(self.learner._model._modules['0'])] + self._freeze_inds
#
# for i in range(1, len(self._freeze_inds)):
# unfreeze_from = self._freeze_inds[i]
# unfreeze_to = self._freeze_inds[i - 1]
#
# unfreeze(self.learner._model._modules['0'], self.learner._optimizer, unfreeze_from, unfreeze_to)
# print("Unfreeze feature after " + str(unfreeze_from))
# for ind in range(len(self.learner._model._modules['0'])):
# for param in self.learner._model._modules['0']._modules[str(ind)].parameters():
# param.requires_grad = False
# print("Unfreeze feature after " + str(freeze_to))
def on_epoch_end(self, logs: Dict[str, Any]) -> bool:
if logs['epoch'] % self._unfreeze_every == 0 \
and logs['epoch'] > 0 \
and logs['epoch'] // self._unfreeze_every < len(self._freeze_inds):
unfreeze_from = self._freeze_inds[logs['epoch'] // self._unfreeze_every]
unfreeze_to = self._freeze_inds[logs['epoch'] // self._unfreeze_every - 1]
# for ind in range(len(self.learner._model._modules['0'])):
# module = self.learner._model._modules['0']._modules[str(ind)]
# if ind < unfreeze_from:
# for param in module.parameters():
# param.requires_grad = False
# else:
# for param in module.parameters():
# param.requires_grad = True
# self.learner._optimizer.add_param_group({'params': module.parameters()})
unfreeze(self.learner._model._modules['0'], self.learner._optimizer, unfreeze_from, unfreeze_to)
print("Unfreeze feature after " + str(unfreeze_from))
return False
unfreezer = GradualUnfreezing([6, 4, 2, 0], 10)
# data = CIFAR10('data/', train=True, download=True, transform=ToTensor())
# train_size = int(0.8 * len(data))
# val_size = len(data) - train_size
# train_dataset, val_dataset = torch.utils.data.random_split(data, [train_size, val_size])
# train_dataset.dataset.transform = Compose(
# [
# RandomHorizontalFlip(),
# RandomResizedCrop(size=32, scale=(0.95, 1.0)),
# # Cutout(length=16, n_holes=1),
# ToTensor()
# ]
# )
#
# test_dataset = torchvision.datasets.CIFAR10('data/', train=False, download=True, transform=ToTensor())
train_val_dataset = ImageFolder(
'data/imagenette-160/train',
transform=Compose([
Resize((128, 128)),
ToTensor()
])
)
test_dataset = ImageFolder(
'data/imagenette-160/val',
transform=Compose([
Resize((128, 128)),
ToTensor()
])
)
train_size = int(0.8 * len(train_val_dataset))
val_size = len(train_val_dataset) - train_size
train_dataset, val_dataset = random_split(train_val_dataset, [train_size, val_size])
train_dataset.dataset.transform = Compose(
[
RandomHorizontalFlip(),
RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
# Cutout(length=16, n_holes=1),
ToTensor()
]
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=128, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)
# print(count_trainable_parameters(model)) # 14437816 3075928
optimizer = SGD(get_trainable_parameters(model), weight_decay=0.0001, lr=0.30, momentum=0.9)
learner = SupervisedImageLearner(
train_data=train_loader,
val_data=val_loader,
model=model,
criterion=SmoothedCrossEntropy().to(get_device()),
optimizer=optimizer,
mixup=True
)
# lr_finder = LRFinder(
# model=model,
# train_data=train_loader,
# criterion=SmoothedCrossEntropy(),
# optimizer=partial(SGD, lr=0.074, weight_decay=0.0001, momentum=0.9),
# device=get_device()
# )
# lr_finder.find_lr(warmup=100, callbacks=[ToDeviceCallback()])
swa = StochasticWeightAveraging(learner, average_after=5025, update_every=670)
callbacks = [
# ManifoldMixupCallback(learner=learner, modules=[layer_1, block_1]),
ToDeviceCallback(),
# MixedPrecisionV2(),
# InputProgressiveResizing(initial_size=80, max_size=160, upscale_every=10, upscale_factor=math.sqrt(2)),
# unfreezer,
Tensorboard(),
# ReduceLROnPlateauCB(optimizer, monitor='accuracy', mode='max', patience=10),
LRSchedulerCB(CosineAnnealingLR(optimizer, eta_min=0.10, T_max=335)),
swa,
LossLogger(),
ModelCheckpoint(learner=learner, filepath="weights/model.pt", monitor='accuracy', mode='max'),
]
metrics = {
"accuracy": Accuracy(),
"loss": Loss()
}
final = learner.learn(
n_epoch=500,
callbacks=callbacks,
metrics=metrics,
final_metric='accuracy'
)
print(final)
load_model(model=model, path="weights/model.pt")
classifier = ImageClassifier(model, tta_transform=Compose([
ToPILImage(),
RandomHorizontalFlip(),
RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
ToTensor()
]))
print(classifier.evaluate(test_loader))
print("Test SWA:")
model = swa.get_averaged_model()
classifier = ImageClassifier(model, tta_transform=Compose([
ToPILImage(),
RandomHorizontalFlip(),
RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
ToTensor()
]))
print(classifier.evaluate(test_loader)) | [
"[email protected]"
] | |
e780ca568e90b5bfc41bbd544fa248b23e021fda | f517ec5a88f7940b13be853050a07c5c3fe6ac2b | /quotetutorial/quotetutorial/spiders/quotes_spider.py | f00853aaf0227be2947b934ac3c28ed3400e5f1d | [] | no_license | Ahmmod097/ScrapyTutorial | e35cc7a717a24ec5313c638e6f91e0614a52307f | 8dd46c802375e38c191c1e055740c200b4cd311d | refs/heads/master | 2020-09-07T14:35:55.806519 | 2019-11-10T15:53:26 | 2019-11-10T15:53:26 | 220,812,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | import scrapy
from ..items import QuotetutorialItem
class QuoteSpider(scrapy.Spider):
name = 'quotes'
start_urls = [
'http://quotes.toscrape.com'
]
def parse(self, response):
items=QuotetutorialItem()
all_div_quotes=response.css("div.quote")
for quote in all_div_quotes:
title = quote.css("span.text::text").extract()
author = quote.css(".author::text").extract()
tags = quote.css(".tag::text").extract()
items['title'] = title
items['author'] = author
items['tags'] = tags
yield items
| [
"[email protected]"
] | |
875bebb4d44ea677fa57ab26b1ee653934bce9e1 | 87712b0c5eea5530a87db87ba66a157083ad5c2a | /searches/misplaced_tiles.py | 346a9167681d479f62208f96efa7b46e0565be42 | [] | no_license | jody-bailey/Slider-Puzzle-V2 | cb4b9d1e9c79cfefbf3235c684467bbecaec65cd | 43da9e3e46a3af403cd602ea56b78a4c196fd953 | refs/heads/master | 2020-04-02T08:58:50.958064 | 2018-11-15T07:30:59 | 2018-11-15T07:30:59 | 154,270,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,859 | py | # Jody Bailey
# Intro to AI
# 10/31/2018
# This class is used to perform the A* Misplaced Tiles search. It was designed
# to be able to function as a stand-alone class as long as it receives the
# required data to start.
from helpers.interface import Interface
from helpers.node import Node
from copy import deepcopy
import heapq
class MisplacedTiles(Interface):
# Constructor
def __init__(self, node):
self.heap = []
heapq.heappush(self.heap, (node.heuristic, 0, node))
heapq.heapify(self.heap)
self.node = node
self.visited = {node.state_string: node.state_string}
self.path = {node: [node.state_string]}
self.counter = 1
self.starting_state = node.state_string
self.solution_found = False
# Method to increment the counter
def count_up(self):
self.counter += 1
# Returns starting state
def get_starting_state(self):
return self.starting_state
# Returns whether a solution was found or not.
def get_solution_found(self):
if self.solution_found:
return 'Yes'
else:
return 'No'
# Returns the path of the current node
def get_path(self):
return self.node.path
    # Returns how many nodes have been explored
def get_node_count(self):
return len(self.visited)
# This method is used to test if the numbers are in the right
# place on the board
@staticmethod
def get_goal_position(num):
if num == 1:
return 0, 0
elif num == 2:
return 0, 1
elif num == 3:
return 0, 2
elif num == 4:
return 1, 0
elif num == 5:
return 1, 1
elif num == 6:
return 1, 2
elif num == 7:
return 2, 0
elif num == 8:
return 2, 1
elif num == 0:
return 2, 2
# Method to determine how many tiles are out of place.
@staticmethod
def out_of_place_tiles(array):
total = 0
for i in range(3):
for j in range(3):
position = (i, j)
if position == (0, 0):
if array[i][j] != 1:
total += 1
elif position == (0, 1):
if array[i][j] != 2:
total += 1
elif position == (0, 2):
if array[i][j] != 3:
total += 1
elif position == (1, 0):
if array[i][j] != 4:
total += 1
elif position == (1, 1):
if array[i][j] != 5:
total += 1
elif position == (1, 2):
if array[i][j] != 6:
total += 1
elif position == (2, 0):
if array[i][j] != 7:
total += 1
elif position == (2, 1):
if array[i][j] != 8:
total += 1
elif position == (2, 2):
if array[i][j] != 0:
total += 1
return total
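    # Example: the board [[1, 2, 3], [4, 5, 6], [7, 0, 8]] has two misplaced
    # tiles (the 8 and the blank), so this heuristic returns 2.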
# Method used to check if a state has already been visited
def check_visited(self, state):
return state in self.visited
def get_depth(self, node):
total = 0
while node.parent is not None:
node = node.parent
total += 1
return total
    # Method to add the moves found to the heap
def add_moves_to_heap(self, moves, parent):
for move in moves:
if not self.check_visited(move):
array = self.create_array(move)
self.count_up()
# node = self.create_node(array, move, parent=parent)
heuristic = self.out_of_place_tiles(array)
depth = self.get_depth(parent) + 1
heuristic = heuristic + depth
node = Node(array, move, heuristic=heuristic, parent=parent)
try:
this_parent = parent
self.path[node] = deepcopy(this_parent.path)
self.path[node].append(node.state_string)
node.path = self.path[node]
# node.heuristic = self.out_of_place_tiles(node.state_array)
except AttributeError:
'''do nothing'''
# self.heap.put((node.heuristic, node))
heapq.heappush(self.heap, (heuristic, self.counter, node))
self.visited.update({move: move})
    # Method to check the current location for children and return
    # those children to the run() method.
def check_moves(self, location):
possible_moves = []
# check left
new_loc = location[0], location[1] - 1
if self.check_bounds(new_loc):
test_node = self.create_array(self.node.state_string)
test_node = self.swap_locations(test_node, location, new_loc)
possible_moves.append(self.get_state_string(test_node))
# check down
new_loc = location[0] + 1, location[1]
if self.check_bounds(new_loc):
test_node = self.create_array(self.node.state_string)
test_node = self.swap_locations(test_node, location, new_loc)
possible_moves.append(self.get_state_string(test_node))
# check right
new_loc = location[0], location[1] + 1
if self.check_bounds(new_loc):
test_node = self.create_array(self.node.state_string)
test_node = self.swap_locations(test_node, location, new_loc)
possible_moves.append(self.get_state_string(test_node))
# check up
new_loc = location[0] - 1, location[1]
if self.check_bounds(new_loc):
test_node = self.create_array(self.node.state_string)
test_node = self.swap_locations(test_node, location, new_loc)
possible_moves.append(self.get_state_string(test_node))
return possible_moves
# Method to get the final path of the goal state.
def print_final_path(self, node):
my_list = self.path[node]
my_array_list = []
print('Final Path of Search:')
for elem in my_list:
my_array_list.append(self.create_array(elem))
# print('\n'.join(str(elem) for elem2 in my_array_list for row in elem2 for elem in row), end=' -> ')
for elem2 in my_array_list:
for row in elem2:
print(' '.join(str(elem) for elem in row))
print()
def get_final_depth(self):
return self.get_depth(self.node)
# Main method of this class. It brings together all of the functionality from
# the other methods and runs the search.
def run(self):
print('Running A* Misplaced Tiles Search...')
while self.heap:
next_node = heapq.heappop(self.heap)
self.node = next_node[2]
if not self.complete(self.node):
# if self.counter % 10000 == 0:
# print('{}'.format(self.counter))
location = self.locate_hole(self.node.state_array)
moves = self.check_moves(location)
self.add_moves_to_heap(moves, self.node)
if not self.heap:
print('empty queue')
print(self.counter)
return
else:
self.solution_found = True
print()
self.print_final_path(self.node)
print()
print('Depth of goal state: {}'.format(len(self.path[self.node])))
print('Total nodes generated: {}'.format(self.counter))
print()
return
| [
"[email protected]"
] | |
1f9df7ad4837b416f7849f88a38ff3ca943298c1 | 2aea0c8107d91519ae9d090c14ff7430d8593a21 | /src/facebook_tools/commands/__init__.py | a9155e7ad498193998050979d86368f5d6862533 | [] | no_license | osmay88/facebook-tools | cd775a2f0f73dd6004d3137e024525f22512788a | 11c061472b4476b9951142c25e1238b9762ce79a | refs/heads/master | 2023-01-02T06:19:26.562796 | 2020-10-22T03:08:04 | 2020-10-22T03:08:04 | 305,755,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from .token_commands import *
from .messaging_commands import * | [
"[email protected]"
] | |
91a8cc0846d15cb77f1dac39e86a83ba81da4c66 | 9b162310e5db0f714dbd6019894eb5b04192b6aa | /src/windows-gam.spec | 658f6450028749ee0c70daae3de86bc5c86028b0 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant",
"LicenseRef-scancode-zeusbench",
"NTP",
"metamail",
"Beerware",
"LicenseRef-scancode-rsa-1990",
"RSA-MD",
"Spencer-94",
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | xbl3/GAMADV-XTD | c1d68911f4116157173838856f49151e05cd5658 | a09efb7a10074dc052968ef82c1044f2a0b664b3 | refs/heads/master | 2022-04-08T22:30:39.715172 | 2020-02-22T18:05:00 | 2020-02-22T18:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | spec | # -*- mode: python -*-
ssl_json_files = [
('cacerts.pem', '.'),
('cros-aue-dates.json', '.'),
('cloudprint-v2.json', '.'),
('contacts-v3.json', '.'),
('email-audit-v1.json', '.'),
('email-settings-v2.json', '.'),
('sites-v1.json', '.')
]
a = Analysis(['gam.py'],
pathex=['C:\\GAMADV-XTD'],
datas=ssl_json_files,
hiddenimports=[],
hookspath=None,
excludes=['_tkinter'],
runtime_hooks=None)
for d in a.datas:
if 'pyconfig' in d[0]:
a.datas.remove(d)
break
pyz = PYZ(a.pure)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='gam.exe',
debug=False,
strip=None,
upx=True,
console=True )
| [
"[email protected]"
] | |
3f79e00b965db50e906a2485c2462430f3587e18 | 7af52c6aa0313a33bb437ea215c1b2dfed737a61 | /DjangoBlogPost/settings.py | e100dd6bf493d8d11eb0d4876c814b2ac5e355de | [] | no_license | eslamfaisal/DjangoBlogPost | c0e6721606d568cf0a3888f6c474430b1cfb71c9 | b7a124a069dd29c1dcc8faa5be2f6c3f430826c0 | refs/heads/main | 2023-05-13T22:23:12.376113 | 2021-06-02T07:14:57 | 2021-06-02T07:14:57 | 370,282,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,390 | py | """
Django settings for DjangoBlogPost project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-g-n&8il)l6ugbnajkf4-1r)!7!h%^(2p!$(8$%%*zaw6a%k^69'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# my apps
'personal',
'account',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoBlogPost.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTH_USER_MODEL = 'account.Account'
WSGI_APPLICATION = 'DjangoBlogPost.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
c6e1e8e4c74c351d7a7e2ed69672b2550f5367f3 | 54df8823933ad9e0d4d18b385856678f8b61589c | /mask_detection.py | 04d9205df0b0398c76f5878b8571fffea36c6cee | [] | no_license | SagarBIjja-trilocode/face_mask_detection-master | f679f9001bbc42f17b5c49d521ae80ee59065876 | 9ce926ee352368bf06f2b9b57e80dad3977e0625 | refs/heads/master | 2022-06-08T10:09:16.269289 | 2020-05-07T18:12:48 | 2020-05-07T18:12:48 | 262,119,872 | 1 | 0 | null | 2020-05-07T22:03:24 | 2020-05-07T17:49:13 | Python | UTF-8 | Python | false | false | 12,854 | py | import cv2
import time
import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
import win32com.client
def model_restore_from_pb(pb_path,node_dict):
config = tf.ConfigProto(log_device_placement=True,
allow_soft_placement=True,
)
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.6
sess = tf.Session(config=config)
with gfile.FastGFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
sess.run(tf.global_variables_initializer())
for key,value in node_dict.items():
node = sess.graph.get_tensor_by_name(value)
node_dict[key] = node
return sess,node_dict
def video_init(is_2_write=False,save_path=None):
writer = None
cap = cv2.VideoCapture(0)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)#default 640x480
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# width = 480
# height = 640
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
'''
ref:https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html
FourCC is a 4-byte code used to specify the video codec.
The list of available codes can be found in fourcc.org.
It is platform dependent. The following codecs work fine for me.
In Fedora: DIVX, XVID, MJPG, X264, WMV1, WMV2. (XVID is more preferable. MJPG results in high size video. X264 gives very small size video)
In Windows: DIVX (More to be tested and added)
In OSX: MJPG (.mp4), DIVX (.avi), X264 (.mkv).
FourCC code is passed as `cv.VideoWriter_fourcc('M','J','P','G')or cv.VideoWriter_fourcc(*'MJPG')` for MJPG.
'''
if is_2_write is True:
#fourcc = cv2.VideoWriter_fourcc('x', 'v', 'i', 'd')
#fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
if save_path is None:
save_path = 'demo.avi'
writer = cv2.VideoWriter(save_path, fourcc, 20, (int(width), int(height)))
return cap,height,width,writer
def generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios, offset=0.5):
'''
generate anchors.
:param feature_map_sizes: list of list, for example: [[40,40], [20,20]]
:param anchor_sizes: list of list, for example: [[0.05, 0.075], [0.1, 0.15]]
:param anchor_ratios: list of list, for example: [[1, 0.5], [1, 0.5]]
:param offset: default to 0.5
:return:
'''
anchor_bboxes = []
for idx, feature_size in enumerate(feature_map_sizes):
cx = (np.linspace(0, feature_size[0] - 1, feature_size[0]) + 0.5) / feature_size[0]
cy = (np.linspace(0, feature_size[1] - 1, feature_size[1]) + 0.5) / feature_size[1]
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid_expend = np.expand_dims(cx_grid, axis=-1)
cy_grid_expend = np.expand_dims(cy_grid, axis=-1)
center = np.concatenate((cx_grid_expend, cy_grid_expend), axis=-1)
num_anchors = len(anchor_sizes[idx]) + len(anchor_ratios[idx]) - 1
center_tiled = np.tile(center, (1, 1, 2* num_anchors))
anchor_width_heights = []
# different scales with the first aspect ratio
for scale in anchor_sizes[idx]:
ratio = anchor_ratios[idx][0] # select the first ratio
width = scale * np.sqrt(ratio)
height = scale / np.sqrt(ratio)
anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
# the first scale, with different aspect ratios (except the first one)
for ratio in anchor_ratios[idx][1:]:
s1 = anchor_sizes[idx][0] # select the first scale
width = s1 * np.sqrt(ratio)
height = s1 / np.sqrt(ratio)
anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
bbox_coords = center_tiled + np.array(anchor_width_heights)
bbox_coords_reshape = bbox_coords.reshape((-1, 4))
anchor_bboxes.append(bbox_coords_reshape)
anchor_bboxes = np.concatenate(anchor_bboxes, axis=0)
return anchor_bboxes
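def _anchor_shape_demo():
    # Added illustrative sketch (not in the original project): with a single
    # 2x2 feature map, two anchor sizes and two ratio entries, each cell gets
    # 2 + 2 - 1 = 3 anchors, so the call yields 2 * 2 * 3 = 12 boxes of
    # 4 coordinates each.
    demo = generate_anchors([[2, 2]], [[0.1, 0.15]], [[1, 0.5]])
    assert demo.shape == (12, 4)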
def decode_bbox(anchors, raw_outputs, variances=[0.1, 0.1, 0.2, 0.2]):
'''
Decode the actual bbox according to the anchors.
the anchor value order is:[xmin,ymin, xmax, ymax]
:param anchors: numpy array with shape [batch, num_anchors, 4]
:param raw_outputs: numpy array with the same shape with anchors
:param variances: list of float, default=[0.1, 0.1, 0.2, 0.2]
:return:
'''
anchor_centers_x = (anchors[:, :, 0:1] + anchors[:, :, 2:3]) / 2
anchor_centers_y = (anchors[:, :, 1:2] + anchors[:, :, 3:]) / 2
anchors_w = anchors[:, :, 2:3] - anchors[:, :, 0:1]
anchors_h = anchors[:, :, 3:] - anchors[:, :, 1:2]
raw_outputs_rescale = raw_outputs * np.array(variances)
predict_center_x = raw_outputs_rescale[:, :, 0:1] * anchors_w + anchor_centers_x
predict_center_y = raw_outputs_rescale[:, :, 1:2] * anchors_h + anchor_centers_y
predict_w = np.exp(raw_outputs_rescale[:, :, 2:3]) * anchors_w
predict_h = np.exp(raw_outputs_rescale[:, :, 3:]) * anchors_h
predict_xmin = predict_center_x - predict_w / 2
predict_ymin = predict_center_y - predict_h / 2
predict_xmax = predict_center_x + predict_w / 2
predict_ymax = predict_center_y + predict_h / 2
predict_bbox = np.concatenate([predict_xmin, predict_ymin, predict_xmax, predict_ymax], axis=-1)
return predict_bbox
def single_class_non_max_suppression(bboxes, confidences, conf_thresh=0.2, iou_thresh=0.5, keep_top_k=-1):
'''
do nms on single class.
Hint: for the specific class, given the bbox and its confidence,
1) sort the bbox according to the confidence from top to down, we call this a set
2) select the bbox with the highest confidence, remove it from set, and do IOU calculate with the rest bbox
3) remove the bbox whose IOU is higher than the iou_thresh from the set,
4) loop step 2 and 3, util the set is empty.
:param bboxes: numpy array of 2D, [num_bboxes, 4]
:param confidences: numpy array of 1D. [num_bboxes]
:param conf_thresh:
:param iou_thresh:
:param keep_top_k:
:return:
'''
if len(bboxes) == 0: return []
conf_keep_idx = np.where(confidences > conf_thresh)[0]
bboxes = bboxes[conf_keep_idx]
confidences = confidences[conf_keep_idx]
pick = []
xmin = bboxes[:, 0]
ymin = bboxes[:, 1]
xmax = bboxes[:, 2]
ymax = bboxes[:, 3]
area = (xmax - xmin + 1e-3) * (ymax - ymin + 1e-3)
idxs = np.argsort(confidences)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# keep top k
if keep_top_k != -1:
if len(pick) >= keep_top_k:
break
overlap_xmin = np.maximum(xmin[i], xmin[idxs[:last]])
overlap_ymin = np.maximum(ymin[i], ymin[idxs[:last]])
overlap_xmax = np.minimum(xmax[i], xmax[idxs[:last]])
overlap_ymax = np.minimum(ymax[i], ymax[idxs[:last]])
overlap_w = np.maximum(0, overlap_xmax - overlap_xmin)
overlap_h = np.maximum(0, overlap_ymax - overlap_ymin)
overlap_area = overlap_w * overlap_h
overlap_ratio = overlap_area / (area[idxs[:last]] + area[i] - overlap_area)
need_to_be_deleted_idx = np.concatenate(([last], np.where(overlap_ratio > iou_thresh)[0]))
idxs = np.delete(idxs, need_to_be_deleted_idx)
# if the number of final bboxes is less than keep_top_k, we need to pad it.
# TODO
return conf_keep_idx[pick]
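def _nms_demo():
    # Added illustrative sketch (values are made up, not from the original
    # project): two heavily overlapping boxes and one separate box. With
    # iou_thresh=0.5 the lower-scored overlapping box is suppressed, so the
    # kept indices are [0, 2].
    demo_bboxes = np.array([[0.10, 0.1, 0.40, 0.4],
                            [0.12, 0.1, 0.42, 0.4],
                            [0.60, 0.6, 0.90, 0.9]])
    demo_scores = np.array([0.9, 0.8, 0.7])
    keep = single_class_non_max_suppression(demo_bboxes, demo_scores,
                                            conf_thresh=0.5, iou_thresh=0.5)
    assert list(keep) == [0, 2]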
def mask_detection(is_2_write=False,save_path=None):
#----var
pb_path = "face_mask_detection.pb"
node_dict = {'input':'data_1:0',
'detection_bboxes':'loc_branch_concat_1/concat:0',
'detection_scores':'cls_branch_concat_1/concat:0'}
conf_thresh = 0.5
iou_thresh = 0.4
frame_count = 0
FPS = "0"
#====anchors config
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
id2class = {0: 'Mask', 1: 'NoMask'}
#----video streaming init
cap, height, width, writer = video_init(is_2_write=is_2_write,save_path=save_path)
#----model init
#====generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
#====model restore from pb file
sess,node_dict = model_restore_from_pb(pb_path, node_dict)
tf_input = node_dict['input']
model_shape = tf_input.shape#[N,H,W,C]
print("model_shape = ", model_shape)
detection_bboxes = node_dict['detection_bboxes']
detection_scores = node_dict['detection_scores']
sampleNum=0
while (cap.isOpened()):
#----get image
ret, img = cap.read()
if ret:
#----image processing
img_resized = cv2.resize(img, (model_shape[2], model_shape[1]))
img_resized = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized = img_resized.astype('float32')
img_resized /= 255
#----mask detection
y_bboxes_output, y_cls_output = sess.run([detection_bboxes, detection_scores],
feed_dict={tf_input: np.expand_dims(img_resized, axis=0)})
#remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# keep_idx is the alive bounding box after nms.
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
#====draw bounding box
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if class_id == 0:
color = (0, 255, 0) # (B,G,R)
else:
color = (0, 0, 255) # (B,G,R)
cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, 2)
cv2.putText(img, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
# print("%s" % (id2class[class_id]))
                sampleNum = sampleNum + 1
                if id2class[class_id] == 'NoMask':
                    cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", img)
# speaker = win32com.client.Dispatch("SAPI.SpVoice")
# speaker.Speak("No mask!")
#----FPS count
if frame_count == 0:
t_start = time.time()
frame_count += 1
if frame_count >= 10:
FPS = "FPS=%1f" % (10 / (time.time() - t_start))
frame_count = 0
cv2.putText(img, "Trilocode Technology", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
#----image display
cv2.imshow("Trilocode Technology", img)
#----image writing
if writer is not None:
writer.write(img)
#----'q' key pressed?
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print("get image failed")
break
#----release
cap.release()
if writer is not None:
writer.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
save_path = r"TrainingImage\demo.avi"
mask_detection(is_2_write=False, save_path=save_path) | [
"[email protected]"
] | |
606c5539a569b32e5fe5a87e273691730ee9f23e | 77ab710ae6a574d7a77b28c33a53a7fe5d05b8e2 | /Module5/practice/06_task_sort.py | 5bd3ce3161e4a41151f63c0e2829468f14617c71 | [] | no_license | BlackCranium/SpecialistPython2 | 37e0c08ffa7d9558ca0b62f37361c8fd6f857aca | 2b4a33fd910246d137e6a74dd2a418352e0db30e | refs/heads/master | 2023-06-13T22:43:57.489325 | 2021-02-13T02:06:05 | 2021-02-13T02:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Призеры олимпиады
# Based on the olympiad results, participants are awarded diplomas.
# Participants who scored the same number of points receive diplomas of the
# same degree.
# A participant who received a diploma of degree III or better counts as a
# prize winner.
# Using the olympiad results, determine the number of prize winners.

# Input: a natural number N (N < 100) - the number of participants - followed
# by N natural numbers - the participants' scores.
# Output: a single number - the number of prize winners.
# Example:
# Input
#
# 10 1 3 4 3 5 6 7 7 6 1
# Output
# 5
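# A minimal sketch of one possible solution (the exercise file ships without
# an implementation, so everything below is an illustrative guess assuming
# Python 3, not the course's reference solution).
def count_prize_winners(results):
    # Diplomas I-III go to the three highest distinct scores.
    top_three = sorted(set(results), reverse=True)[:3]
    return sum(1 for score in results if score in top_three)


if __name__ == '__main__':
    data = [int(x) for x in input().split()]  # e.g. "10 1 3 4 3 5 6 7 7 6 1"
    n, results = data[0], data[1:]
    print(count_prize_winners(results[:n]))  # -> 5 for the sample input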
| [
"[email protected]"
] | |
9507f7fc6f1009cde77a4ff32c0f6a4a4ac1ce98 | 1b9d4ee23cf14f4a82409f8eefa7d37f8b7c1e92 | /venv/bin/easy_install-3.6 | 7505534fa2bf3bab1a4d0e3a5f6e681f54493fbb | [] | no_license | narupi/CyberSecurityProgramming | a0a3a439c2760b255ca89d4e1a775054322e34a0 | b464445e0a45ae63acc96b09ed548235bb93d040 | refs/heads/master | 2020-04-27T03:29:13.711563 | 2019-03-05T21:51:18 | 2019-03-05T21:51:18 | 174,025,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | 6 | #!/home/narupi/PycharmProjects/CyberSecurityProgramming/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
8dffd484828558ccc0f9cc76142d46da927d7d7a | f390de67e6dd2ca8c6e369460084cb4d9c8c4a0c | /coursebuilder/common/safe_dom.py | 8b20c79a16c6a622524c55376c0f9e26cdb3f246 | [
"CC-BY-2.5",
"CC-BY-3.0",
"Apache-2.0"
] | permissive | andredias/course-builder | d2170076ef982444d55883b06b87f1a0da46a33b | ac4aa3131228a260c0a53b5d050dc78108126b88 | refs/heads/master | 2020-04-06T03:51:42.666679 | 2015-02-24T19:33:25 | 2015-02-24T19:33:25 | 33,021,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,342 | py | """Classes to build sanitized HTML."""
__author__ = 'John Orr ([email protected])'
import cgi
import re
def escape(strg):
return cgi.escape(strg, quote=1).replace("'", ''').replace('`', '`')
class Node(object):
"""Base class for the sanitizing module."""
def __init__(self):
self._parent = None
def _set_parent(self, parent):
self._parent = parent
@property
def parent(self):
return self._parent
@property
def sanitized(self):
raise NotImplementedError()
def __str__(self):
return self.sanitized
# pylint: disable=incomplete-protocol
class NodeList(object):
"""Holds a list of Nodes and can bulk sanitize them."""
def __init__(self):
self.list = []
self._parent = None
def __len__(self):
return len(self.list)
def _set_parent(self, parent):
assert self != parent
self._parent = parent
@property
def parent(self):
return self._parent
def append(self, node):
assert node is not None, 'Cannot add an empty value to the node list'
self.list.append(node)
node._set_parent(self) # pylint: disable=protected-access
return self
@property
def children(self):
return [] + self.list
def empty(self):
self.list = []
return self
def delete(self, node):
_list = []
for child in self.list:
if child != node:
_list.append(child)
self.list = _list
def insert(self, index, node):
assert node is not None, 'Cannot add an empty value to the node list'
self.list.insert(index, node)
node._set_parent(self) # pylint: disable=protected-access
return self
@property
def sanitized(self):
sanitized_list = []
for node in self.list:
sanitized_list.append(node.sanitized)
return ''.join(sanitized_list)
def __str__(self):
return self.sanitized
class Text(Node):
"""Holds untrusted text which will be sanitized when accessed."""
def __init__(self, unsafe_string):
super(Text, self).__init__()
self._value = unicode(unsafe_string)
@property
def sanitized(self):
return escape(self._value)
class Comment(Node):
"""An HTML comment."""
def __init__(self, unsafe_string=''):
super(Comment, self).__init__()
self._value = unicode(unsafe_string)
def get_value(self):
return self._value
@property
def sanitized(self):
return '<!--%s-->' % escape(self._value)
def add_attribute(self, **attr):
pass
def add_text(self, unsafe_string):
self._value += unicode(unsafe_string)
class Element(Node):
"""Embodies an HTML element which will be sanitized when accessed."""
_ALLOWED_NAME_PATTERN = re.compile(r'^[a-zA-Z][_\-a-zA-Z0-9]*$')
_VOID_ELEMENTS = frozenset([
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])
def __init__(self, tag_name, **attr):
"""Initializes an element with given tag name and attributes.
Tag name will be restricted to alpha chars, attribute names
will be quote-escaped.
Args:
tag_name: the name of the element, which must match
_ALLOWED_NAME_PATTERN.
**attr: the names and value of the attributes. Names must match
_ALLOWED_NAME_PATTERN and values will be quote-escaped.
"""
assert Element._ALLOWED_NAME_PATTERN.match(tag_name), (
'tag name %s is not allowed' % tag_name)
for attr_name in attr:
assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
'attribute name %s is not allowed' % attr_name)
super(Element, self).__init__()
self._tag_name = tag_name
self._children = []
self._attr = {}
for _name, _value in attr.items():
self._attr[_name.lower()] = _value
def has_attribute(self, name):
return name.lower() in self._attr
@property
def attributes(self):
return self._attr.keys()
def set_attribute(self, name, value):
self._attr[name.lower()] = value
return self
def get_escaped_attribute(self, name):
return escape(self._attr[name.lower()])
def add_attribute(self, **attr):
for attr_name, value in attr.items():
assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
'attribute name %s is not allowed' % attr_name)
self._attr[attr_name.lower()] = value
return self
def add_child(self, node):
node._set_parent(self) # pylint: disable=protected-access
self._children.append(node)
return self
def append(self, node):
return self.add_child(node)
def add_children(self, node_list):
for child in node_list.list:
self.add_child(child)
return self
def empty(self):
self._children = []
return self
def add_text(self, text):
return self.add_child(Text(text))
def can_have_children(self):
return True
@property
def children(self):
return [] + self._children
@property
def tag_name(self):
return self._tag_name
@property
def sanitized(self):
"""Santize the element and its descendants."""
assert Element._ALLOWED_NAME_PATTERN.match(self._tag_name), (
'tag name %s is not allowed' % self._tag_name)
buff = '<' + self._tag_name
for attr_name, value in sorted(self._attr.items()):
if attr_name == 'classname':
attr_name = 'class'
elif attr_name.startswith('data_'):
attr_name = attr_name.replace('_', '-')
if value is None:
value = ''
buff += ' %s="%s"' % (
attr_name, escape(value))
if self._children:
buff += '>'
for child in self._children:
buff += child.sanitized
buff += '</%s>' % self._tag_name
elif self._tag_name.lower() in Element._VOID_ELEMENTS:
buff += '/>'
else:
buff += '></%s>' % self._tag_name
return buff
class A(Element):
"""Embodies an 'a' tag. Just a conveniece wrapper on Element."""
def __init__(self, href, **attr):
"""Initialize an 'a' tag to a given target.
Args:
href: The value to put in the 'href' tag of the 'a' element.
**attr: the names and value of the attributes. Names must match
_ALLOWED_NAME_PATTERN and values will be quote-escaped.
"""
super(A, self).__init__('a', **attr)
self.add_attribute(href=href)
class ScriptElement(Element):
"""Represents an HTML <script> element."""
def __init__(self, **attr):
super(ScriptElement, self).__init__('script', **attr)
def can_have_children(self):
return False
def add_child(self, unused_node):
raise ValueError()
def add_children(self, unused_nodes):
raise ValueError()
def empty(self):
raise ValueError()
def add_text(self, text):
"""Add the script body."""
class Script(Text):
def __init__(self, script):
# Pylint is just plain wrong about warning here; suppressing.
# pylint: disable=bad-super-call
super(Script, self).__init__(None)
self._script = script
@property
def sanitized(self):
if '</script>' in self._script:
raise ValueError('End script tag forbidden')
return self._script
self._children.append(Script(text))
class Entity(Node):
"""Holds an XML entity."""
ENTITY_PATTERN = re.compile('^&([a-zA-Z]+|#[0-9]+|#x[0-9a-fA-F]+);$')
def __init__(self, entity):
assert Entity.ENTITY_PATTERN.match(entity)
super(Entity, self).__init__()
self._entity = entity
@property
def sanitized(self):
assert Entity.ENTITY_PATTERN.match(self._entity)
return self._entity
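if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # Course Builder module). Attribute names are lowercased, className is
    # rendered as class, and text children are escaped on access.
    link = A('http://example.com', className='link')
    link.add_text('Hello & <world>')
    print link.sanitized
    # -> <a class="link" href="http://example.com">Hello &amp; &lt;world&gt;</a>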
| [
"[email protected]"
] | |
129d0bd2ad47162e4995b7434dd11315aeb8900a | 49f02d816373df9d5d3324f7e7857367995e244a | /main.py | 2e2c25ec0755a9c7c770da254fbbc108fc2f666b | [] | no_license | harindr404/python | 1a1337c18627d3f6de52e4eef59cc9b77f44b0c3 | 5834b187dfc5424e23aa8a5964cee13a66019fae | refs/heads/master | 2021-01-22T10:46:21.428866 | 2017-02-16T08:45:19 | 2017-02-16T08:45:19 | 82,035,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import urllib
from PIL import Image
import pytesseract
from resizeimage import resizeimage
import os
import cv2
import numpy as np
src_path="C:\Users\harindra sai tej\PycharmProjects\untitled"
img_path="C:\Users\harindra sai tej\PycharmProjects\untitled\A-wise-man-can-learn"
def get_string(img_path):
img=cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel=np.ones((1,1), np.uint8)
img= cv2.dilate(img, kernel, iterations=1)
img= cv2.erode(img, kernel, iterations=1)
cv2.imwrite(src_path + "A-wise-man-can-learn.png", img)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
cv2.imwrite(src_path+ "A-wise-man-can-learn.png.png", img)
    result = pytesseract.image_to_string(Image.open(src_path + "A-wise-man-can-learn.png"))
return result
print 'Start recognition'
print get_string(src_path + "img.png")
| [
"[email protected]"
] | |
d36baa1063d379c7832ecf0345d162789b73d35d | aca12201f04df6e5f8dc86e47249ddb063bfe56d | /MusicTransformer/model.py | 458c1f2bc3b93e3be3b8f067091da8efedea2c49 | [] | no_license | modulabs/RubatoLab | e2605388b517a792eb4f9b0a00831cc2e35e6dd8 | d7860508e3dfe34cb2ea823b7e8ccac9b3f26b43 | refs/heads/master | 2020-06-23T22:44:59.451592 | 2020-01-07T08:32:03 | 2020-01-07T08:32:03 | 198,775,552 | 10 | 3 | null | 2020-01-06T09:09:21 | 2019-07-25T06:57:17 | Jupyter Notebook | UTF-8 | Python | false | false | 9,647 | py | import tensorflow as tf
import numpy as np
# ref : https://github.com/scpark20/music-transformer/blob/master/music-transformer.ipynb
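# The helper functions below are referenced later in this file (EncoderLayer,
# DecoderLayer, Encoder, Decoder) but were missing from it. These are minimal
# sketches following the standard TensorFlow transformer-tutorial definitions
# - an assumption, not the original author's code.
def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * angle_rates
def positional_encoding(position, d_model):
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)
    # sin on even indices, cos on odd indices
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    return tf.cast(angle_rads[np.newaxis, ...], dtype=tf.float32)
def point_wise_feed_forward_network(d_model, dff):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation='relu'),  # (batch, seq_len, dff)
        tf.keras.layers.Dense(d_model),                 # (batch, seq_len, d_model)
    ])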
class RelativeGlobalAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(RelativeGlobalAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
self.headDim = d_model // num_heads
self.contextDim = int(self.headDim * self.num_heads)
assert d_model % self.num_heads == 0
        # One Dense projection per head; reusing a single projection for every
        # head would make all heads compute identical values.
        self.wq = [tf.keras.layers.Dense(self.headDim) for _ in range(num_heads)]
        self.wk = [tf.keras.layers.Dense(self.headDim) for _ in range(num_heads)]
        self.wv = [tf.keras.layers.Dense(self.headDim) for _ in range(num_heads)]
        # Projection of the concatenated heads back to the model dimension.
        self.dense = tf.keras.layers.Dense(d_model)
    def call(self, v, k, q):
        # [Heads, Batch, Time, HeadDim]
        q = tf.stack([wq(q) for wq in self.wq])
        k = tf.stack([wk(k) for wk in self.wk])
        v = tf.stack([wv(v) for wv in self.wv])
print("inputs")
print("[Heads, Batch, Time, HeadDim]", q.shape)
self.batch_size = q.shape[1]
self.max_len = q.shape[2]
        # skewing: relative position embeddings, [Heads, Time, HeadDim].
        # Created lazily so repeated calls reuse the same variable instead of
        # adding a new weight on every forward pass.
        if not hasattr(self, 'E'):
            self.E = self.add_weight('E', shape=[self.num_heads, self.max_len, self.headDim])
        E = self.E
# [Heads, Batch * Time, HeadDim]
Q_ = tf.reshape(q, [self.num_heads, self.batch_size * self.max_len, self.headDim])
# [Heads, Batch * Time, Time]
S = tf.matmul(Q_, E, transpose_b=True)
# [Heads, Batch, Time, Time]
S = tf.reshape(S, [self.num_heads, self.batch_size, self.max_len, self.max_len])
# [Heads, Batch, Time, Time+1]
S = tf.pad(S, ((0, 0), (0, 0), (0, 0), (1, 0)))
# [Heads, Batch, Time+1, Time]
S = tf.reshape(S, [self.num_heads, self.batch_size, self.max_len + 1, self.max_len])
# [Heads, Batch, Time, Time]
S = S[:, :, 1:]
# [Heads, Batch, Time, Time]
        attention = (tf.matmul(q, k, transpose_b=True) + S) / np.sqrt(self.headDim)
        # causal mask: band_part keeps the lower triangle (TF 2.0: tf.linalg.band_part)
mask = tf.linalg.band_part(tf.ones([self.max_len, self.max_len]), -1, 0)
attention = attention * mask - tf.cast(1e10, attention.dtype) * (1-mask)
score = tf.nn.softmax(attention, axis=3)
print("Score : ", score.shape)
# [Heads, Batch, Time, HeadDim]
context = tf.matmul(score, v)
print("[Heads, Batch, Time, HeadDim] : ", context.shape)
# [Batch, Time, Heads, HeadDim]
context = tf.transpose(context, [1, 2, 0, 3])
print("[Batch, Time, Heads, HeadDim] : ", context.shape)
        # [Batch, Time, ContextDim]
        context = tf.reshape(context, [self.batch_size, self.max_len, self.num_heads * self.headDim])
        print("[Batch, Time, ContextDim] : ", context.shape)
        # Project back to the model dimension so the residual connections in
        # the surrounding encoder/decoder layers line up, and return the
        # attention weights that the callers below unpack.
        output = self.dense(context)
        return output, score
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.rga = RelativeGlobalAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.rga(x, x, x) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.rga1 = RelativeGlobalAttention(d_model, num_heads)
self.rga2 = RelativeGlobalAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.rga1(x, x, x) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.rga2(
enc_output, enc_output, out1) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,
look_ahead_mask, padding_mask):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](x, enc_output, training,
look_ahead_mask, padding_mask)
attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
# x.shape == (batch_size, target_seq_len, d_model)
return x, attention_weights
class Transformer(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
target_vocab_size, pe_input, pe_target, rate=0.1):
super(Transformer, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff,
input_vocab_size, pe_input, rate)
self.decoder = Decoder(num_layers, d_model, num_heads, dff,
target_vocab_size, pe_target, rate)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inp, tar, training, enc_padding_mask,
look_ahead_mask, dec_padding_mask):
enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model)
# dec_output.shape == (batch_size, tar_seq_len, d_model)
dec_output, attention_weights = self.decoder(
tar, enc_output, training, look_ahead_mask, dec_padding_mask)
final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
| [
"[email protected]"
] | |
80ceed4ac066ee786c77744bdc31d0acfd1fd6e0 | ca28f1535bb9a4b6504d5f6a5c5abf1a4569037f | /pos_umbrella/pos_umbrella/report/eod_report/eod_report.py | fc9a6b62af7844ad6a75cb96ea3541872cb50a0a | [
"MIT"
] | permissive | worldkingpradeep/pos_umbrella | 8b8f83cb7d638f15a1808e779656e250549c5e26 | 6fa7a51a9c019b533befcf85955fdd5e165c6a5c | refs/heads/master | 2023-04-20T06:30:37.054666 | 2021-05-14T17:19:59 | 2021-05-14T17:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,285 | py | # Copyright (c) 2013, jan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns, data = [], []
from_date = filters.get("from_date")
to_date = filters.get("to_date")
pos_profile = filters.get("pos_profile")
print(filters.get("with_details"))
with_details = filters.get("with_details")
if from_date > to_date:
frappe.throw("From Date should be before To Date")
else:
columns.append({"fieldname": "store_name", "label": "Store Name", "fieldtype": "Data", "width": 150})
if with_details:
columns.append({"fieldname": "invoice_number", "label": "Invoice Number", "fieldtype": "Link", "options": "Sales Invoice", "width": 150})
columns.append({"fieldname": "item_code", "label": "Item_code", "fieldtype": "Data", "width": 120})
columns.append({"fieldname": "item_name", "label": "Item Name", "fieldtype": "Data", "width": 230})
columns.append({"fieldname": "quantity", "label": "Quantity", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "rate", "label": "Rate", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "amount", "label": "Amount", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "discount", "label": "Discount", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "write_off", "label": "Write Off", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "loyalty", "label": "Loyalty", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "net_sale", "label": "Net Sale", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "vat", "label": "VAT", "fieldtype": "Data", "width": 100})
columns.append({"fieldname": "gross_sale", "label": "Gross Sale", "fieldtype": "Data", "width": 100})
condition = ""
if pos_profile:
condition += " and pos_profile='{0}' ".format(pos_profile)
if with_details:
condition += " and is_pos=1"
condition += " ORDER By pos_profile ASC"
query = """ SELECT * FROM `tabSales Invoice`
WHERE docstatus=1 and posting_date BETWEEN '{0}' and '{1}' {2}""".format(from_date, to_date,condition)
print(query)
sales_invoices = frappe.db.sql(query, as_dict=True)
for idx,i in enumerate(sales_invoices):
if not with_details:
obj = {
"invoice_number": i.name,
"store_name": i.pos_profile,
"discount": i.discount_amount,
"write_off": i.write_off_amount,
"loyalty": i.loyalty_amount,
"net_sale": i.total,
"gross_sale": i.grand_total,
"vat": i.total_taxes_and_charges,
}
mode_of_payments = frappe.db.sql(""" SELECT * FROM `tabSales Invoice Payment` WHERE parent=%s """,i.name,as_dict=True)
for ii in mode_of_payments:
check_mop(columns,ii)
obj[ii.mode_of_payment] = ii.amount
data.append(obj)
else:
obj = {}
obj["invoice_number"] = i.name
obj["store_name"] = i.pos_profile
invoice_items = frappe.db.sql(""" SELECT * FROM `tabSales Invoice Item` WHERE parent=%s""", i.name, as_dict=1)
for idxx,x in enumerate(invoice_items):
if idxx == 0:
obj["item_code"] = x.item_code
obj["item_name"] = x.item_name
obj["quantity"] = x.qty
obj["rate"] = x.rate
obj["amount"] = x.amount
obj["discount"] = i.discount_amount
obj["write_off"] = i.write_off_amount
obj["loyalty"] = i.loyalty_amount
obj["net_sale"] = i.total
obj["gross_sale"] = i.grand_total
obj["vat"] = i.total_taxes_and_charges
mode_of_payments = frappe.db.sql(""" SELECT * FROM `tabSales Invoice Payment` WHERE parent=%s """,
i.name, as_dict=True)
for ii in mode_of_payments:
check_mop(columns, ii)
obj[ii.mode_of_payment] = ii.amount
else:
obj = {}
obj["item_code"] = x.item_code
obj["item_name"] = x.item_name
obj["quantity"] = x.qty
obj["rate"] = x.rate
obj["amount"] = x.amount
data.append(obj)
return columns, data
def check_mop(columns, ii):
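	# Add a report column for this mode of payment unless one already exists.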
add = True
for i in columns:
if i.get("label") == ii.mode_of_payment:
add = False
if add:
columns.append({
"fieldname": ii.mode_of_payment,
"label": ii.mode_of_payment,
"fieldtype": "Data",
"width": 150
}) | [
"[email protected]"
] | |
3544de98ec60610ff530b7cad0e147d033b35846 | 8d03bc359f6c7e2addc36d423cf979c54da5617e | /mysite/urls.py | b06eed44f976296399080361e8a464ddcfc74a84 | [] | no_license | JiehuiSun/chat | f878cd5fc2cdc7b6d0ae6b76f97bb9c173e4b2a2 | 518d0323bedc299b15bde329eb61d93c14be668c | refs/heads/master | 2021-06-28T16:02:40.193928 | 2019-06-04T08:58:17 | 2019-06-04T08:58:17 | 190,165,897 | 3 | 0 | null | 2021-06-10T21:31:53 | 2019-06-04T08:59:39 | Python | UTF-8 | Python | false | false | 971 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
# from django.conf.urls import url
# from django.contrib import admin
# urlpatterns = [
# url(r'^admin/', admin.site.urls),
# ]
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url('chat/', include('chat.urls')),
url('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
d28dc3b15e2c4a8f860d6f069ffd150fe3d54a26 | bc69d5c5eb705c066c87e952501e34bae6741095 | /pe25/pe25.py | 8a096135e3e91200861da180f6f13edd5553d67a | [] | no_license | dpflann/Project_Euler | 2c32cbb084d87cf7d1e9f321009027a582c6c4db | 4b829b7e48d5ff2f755addb159fc2a67265191a6 | refs/heads/master | 2020-12-23T02:47:32.145571 | 2017-11-07T12:09:56 | 2017-11-07T12:09:56 | 6,812,820 | 2 | 0 | null | 2017-11-07T12:09:57 | 2012-11-22T12:32:29 | Python | UTF-8 | Python | false | false | 1,286 | py | # The 12th term, F12, is the first term to contain three digits.
#
# What is the first term in the Fibonacci sequence to contain 1000 digits?
import math as m
def digits(n):
log10n = m.log10(n)
ceilLog10n = m.ceil(log10n)
if log10n == ceilLog10n:
return log10n + 1
return ceilLog10n
def fibonacci(n):
phi = ( 1 + m.sqrt(5) ) / 2.0
    return m.floor( ( phi ** n / m.sqrt(5) ) + .5 )
# Fib(n) = [ phi^n / sqrt(5) ] where [ ] = integer closest to
# http://en.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding
def firstFibWithNdigits(n):
    phi = ( 1 + m.sqrt(5) ) / 2.0
    k = 1
    log10phi = m.log10(phi)
    log10RootFive = m.log10(m.sqrt(5))
    # log10(Fib(k)) ~= k * log10(phi) - log10(sqrt(5)); Fib(k) reaches n
    # digits once this exceeds n - 1
    logFib = k * log10phi - log10RootFive
    while ( logFib <= n - 1 ):
        k = k + 1
        logFib = k * log10phi - log10RootFive
    return k
# The loop above is inefficient; the closed form below computes the index directly.
def fFwNd(n):
phi = ( 1 + m.sqrt(5) ) / 2.0
log10phi = m.log10(phi)
log10RootFive = m.log10(m.sqrt(5))
limit = n - 1
nth = (limit + log10RootFive) / log10phi
return m.ceil(nth)
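def sanityCheck():
    # Added illustrative check (not in the original solution): F(12) = 144 is
    # the first Fibonacci term with three digits, so both formulas should
    # return 12 for n = 3.
    assert firstFibWithNdigits(3) == 12
    assert fFwNd(3) == 12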
def solve():
print "The first Fibonacci number to have 1000 digits is %d." % firstFibWithNdigits(1000)
print "The first Fibonacci number to have 1000 digits is %d." % fFwNd(1000)
if __name__ == '__main__':
solve()
| [
"[email protected]"
] | |
bb8a2329570788c89b7c148065caa934ba72c375 | dab9b0cff33585c2a1575a715dd6e45c6f02020e | /exercises/17_serialization/task_17_4.py | 2a3ad560580ae573951a8ba39c61026eaed8f589 | [] | no_license | notoriginalnik/pyneng-exe | 33f2beb7e5fa0667f4e5db7e23a7e8a66a17f3df | 3e790d63edfa788a86e3a3fbb6c53ff160a6f12a | refs/heads/master | 2020-12-14T11:00:48.596852 | 2020-01-19T10:11:05 | 2020-01-19T10:11:05 | 234,720,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # -*- coding: utf-8 -*-
'''
Task 17.4
Create a function write_last_log_to_csv.
Function arguments:
* source_log - name of the csv file the data is read from (for example, mail_log.csv)
* output - name of the csv file the result is written to
The function returns nothing.
The write_last_log_to_csv function processes the csv file mail_log.csv.
The mail_log.csv file contains logs of username changes. A user cannot
change their email, only the username.
The write_last_log_to_csv function has to select only the most recent
entries for each user from mail_log.csv and write them to another csv file.
For some users there is only one entry, and then only that entry has to be
written to the resulting file.
For some users there are several entries with different names.
For example, the user with the email [email protected] changed the name several times:
C=3PO,[email protected],16/12/2019 17:10
C3PO,[email protected],16/12/2019 17:15
C-3PO,[email protected],16/12/2019 17:24
Of these three entries, only one - the most recent - must be written to the resulting file:
C-3PO,[email protected],16/12/2019 17:24
datetime objects from the datetime module are convenient for comparing dates.
To simplify working with dates, the convert_datetimestr_to_datetime function is
provided - it converts a date string in the 11/10/2019 14:05 format into a
datetime object.
The resulting datetime objects can be compared with each other.
Using the convert_datetimestr_to_datetime function is optional.
'''
import datetime
def convert_datetimestr_to_datetime(datetime_str):
"""
    Converts a date string in the 11/10/2019 14:05 format into a datetime object.
"""
return datetime.datetime.strptime(datetime_str, '%d/%m/%Y %H:%M')
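# A minimal sketch of a possible solution (the exercise ships as a stub; the
# csv layout - a header row followed by "username,email,datetime" rows - is an
# assumption based on the task description, not the course's reference code).
import csv


def write_last_log_to_csv(source_log, output):
    last_entries = {}
    with open(source_log) as src:
        reader = csv.reader(src)
        headers = next(reader)
        for username, email, date_str in reader:
            date = convert_datetimestr_to_datetime(date_str)
            # Keep only the most recent entry seen for each email.
            if email not in last_entries or date > last_entries[email][1]:
                last_entries[email] = (username, date)
    with open(output, 'w') as dest:
        writer = csv.writer(dest)
        writer.writerow(headers)
        for email, (username, date) in last_entries.items():
            writer.writerow([username, email, date.strftime('%d/%m/%Y %H:%M')])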
| [
"[email protected]"
] | |
1f9bc6df32c49b4d1b0b4f285e91f730facd4014 | 5a3d2450d1af3331a7b7b9e6e4252de17c67b3b3 | /masakarimonitors/tests/unit/hostmonitor/consul_check/test_consul_helper.py | 379b3254819b4a9daee33947cbee5a92914e0a94 | [
"Apache-2.0"
] | permissive | openstack/masakari-monitors | f4bc3900a555ad3b18867f073d2f8ccd11250d5d | 4aa998d492143f63d5ac32c6c000dff81e69a7af | refs/heads/master | 2023-08-15T14:28:52.627648 | 2023-02-28T13:33:26 | 2023-02-28T13:33:26 | 71,113,678 | 11 | 12 | null | null | null | null | UTF-8 | Python | false | false | 4,853 | py | # Copyright(c) 2021 Inspur
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from unittest import mock
from oslo_config import fixture as fixture_config
from masakarimonitors.hostmonitor.consul_check import consul_helper
class FakerAgentMembers(object):
def __init__(self):
self.agent_members = []
def create_agent(self, name, status=1):
agent = {
'Name': name,
'Status': status,
'Port': 'agent_lan_port',
'Addr': 'agent_ip',
'Tags': {
'dc': 'storage',
'role': 'consul',
'port': 'agent_server_port',
'wan_join_port': 'agent_wan_port',
'expect': '3',
'id': 'agent_id',
'vsn_max': '3',
'vsn_min': '2',
'vsn': '2',
'raft_vsn': '2',
},
'ProtocolMax': 5,
'ProtocolMin': 1,
'ProtocolCur': 2,
'DelegateMax': 5,
'DelegateMin': 2,
'DelegateCur': 4,
}
self.agent_members.append(agent)
def generate_agent_members(self):
return self.agent_members
class TestConsulManager(testtools.TestCase):
def setUp(self):
super(TestConsulManager, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.consul_manager = consul_helper.ConsulManager(self.CONF)
self.consul_manager.agents = {
'manage': consul_helper.ConsulAgent('manage'),
'tenant': consul_helper.ConsulAgent('tenant'),
'storage': consul_helper.ConsulAgent('storage'),
}
def test_get_health(self):
fake_manage_agents = FakerAgentMembers()
fake_manage_agents.create_agent('node01', status=1)
fake_manage_agents.create_agent('node02', status=1)
fake_manage_agents.create_agent('node03', status=1)
agent_manage_members = fake_manage_agents.generate_agent_members()
fake_tenant_agents = FakerAgentMembers()
fake_tenant_agents.create_agent('node01', status=1)
fake_tenant_agents.create_agent('node02', status=1)
fake_tenant_agents.create_agent('node03', status=1)
agent_tenant_members = fake_tenant_agents.generate_agent_members()
fake_storage_agents = FakerAgentMembers()
fake_storage_agents.create_agent('node01', status=1)
fake_storage_agents.create_agent('node02', status=1)
fake_storage_agents.create_agent('node03', status=3)
agent_storage_members = fake_storage_agents.generate_agent_members()
with mock.patch.object(self.consul_manager.agents['manage'],
'get_agents', return_value=agent_manage_members):
with mock.patch.object(self.consul_manager.agents['tenant'],
'get_agents', return_value=agent_tenant_members):
with mock.patch.object(self.consul_manager.agents['storage'],
'get_agents', return_value=agent_storage_members):
excepted_health = {
"node01": ['up', 'up', 'up'],
"node02": ['up', 'up', 'up'],
"node03": ['up', 'up', 'down'],
}
sequence = ['manage', 'tenant', 'storage']
agents_health = self.consul_manager.get_health(sequence)
self.assertEqual(excepted_health, agents_health)
class TestConsulAgent(testtools.TestCase):
def setUp(self):
super(TestConsulAgent, self).setUp()
self.consul_agent = consul_helper.ConsulAgent('test')
def test_get_health(self):
fake_agents = FakerAgentMembers()
fake_agents.create_agent('node01', status=1)
fake_agents.create_agent('node02', status=1)
fake_agents.create_agent('node03', status=3)
agent_members = fake_agents.generate_agent_members()
with mock.patch.object(self.consul_agent, 'get_agents',
return_value=agent_members):
excepted_health = {
"node01": 'up',
"node02": 'up',
"node03": 'down',
}
agents_health = self.consul_agent.get_health()
self.assertEqual(excepted_health, agents_health)
| [
"[email protected]"
] |