| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–281 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M (⌀ = null) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1–1 |
| author | string | length 0–175 |
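Each record below is one dataset row: a single Python source file plus its repository metadata, with fields separated by `|` in the order of the schema above. A minimal sketch, assuming the Hugging Face `datasets` library and a hypothetical dataset path (a placeholder, not the real name), of how rows with this schema could be streamed and inspected:

    from datasets import load_dataset

    # "org/python-source-corpus" is a placeholder dataset path.
    ds = load_dataset("org/python-source-corpus", split="train", streaming=True)
    for row in ds:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # inspect only the first record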
a9f9ae7217821fc44bbee35b1d83f74a567b88af
|
09402595382795a40203099cb144974400ad9490
|
/mundo1/ex030.py
|
59bbc3229bf968d361d76827f8f4f3479db4ae16
|
[] |
no_license
|
afreitasdotdev/cev-python
|
5da6cee7c579c599986cb736fd17abfd23531a8d
|
9a9726530c9ffd52888cda24b9cbfaf3da91ccf9
|
refs/heads/master
| 2021-07-04T15:41:19.391181 | 2020-08-26T00:17:54 | 2020-08-26T00:17:54 | 164,366,493 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 243 |
py
|
# Create a program that reads an integer and shows on screen
# whether it is EVEN or ODD
num = int(input('Enter a number: '))
if num % 2 == 0:
    print('The number {} is even'.format(num))
else:
    print('The number {} is odd'.format(num))
|
[
"[email protected]"
] | |
fa3ae7f5ccd6a41322f07b548a062a83ba1d35b3
|
b6e02f46e2e8c8582fa53329d69577ac192b3519
|
/scraper.py
|
224e3ebaf68a5b148a9db6db492a3695b70522cb
|
[] |
no_license
|
AxSmasher44/simple-amazon-listing-webscraper
|
7afe56a52f2bba4e27bdf4294dad752a3bd99613
|
f741b51e88a69cdfb6d2e44dd2772280baa0103e
|
refs/heads/master
| 2022-12-17T18:12:56.504114 | 2020-09-29T12:41:34 | 2020-09-29T12:41:34 | 299,605,048 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,286 |
py
|
import requests
from bs4 import BeautifulSoup
from csv import DictWriter
"""WARNING:Don't send requests continuously!!!
Only works for amazon india"""
def amazon_scraper(item):
if " " in item:
url_item = item.split(" ")
url_string = "+".join(url_item)
URL = "https://www.amazon.in/s?k="+url_string+"&ref=nb_sb_noss_2"
else:
URL = "https://www.amazon.in/s?k="+item+"&ref=nb_sb_noss_2"
all_items = []
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
products = soup.find_all("div", class_="sg-col-inner")
    for product in products:
        name = product.find("span", class_="a-size-medium a-color-base a-text-normal")
        price = product.find("span", class_="a-offscreen")
        if name is None or price is None:
            continue  # some sg-col-inner divs are not product cards
        all_items.append({
            "Name": name.get_text(),
            "Price": price.get_text()
        })
return all_items
print(amazon_scraper("apple watch"))
def product_listings_to_csv(product):
item_dict = amazon_scraper(product)
with open(product+".csv", "w", newline="", encoding="UTF-8") as file:
headers = ["Name", "Price"]
csv_writer = DictWriter(file, fieldnames=headers)
csv_writer.writeheader()
for item in item_dict:
csv_writer.writerow(item)
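# Hypothetical usage sketch (assumes the Amazon India markup matched above is still current):
# product_listings_to_csv("apple watch")  # writes "apple watch.csv" with Name/Price columns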
|
[
"[email protected]"
] | |
683b203b51163f3f03eae2a44d83f8a8517fc84f
|
d6750f31b5e15030190e9fdeb6c59075df98a624
|
/выражения_в_F_строке.py
|
3e5a7f851fa905f0e0db0618cf6668895afb9ca9
|
[] |
no_license
|
Mgomelya/Study
|
a107d16261919a9c484cdc15e373d087b40c5208
|
f49350ab592574cd12399405fc95f9fc1cd3e10f
|
refs/heads/master
| 2023-08-07T12:55:46.068988 | 2021-10-07T14:42:45 | 2021-10-07T14:42:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 440 |
py
|
def calc_stat(listened):  # short for "calculate statistics"
    N = len(listened)
    M = sum(listened) // 60
    S = sum(listened) % 60
    return f'You listened to {N} songs, with a total length of {M} minutes and {S} seconds.'
# write the code of the calc_stat function (exercise prompt)
print(calc_stat([193, 148, 210, 144, 174, 159, 163, 189, 230, 204]))
|
[
"[email protected]"
] | |
aa6977b0b274bab8863a388a9723f9b4e5b84d81
|
c74c907a32da37d333096e08d2beebea7bea65e7
|
/kaikeba/image caption/image_captioning/image_captioning/models.py
|
0ee1e2560a93401662355584208c4c607a7e887f
|
[] |
no_license
|
wangqiang79/learn
|
6b37cc41140cc2200d928f3717cfc72357d10d54
|
e4b949a236fa52de0e199c69941bcbedd2c26897
|
refs/heads/master
| 2022-12-25T06:24:39.163061 | 2020-07-13T15:43:13 | 2020-07-13T15:43:13 | 231,796,188 | 2 | 2 | null | 2022-12-08T07:03:05 | 2020-01-04T16:45:33 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 9,252 |
py
|
import torch
from torch import nn
import torchvision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Encoder(nn.Module):
"""
Encoder.
"""
def __init__(self, encoded_image_size=14):
super(Encoder, self).__init__()
self.enc_image_size = encoded_image_size
vgg = torchvision.models.vgg16(pretrained=True) # pretrained VGG16
# Remove linear and pool layers (since we're not doing classification)
modules = list(vgg.children())[:-2]
self.vgg = nn.Sequential(*modules)
# Resize image to fixed size to allow input images of variable size
self.adaptive_pool = nn.AdaptiveAvgPool2d((encoded_image_size, encoded_image_size))
self.fine_tune()
def forward(self, images):
"""
Forward propagation.
:param images: images, a tensor of dimensions (batch_size, 3, image_size, image_size)
:return: encoded images
"""
out = self.vgg(images) # (batch_size, 512, image_size/32, image_size/32)
out = self.adaptive_pool(out) # (batch_size, 512, encoded_image_size, encoded_image_size)
out = out.permute(0, 2, 3, 1) # (batch_size, encoded_image_size, encoded_image_size, 512)
return out
def fine_tune(self, fine_tune=True):
"""
:param fine_tune: Allow?
"""
for p in self.vgg.parameters():
p.requires_grad = False
for c in list(self.vgg.children())[0:-2]:
for p in c.parameters():
p.requires_grad = fine_tune
class Attention(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(Attention, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image
self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output
self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)
att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)
att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) # (batch_size, num_pixels)
alpha = self.softmax(att) # (batch_size, num_pixels)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim=1) # (batch_size, encoder_dim)
return attention_weighted_encoding, alpha
class DecoderWithAttention(nn.Module):
"""
Decoder.
"""
def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, encoder_dim=512, dropout=0.5):
"""
:param attention_dim: size of attention network
:param embed_dim: embedding size
:param decoder_dim: size of decoder's RNN
:param vocab_size: size of vocabulary
:param encoder_dim: feature size of encoded images
:param dropout: dropout
"""
super(DecoderWithAttention, self).__init__()
self.encoder_dim = encoder_dim
self.attention_dim = attention_dim
self.embed_dim = embed_dim
self.decoder_dim = decoder_dim
self.vocab_size = vocab_size
self.dropout = dropout
self.attention = Attention(encoder_dim, decoder_dim, attention_dim) # attention network
self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer
self.dropout = nn.Dropout(p=self.dropout)
self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias=True) # decoding LSTMCell
self.init_h = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell
self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell
self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate
self.sigmoid = nn.Sigmoid()
self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary
self.init_weights() # initialize some layers with the uniform distribution
def init_weights(self):
"""
Initializes some parameters with values from the uniform distribution, for easier convergence.
"""
self.embedding.weight.data.uniform_(-0.1, 0.1)
self.fc.bias.data.fill_(0)
self.fc.weight.data.uniform_(-0.1, 0.1)
def load_pretrained_embeddings(self, embeddings):
"""
Loads embedding layer with pre-trained embeddings.
:param embeddings: pre-trained embeddings
"""
self.embedding.weight = nn.Parameter(embeddings)
def fine_tune_embeddings(self, fine_tune=True):
"""
Allow fine-tuning of embedding layer? (Only makes sense to not-allow if using pre-trained embeddings).
:param fine_tune: Allow?
"""
for p in self.embedding.parameters():
p.requires_grad = fine_tune
def init_hidden_state(self, encoder_out):
"""
Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:return: hidden state, cell state
"""
mean_encoder_out = encoder_out.mean(dim=1)
h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)
c = self.init_c(mean_encoder_out)
return h, c
def forward(self, encoder_out, encoded_captions, caption_lengths):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)
:param encoded_captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)
:param caption_lengths: caption lengths, a tensor of dimension (batch_size, 1)
:return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices
"""
batch_size = encoder_out.size(0)
encoder_dim = encoder_out.size(-1)
vocab_size = self.vocab_size
# Flatten image
encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)
num_pixels = encoder_out.size(1)
# Sort input data by decreasing lengths; why? apparent below
caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)
encoder_out = encoder_out[sort_ind]
encoded_captions = encoded_captions[sort_ind]
# Embedding
embeddings = self.embedding(encoded_captions) # (batch_size, max_caption_length, embed_dim)
# Initialize LSTM state
h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)
# We won't decode at the <end> position, since we've finished generating as soon as we generate <end>
# So, decoding lengths are actual lengths - 1
decode_lengths = (caption_lengths - 1).tolist()
    # Create tensors to hold word prediction scores and alphas
predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(device)
alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)
# At each time-step, decode by
# attention-weighing the encoder's output based on the decoder's previous hidden state output
# then generate a new word in the decoder with the previous word and the attention weighted encoding
for t in range(max(decode_lengths)):
batch_size_t = sum([l > t for l in decode_lengths])
attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],
h[:batch_size_t])
gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)
attention_weighted_encoding = gate * attention_weighted_encoding
h, c = self.decode_step(
torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),
(h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)
preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)
predictions[:batch_size_t, t, :] = preds
alphas[:batch_size_t, t, :] = alpha
return predictions, encoded_captions, decode_lengths, alphas, sort_ind
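# A minimal shape-check sketch (hypothetical dimensions, not part of the original file):
# encoder = Encoder(encoded_image_size=14)
# decoder = DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=1000)
# enc_out = encoder(torch.randn(2, 3, 224, 224))  # -> (2, 14, 14, 512)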
|
[
"[email protected]"
] | |
2b5c2b47741847463dd2ef27f3cfc82839073bb1
|
54bc8c0d95e8d28a8d7e4a6eb9add31030f2c5fc
|
/From Jump Drive/class8s/c8.py
|
1f36e21396626c9d71048da3399631f7e5634909
|
[] |
no_license
|
matthewpleasant/python_for_gis
|
9a3f7925b45a42c65b69be1de49691230e51d8f0
|
c9676133ecaf3231d91a75c2ff0e9b2ad515c0c9
|
refs/heads/master
| 2021-04-25T14:17:26.701244 | 2017-12-07T22:09:17 | 2017-12-07T22:09:17 | 110,298,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,358 |
py
|
import arcpy
home=arcpy.env.workspace = "C:/Users/mpleasan/Desktop/class8s/working/class8.gdb"
path1 = "C:/Users/mpleasan/Desktop/class8s/"
fcs = arcpy.ListFeatureClasses()
print fcs
sc = arcpy.da.SearchCursor("pointsSP", ['Shape@'])
##for r in sc:
## print type(r[0]) # ex. output: <class 'arcpy.arcobjects.geometries.PointGeometry'>
## print r[0].isMultipart # ex. output: False
## for q in r[0]: # Accessing the geometry object!!
## print type(q) # ex. output: <type 'geoprocessing point object'>
## print q.X, q.Y # ex . output: 575934.185446 614162.241113
##del sc
##
##sc = arcpy.da.SearchCursor("pointsMP", ['Shape@'])
##
##for r in sc:
## print type(r[0])
## print r[0].isMultipart
## for q in r[0]:
### print type(q)
## print q.X, q.Y
##del sc
##sc = arcpy.da.SearchCursor("pointsMP", ['Shape@XY'])
##for r in sc:
## print type(r[0])
## print r[0][0], r[0][1]
##
##sc = arcpy.da.SearchCursor("trailsSP", ['Shape@'])
##
### NOW THAT YOU'RE USING LINES, YOU HAVE TO ACCESS THE ARRAY
##
##for r in sc:
## print type(r[0])
## print r[0].isMultipart
## for q in r[0]:
## print type(q)
## for s in q: # THIS IS WHERE YOU ACCESS THE ARRAY, Q IS THE ARRAY
#### print type(s)
## print s.X, s.Y
##del sc
##sc = arcpy.da.SearchCursor("trailsMP", ['Shape@'])
##
##coords = []
##
##for r in sc:
## print type(r[0]) # geometry object
## print r[0].isMultipart
## for q in r[0]: # accessing the arrays that are within the geometry object
## print type(q) # an array
## for s in q:
#### print type(s)
#### print s.X, s.Y
## t = (s.X, s.Y) # Stores tuple of points
## coords.append(t) # Creating a list of tuples containing the x, y coordinates
##
##del sc
##print coords
##############################################
##f1 = open(path1 + "newStations.txt", "r") # Stores a "file" object in the f1 variable in read mode. This object has methods and properties
##
##ic = arcpy.InsertCursor("pointsSP", ["Shape@"])
##
##for dc in f1.readlines(): # iterates through each line in the file
## out = dc.split(",") # splits each line by commas, creating lists
## pt = arcpy.Point(float(out[1]), float(out[2])) # Have to turn the coordinates into floats, because they're decimals
## ic.insertRow([arcpy.PointGeometry(pt)]) # Creates a PointGeometry, and inserts it
##del ic
##f1.close() # Closes the file
##del ic  # the insert-cursor block above is commented out, so this del would raise a NameError
ic = arcpy.InsertCursor("pointsMP", ["Shape@"])
f1 = open(path1 + "newStations.txt", "r")
d = {} # Empty dictionary
for dc in f1.readlines():
out = dc.split(",")
out[3] = out[3][0] # Accounts for new line characters (line breaks) at the end of the lines in the file
if out[3] not in d:
d[out[3]] = arcpy.Array() # Runs twice, creates arrays for "A" and "B" and stores them in dictionary above ## CREATING ARRAY, WHICH HAS "ADD METHOD"
pt = arcpy.Point(float(out[1]), float(out[2]))
print pt.X, pt.Y
d[out[3]].add(pt) # Use the add fuction to add the point to the array
f1.close()
for x in d.keys(): #iterates through a list of keys -- not the values
ic.insertRow([arcpy.Multipoint(d[x])]) # REMEMBER THE BRACKETS HERE
del ic
|
[
"[email protected]"
] | |
cc99431e6928a9a383dda93218d492c8f7bfd8f9
|
c09f9e22658cdf54fbb308b79595f7e0e4b6d6a7
|
/color space change/track_blue.py
|
04a95458e3b7197e517cc0c18a2585b54b61b9d8
|
[] |
no_license
|
zx563147474/opencv_learning
|
ce252a9e161be60dae1b46a45c45cd064efd8201
|
679934f68a70aed375fcb33ff12f54e62acfecd9
|
refs/heads/master
| 2020-06-26T05:45:13.377899 | 2019-09-30T05:07:40 | 2019-09-30T05:07:40 | 199,551,010 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 970 |
py
|
# HSV value of the color blue
import cv2
import numpy as np
blue = np.uint8([[[255, 0, 0]]])
print(blue)
hsv_blue = cv2.cvtColor(blue, cv2.COLOR_BGR2HSV)
print(hsv_blue) # [[[120 255 255]]]
# 3. Track a blue object
capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Blue range; it differs under different lighting conditions and can be adjusted as needed
lower_blue = np.array([100, 110, 110])
upper_blue = np.array([130, 255, 255])
while(True):
    # 1. Capture one frame of the video
    ret, frame = capture.read()
    # 2. Convert from BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # 3. inRange(): pixels between lower/upper become white, all others black
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # 4. Keep only the blue parts of the original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
if cv2.waitKey(1) == ord('q'):
capture.release()
cv2.destroyAllWindows()
break
|
[
"[email protected]"
] | |
7b14e461e9ba7105b24ef8d77b490e8ec0419f57
|
c0239d75a8199ec84ad683f945c21785c1b59386
|
/dingtalk/api/rest/OapiChatTagDeleteRequest.py
|
2292ed627d4873421afe37fd82864be50c362d9b
|
[] |
no_license
|
luss613/oauth_dingtalk
|
9f253a75ce914c577dbabfb84e97fd883e80e04b
|
1e2554642d2b16c642a031670d08efa4a74e8252
|
refs/heads/master
| 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 |
Python
|
UTF-8
|
Python
| false | false | 348 |
py
|
'''
Created by auto_sdk on 2019.10.31
'''
from dingtalk.api.base import RestApi
class OapiChatTagDeleteRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.chatid = None
self.group_tag = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.chat.tag.delete'
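# Hypothetical usage sketch (the request is normally executed through the SDK's client; values are placeholders):
# req = OapiChatTagDeleteRequest()
# req.chatid = "chatxxx"
# req.group_tag = 1
# req.getapiname()  # -> 'dingtalk.oapi.chat.tag.delete'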
|
[
"[email protected]"
] | |
8f173791ac25db2f261ca3b803329440d7724ed9
|
1c902085a3a0ba2d366424a665f6f83bc2c63435
|
/level8.py
|
c8ebf10bc1babbdeb542c0a17ee232fe81053acc
|
[] |
no_license
|
MR-Geri/varios
|
e17ddd48cca14f92b2fad2c7f6f9b4b234108e02
|
310cc6ddd8856e50b1c93dc826a1485e1cc8e7b8
|
refs/heads/master
| 2022-11-28T18:56:21.170744 | 2020-08-13T19:13:09 | 2020-08-13T19:13:09 | 285,392,850 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
import bz2
un = b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw = b'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
decoded_un = bz2.decompress(un)
decoded_pw = bz2.decompress(pw)
print(decoded_un)
print(decoded_pw)
|
[
"[email protected]"
] | |
67d18a6184ac4dcad18c5acd2ecb55388c9e1b90
|
fe9c15246ef41d353f6e4ecd7ceca27d155861ed
|
/Time Complexity/TapeEquilibrium.py
|
a57b2243606a7063493bcd570204952a022b03e5
|
[] |
no_license
|
LaurentiuMM/Codility-Lessons---Python
|
e078e9c962cde6b565e0570261da70295763bb62
|
f6a2459d2e4771eae2ec2651fdd3e27d9c19cb56
|
refs/heads/master
| 2020-07-06T03:42:29.556261 | 2019-08-17T12:13:20 | 2019-08-17T12:13:20 | 202,876,630 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
# <<< start solution
import sys
import time
def solution(A):
N = len(A)
sums = [0] * N
sums[0] = A[0]
for i in range(1, N):
sums[i] = sums[i - 1] + A[i]
min_diff = sys.maxsize
for i in range(0, N - 1):
min_diff = min(min_diff, abs(2 * sums[i] - sums[N - 1]))
return min_diff
# end solution >>>
start_time = time.time()
# <<< start testing
A = [3, 1, 2, 4, 3]
result = solution(A)
print(result)
# end testing >>>
end_time = time.time()
print("Time: " + str(end_time - start_time))
|
[
"[email protected]"
] | |
b4c7dc9e5dd44603c578afbbbbda6d03f98ab0c0
|
da61401d1d520ecead67a7c9bbf53bcc61784968
|
/competancy_mgt/applications/competancy_mgt_app.py
|
c0f6e56a73785c21f97431ea6d34841a382b9184
|
[] |
no_license
|
saifsayyad/Competency_mgt
|
ecb9ff91d136242f9ccda8d9fc7a1f8dbd706f27
|
ef0e67a10eac7896e06374ea7a7ed5c74d0a84dd
|
refs/heads/master
| 2020-11-27T00:56:17.546672 | 2020-06-15T12:09:17 | 2020-06-15T12:09:17 | 229,249,771 | 1 | 0 | null | 2020-04-30T13:47:23 | 2019-12-20T11:07:59 |
CSS
|
UTF-8
|
Python
| false | false | 590 |
py
|
import os
from groundwork import App
from competancy_mgt.applications.configuration import APP_PATH
class COMPETANCY_MGT_APP:
def __init__(self):
# Let's be sure our APP_PATH really exists
if not os.path.exists(APP_PATH):
os.makedirs(APP_PATH)
self.app = App([os.path.join(os.path.dirname(__file__), "configuration.py")])
def start(self):
self.app.plugins.activate(self.app.config.get("PLUGINS", None))
self.app.commands.start_cli()
def start_app():
COMPETANCY_MGT_APP().start()
if "main" in __name__:
start_app()
|
[
"[email protected]"
] | |
187ba8799480652d89c93f0faa7a2c97b7f99b6a
|
d61f7eda203a336868c010abb8f9a6f45dd51adb
|
/497. Random Point in Non-overlapping Rectangles.py
|
01542c98bf043ff665c52427319b5c46b11bdf49
|
[] |
no_license
|
Mschikay/leetcode
|
b91df914afc728c2ae1a13d3994568bb6c1dcffb
|
7c5e5fe76cee542f67cd7dd3a389470b02597548
|
refs/heads/master
| 2020-04-17T12:11:38.810325 | 2019-10-06T02:37:32 | 2019-10-06T02:37:32 | 166,570,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
import random
from typing import List


class Solution:
def __init__(self, rects: List[List[int]]):
self.rects = rects
self.prefix = [0]
for x1, y1, x2, y2 in rects:
self.prefix.append((x2 - x1 + 1) * (y2 - y1 + 1) + self.prefix[-1])
def pick(self) -> List[int]:
        num = random.randint(1, self.prefix[-1])  # 1..total, so the binary search never lands before the first rectangle
l, h = 0, len(self.prefix) - 1
while l <= h:
m = (l + h) // 2
if self.prefix[m] < num:
l = m + 1
else:
h = m - 1
x1, y1, x2, y2 = self.rects[l - 1]
x = random.randint(x1, x2)
y = random.randint(y1, y2)
return [x, y]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
|
[
"[email protected]"
] | |
419b6c02dcf9186aa633d2437aefac974a53e4c2
|
c51090c1814ab0c3618d059913e9ba7514c18954
|
/fabfile.py
|
d6e834b06af0e25c9fbd2e459710caf2c8a91102
|
[] |
no_license
|
Narsiba/RTWPage
|
338361a5fcf36a236a760ee834ad9389c6575749
|
8fa1852ccf7e0737019de3ef6fd1d84bccd0323f
|
refs/heads/master
| 2020-06-15T17:31:02.327853 | 2017-02-26T08:36:18 | 2017-02-26T08:36:18 | 75,275,141 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| true | false | 21,822 |
py
|
from __future__ import print_function, unicode_literals
from future.builtins import open
import os
import re
import sys
from contextlib import contextmanager
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from importlib import import_module
from posixpath import join
from mezzanine.utils.conf import real_project_name
from fabric.api import abort, env, cd, prefix, sudo as _sudo, run as _run, \
hide, task, local
from fabric.context_managers import settings as fab_settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.contrib.project import rsync_project
from fabric.colors import yellow, green, blue, red
from fabric.decorators import hosts
################
# Config setup #
################
env.proj_app = real_project_name("RTWPage")
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = import_module("%s.settings" % env.proj_app).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS",
"multiprocessing.cpu_count() * 2 + 1")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
# Remote git repos need to be "bare" and reside separated from the project
if env.deploy_tool == "git":
env.repo_path = "/home/%s/git/%s.git" % (env.user, env.proj_name)
else:
env.repo_path = env.proj_path
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf.template",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf.template",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl update gunicorn_%(proj_name)s",
},
"cron": {
"local_path": "deploy/crontab.template",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/%(proj_app)s/local_settings.py",
},
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_path):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True, *args, **kwargs):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command, *args, **kwargs)
@task
def sudo(command, show=True, *args, **kwargs):
"""
Runs a command as sudo on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command, *args, **kwargs)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload the
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def rsync_upload():
"""
Uploads the project with rsync excluding some files and folders.
"""
excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
"local_settings.py", "/static", "/.git", "/.hg"]
local_dir = os.getcwd() + os.sep
return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
exclude=excludes)
def vcs_upload():
"""
Uploads the project with the selected VCS tool.
"""
if env.deploy_tool == "git":
remote_path = "ssh://%s@%s%s" % (env.user, env.host_string,
env.repo_path)
if not exists(env.repo_path):
run("mkdir -p %s" % env.repo_path)
with cd(env.repo_path):
run("git init --bare")
local("git push -f %s master" % remote_path)
with cd(env.repo_path):
run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path)
run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path)
elif env.deploy_tool == "hg":
remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string,
env.repo_path)
with cd(env.repo_path):
if not exists("%s/.hg" % env.repo_path):
run("hg init")
print(env.repo_path)
with fab_settings(warn_only=True):
push = local("hg push -f %s" % remote_path)
if push.return_code == 255:
abort()
run("hg update")
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return run("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return sudo(command, show=show, user="postgres")
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the project database.
"""
tmp_file = "/tmp/%s" % filename
# We dump to /tmp because user "postgres" can't write to other user folders
# We cd to / because user "postgres" might not have read permissions
# elsewhere.
with cd("/"):
postgres("pg_dump -Fc %s > %s" % (env.proj_name, tmp_file))
run("cp %s ." % tmp_file)
sudo("rm -f %s" % tmp_file)
@task
def restore(filename):
"""
Restores the project database from a previous backup.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os;" \
"os.environ[\'DJANGO_SETTINGS_MODULE\']=\'%s.settings\';" \
"import django;" \
"django.setup();" % env.proj_app
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
if show:
print_command(code)
result = run(full_code, show=False)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print(settings.STATIC_ROOT)", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
###########################
# Security best practices #
###########################
@task
@log_call
@hosts(["root@%s" % host for host in env.hosts])
def secure(new_user=env.user):
"""
Minimal security steps for brand new servers.
Installs system updates, creates new user (with sudo privileges) for future
usage, and disables root login via SSH.
"""
run("apt-get update -q")
run("apt-get upgrade -y -q")
run("adduser --gecos '' %s" % new_user)
run("usermod -G sudo %s" % new_user)
run("sed -i 's:RootLogin yes:RootLogin no:' /etc/ssh/sshd_config")
run("service ssh restart")
print(green("Security steps completed. Log in to the server as '%s' from "
"now on." % new_user, bold=True))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
# Install system requirements
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor python-pip")
run("mkdir -p /home/%s/logs" % env.user)
# Install Python requirements
sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
# Set up virtualenv
run("mkdir -p %s" % env.venv_home)
run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home,
env.user))
run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
"/home/%s/.bashrc" % env.user)
print(green("Successfully set up git, mercurial, pip, virtualenv, "
"supervisor, memcached.", bold=True))
@task
@log_call
def create():
"""
Creates the environment needed to host the project.
The environment consists of: system locales, virtualenv, database, project
files, SSL certificate, and project-specific Python requirements.
"""
# Generate project locale
locale = env.locale.replace("UTF-8", "utf8")
with hide("stdout"):
if locale not in run("locale -a"):
sudo("locale-gen %s" % env.locale)
sudo("update-locale %s" % env.locale)
sudo("service postgresql restart")
run("exit")
# Create project path
run("mkdir -p %s" % env.proj_path)
# Set up virtual env
run("mkdir -p %s" % env.venv_home)
with cd(env.venv_home):
if exists(env.proj_name):
if confirm("Virtualenv already exists in host server: %s"
"\nWould you like to replace it?" % env.proj_name):
run("rm -rf %s" % env.proj_name)
else:
abort()
run("virtualenv %s" % env.proj_name)
# Upload project files
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
# Create DB and DB user
pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "''"))  # double single-quotes to escape them in SQL
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Install project-specific requirements
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle psycopg2 "
"django-compressor python-memcached")
# Bootstrap the DB
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0])
for domain in env.domains:
python("from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain)
if env.admin_pass:
pw = env.admin_pass
user_py = ("from django.contrib.auth import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
run("rm -rf %s" % env.venv_path)
if exists(env.proj_path):
run("rm -rf %s" % env.proj_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
if exists(env.repo_path):
run("rm -rf %s" % env.repo_path)
sudo("supervisorctl update")
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
If the processes are not running, they will be started.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
run("kill -HUP `cat %s`" % pid_path)
else:
sudo("supervisorctl update")
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Backup current version of the project, push latest version of the project
via version control or rsync, install new requirements, sync and migrate
the database, collect any new static assets, and restart gunicorn's worker
processes for the project.
"""
if not exists(env.proj_path):
if confirm("Project does not exist in host server: %s"
"\nWould you like to create it?" % env.proj_name):
create()
else:
abort()
# Backup current version of the project
with cd(env.proj_path):
backup("last.db")
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run("git rev-parse HEAD > %s/last.commit" % env.proj_path)
elif env.deploy_tool == "hg":
run("hg id -i > last.commit")
with project():
static_dir = static()
if exists(static_dir):
run("tar -cf static.tar --exclude='*.thumbnails' %s" %
static_dir)
else:
with cd(join(env.proj_path, "..")):
excludes = ["*.pyc", "*.pio", "*.thumbnails"]
exclude_arg = " ".join("--exclude='%s'" % e for e in excludes)
run("tar -cf {0}.tar {1} {0}".format(env.proj_name, exclude_arg))
# Deploy latest version of the project
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
with project():
manage("collectstatic -v 0 --noinput")
manage("migrate --noinput")
for name in get_templates():
upload_template_and_reload(name)
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the project files, the database, and all static
files. Calling rollback will revert all of these to their state prior to
the last deploy.
"""
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run("GIT_WORK_TREE={0} git checkout -f "
"`cat {0}/last.commit`".format(env.proj_path))
elif env.deploy_tool == "hg":
run("hg update -C `cat last.commit`")
with project():
with cd(join(static(), "..")):
run("tar -xf %s/static.tar" % env.proj_path)
else:
with cd(env.proj_path.rsplit("/", 1)[0]):
run("rm -rf %s" % env.proj_name)
run("tar -xf %s.tar" % env.proj_name)
with cd(env.proj_path):
restore("last.db")
restart()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
|
[
"[email protected]"
] | |
51ddb757a62ae6e72e67154b814429ba16760036
|
d22c05787c28dbbcbc889c66d71d6a8e9bc1c407
|
/Get_Requests_From_Local_NIC/NIC_package_get.py
|
01830018bce79b77832d9869092beece00aea024
|
[
"MIT"
] |
permissive
|
muxinsq/2020-Works-ApiSecurity
|
fe1f0319558e30f1b345968a693830b278171dea
|
95569f7c19ab59731623b8f40658714643d304e4
|
refs/heads/master
| 2021-04-11T01:59:56.370877 | 2020-03-21T04:17:45 | 2020-03-21T04:17:45 | 248,984,504 | 1 | 0 |
MIT
| 2020-03-21T13:44:12 | 2020-03-21T13:44:11 | null |
UTF-8
|
Python
| false | false | 850 |
py
|
#coding:utf-8
from scapy.all import *
import sys
package_output='document' #'screen'/'document'/'all'
def package_print(packet):
if (package_output == 'screen') or (package_output == 'all'):
print("\n".join(packet.sprintf("{Raw:%Raw.load%}").split(r"\r\n"))+'\n\n\n')
    if (package_output == 'document') or (package_output == 'all'):
        # Use a context manager so the file handle is flushed and closed after every packet
        with open(sys.path[0] + '//package.txt', 'a') as outputxt:
            print(sys.path[0] + '//package.txt')
            outputxt.write("\n".join(packet.sprintf("{Raw:%Raw.load%}").split(r"\r\n")) + '\n\n\n')
#return "\n".join(packet.sprintf("{Raw:%Raw.load%}").split(r"\r\n"))+'\n\n\n'
sniff(
iface='Realtek 8821CE Wireless LAN 802.11ac PCI-E NIC',
prn=package_print,
lfilter=lambda p: ("GET" in str(p)) or ("POST" in str(p)),
filter="tcp")
# iface='XXX' listens on the local NIC named XXX
|
[
"[email protected]"
] | |
9ed6c6ad5e53713efd3778aebd13d1c36a523e85
|
3caed455cffc9fbc1a23dd362fcd4af147902b54
|
/02-Data-Toolkit/01-Data-Analysis/01-Notebook/hello.py
|
2263a2fe4977121dcc8155d3ca589138e8a495bb
|
[] |
no_license
|
flo1222/data-challenges-old
|
3d06b782cede11fdba6123583259878b870f8653
|
b93f93d09892e7b5aec08810029a450ed011c704
|
refs/heads/master
| 2023-08-16T04:51:13.032204 | 2021-09-26T11:41:52 | 2021-09-26T11:41:52 | 410,534,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 89 |
py
|
# pylint: disable=missing-docstring
def hello_world():
return "Hello from hello.py"
|
[
"[email protected]"
] | |
49dd4df042d6c013c345ea9fb026e7f1835e8969
|
7673ea54ff4d1a61373a0900efe27351bf63a2a8
|
/Debugging/trainres.py
|
075851cc1c57541a1c34a4cc02e041643be4d4fb
|
[] |
no_license
|
mdkul22/ece285sp19-project
|
d7a048f727ed02830ad7a5fc61483bca564845c8
|
cc789eae150102eff4972169d64145d1904cf779
|
refs/heads/master
| 2020-05-18T23:26:34.587607 | 2019-06-10T02:27:08 | 2019-06-10T02:27:08 | 184,711,405 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,567 |
py
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as td
import torch.nn.functional as F
import torchvision as tv
from PIL import Image
import nntools as nt
import matplotlib.pyplot as plt
from dataloader import VOCDataset, myimshow
import model
class statsmanager(nt.StatsManager):
    def __init__(self):
        super(statsmanager, self).__init__()
def init(self):
super(statsmanager,self).init()
self.m_ap=0
def accumulate(self,loss,x,y,d):
#Do m_ap calculations
super(statsmanager,self).accumulate(loss,x,y,d)
def summarize(self):
loss=super(statsmanager,self).summarize()
return {'loss':loss}
    def plot(self, fig, ax1, ax2, im):
ax1.set_title('Image')
x,y=train_set[0]
myimshow(x,ax=ax1)
ax2.set_title('Loss')
ax2.plot([exp1.history[k]['loss']for k in range(exp1.epoch)])
plt.tight_layout()
fig.canvas.draw()
lr=1e-3
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
vgg = model.Resnet18Transfer(num_classes=20,n_batch=4)
vgg.to(device)
adam=torch.optim.Adam(vgg.parameters(),lr=lr)
stats_manager=statsmanager()
train_set=VOCDataset('../VOCdevkit/VOC2012/')
valid_set=VOCDataset('../VOCdevkit/VOC2012/', mode="val")
x,y=train_set[0]
exp1=nt.Experiment(vgg,train_set,valid_set,adam,stats_manager,batch_size=4,output_dir="runres",perform_validation_during_training=True)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1)
exp1.run(num_epochs=5, plot=lambda exp: stats_manager.plot(fig=fig, ax1=ax1, ax2=ax2, im=x))
|
[
"[email protected]"
] | |
c47fccc01fc9a053870024da3c03694e2581fa93
|
f83dec9a439dac4033948719c43a5faba315171b
|
/article/migrations/0001_initial.py
|
0195b1f910e543f45a73f0369dc7571852f691de
|
[] |
no_license
|
kevinngetich/showcase
|
446d49950963a0401eec1a156b46ab023d6b0483
|
c3a5bfd26e0bf084815073f6d702109257fd4c23
|
refs/heads/master
| 2020-05-19T00:48:21.239700 | 2019-05-03T11:21:43 | 2019-05-03T11:21:43 | 184,743,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 638 |
py
|
# Generated by Django 2.1.3 on 2019-02-10 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('article_title', models.CharField(max_length=250)),
('article_text', models.CharField(max_length=5000)),
('image', models.ImageField(upload_to='article_image')),
],
),
]
|
[
"[email protected]"
] | |
1154b16d6df238a8dda389b0016c8502dba497ac
|
304fa43e6d8b78ef27a52dc31fc6d70f27ae856e
|
/league/Leagues.py
|
efc33f99e923fefed85e6b32e4d3d8c8dc1702ba
|
[] |
no_license
|
yaooo/Stock-Fantasy-League
|
0049a303eeecc0c72a960321780d0da29babc961
|
01ce360d569c767e229c6cf60171600f12cedd6b
|
refs/heads/master
| 2021-05-09T04:48:39.119384 | 2018-03-24T21:46:24 | 2018-03-24T21:46:24 | 119,289,339 | 3 | 2 | null | 2018-01-29T00:37:51 | 2018-01-28T19:14:16 | null |
UTF-8
|
Python
| false | false | 167 |
py
|
from flask_restful import reqparse, abort, Resource
class Leagues(Resource):
@staticmethod
def get(cur, LID):
return 'Hello, Brian, ID = ' + str(LID)
|
[
"[email protected]"
] | |
983e5124705f4cb91b3e31c3652d19a6497e3c25
|
fe0fff66d2d0f56b87882dfe41c577a87865e6f1
|
/Img_to_word.py
|
1d7c7e974ae083c84b70e079acc956daac0e6825
|
[] |
no_license
|
ymmh123456789/OCR
|
7b73933b2c5bf23a5cfc9e60395271d5819301b3
|
f8fd503c530934633002d17bdd9fe0a99c33686f
|
refs/heads/master
| 2021-01-01T15:43:07.293151 | 2017-08-02T01:18:35 | 2017-08-02T01:18:35 | 97,682,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,160 |
py
|
# coding:utf-8
import os
rootdir = os.path.join(os.getcwd(),"Book\\8\\")
files = [os.path.join(rootdir,item) for item in os.listdir(rootdir) if item.endswith('.xml')] # e.g. find the files whose names end with .xml
def load_text(file):
with open(file,'r') as I:
print rootdir
count = 0
flag = False
for line in I:
line = line.strip()
# line = list(line)
count += 1
print count, line
img_pos = line.find("img")
if img_pos != -1:
flag = True
new_img = ''
start_or_end = False
for i in range(img_pos, len(line)):
if line[i] == '"' and not start_or_end:
start_or_end = True
elif line[i] == '"' and start_or_end:
break
elif start_or_end:
new_img += line[i]
else:
pass
if new_img != '':
with open(rootdir + "match.txt", "a") as W:
W.writelines(new_img+"\n")
print new_img
elif flag:
word_bool = True
i = 0
new_line = ''
while i < len(line):
if line[i] == "<" or not word_bool:
if line[i] == ">":
word_bool = True
else:
word_bool = False
# del line[i]
# i -= 1
else:
new_line += line[i]
i+=1
if new_line != '':
new_line = new_line.replace('\xe3\x80\x80', "")
# new_line = new_line.replace(u"\xa0", "")
with open(rootdir + "match.txt", "a") as W:
W.writelines(new_line+"\n")
print new_line
else:
pass
# print files
if __name__ == "__main__":
for file in files:
load_text(file)
|
[
"[email protected]"
] | |
ba802b9c190c3083aa933b00887236c6ad1b4277
|
71f39b722f1204738b53e90d8566bcf6da99d494
|
/apps/incubator/migrations/0004_auto_20181106_1401.py
|
08eb2dabca3ab2ddac44d79bf4d15373287cca68
|
[] |
no_license
|
kingvern/txplatform
|
cd9fc36fe3ba536b7578d734f520d0f091db4b22
|
235465b742d0ba13132f872e0f3818990f232888
|
refs/heads/master
| 2022-12-17T00:03:50.675329 | 2018-11-16T10:02:35 | 2018-11-16T10:02:35 | 149,862,235 | 0 | 0 | null | 2022-11-22T02:53:29 | 2018-09-22T09:17:06 |
JavaScript
|
UTF-8
|
Python
| false | false | 751 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-11-06 14:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('incubator', '0003_couveuse_financial_park'),
]
operations = [
migrations.AddField(
model_name='couveuse',
name='url_companys',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u5b75\u5316\u9875\u9762'),
),
migrations.AddField(
model_name='couveuse',
name='url_introduce',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u4ecb\u7ecd\u9875\u9762'),
),
]
|
[
"[email protected]"
] | |
a516166dbb3533ae75e2a0c8ace848504fad019a
|
925ccdf93bb69ad3160057cc04cd0457a4def0e6
|
/simurg/logger/logstash_formatter.py
|
d9ed823b8b24983b8219bd40a9bdb750d5a93a79
|
[
"MIT"
] |
permissive
|
pasmod/simurg
|
e9fe185bb4ddb1dfe35d3459b89891a168a8fb4b
|
9fe84fb300810ab7f703385c2dd1e5e7afa712f9
|
refs/heads/master
| 2021-04-12T03:42:26.815547 | 2017-03-28T08:05:04 | 2017-03-28T08:05:04 | 53,422,442 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,093 |
py
|
'''
This library is provided to allow standard python
logging to output log data as JSON formatted strings
ready to be shipped out to logstash.
'''
import logging
import socket
import datetime
import traceback as tb
import json
def _default_json_default(obj):
"""
Coerce everything to strings.
All objects representing time get output as ISO8601.
"""
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
else:
return str(obj)
class LogstashFormatter(logging.Formatter):
"""
A custom formatter to prepare logs to be
shipped out to logstash.
"""
def __init__(self,
fmt=None,
datefmt=None,
json_cls=None,
json_default=_default_json_default):
"""
:param fmt: Config as a JSON string, allowed fields;
extra: provide extra fields always present in logs
source_host: override source host name
:param datefmt: Date format to use (required by logging.Formatter
interface but not used)
:param json_cls: JSON encoder to forward to json.dumps
:param json_default: Default JSON representation for unknown types,
by default coerce everything to a string
"""
if fmt is not None:
self._fmt = json.loads(fmt)
else:
self._fmt = {}
self.json_default = json_default
self.json_cls = json_cls
if 'extra' not in self._fmt:
self.defaults = {}
else:
self.defaults = self._fmt['extra']
if 'source_host' in self._fmt:
self.source_host = self._fmt['source_host']
else:
            try:
                self.source_host = socket.gethostname()
            except Exception:
                self.source_host = ""
class LogstashFormatterV2(LogstashFormatter):
"""
A custom formatter to prepare logs to be
shipped out to logstash V1 format.
"""
def _make_timestamp(self, now):
sft = now.strftime("%Y-%m-%dT%H:%M:%S")
millis = ".%03dZ" % (now.microsecond / 1000)
return sft + millis
def _drop_some(self, fields):
for field in ['args', 'created', 'filename', 'funcName', 'levelno',
'lineno', 'module', 'msecs', 'pathname', 'process',
'processName', 'relativeCreated', 'source_host',
'stack_info', 'thread', 'threadName']:
fields.pop(field, None)
def _filter_severity(self, fields):
severity = fields.pop('levelname').lower()
if 'warning' == severity:
severity = 'warn'
elif 'critical' == severity:
severity = 'fatal'
fields['severity'] = severity
def _filter_message(self, fields):
fields['message'] = fields.pop('msg', None)
if type(fields['message']) is dict:
params = fields.pop('message')
fields['message'] = params.pop('message', None)
fields['params'] = params
def _filter_exception(self, fields):
if 'exc_info' in fields:
if fields['exc_info']:
formatted = tb.format_exception(*fields['exc_info'])
fields['exception'] = formatted
fields.pop('exc_info')
if 'exc_text' in fields and not fields['exc_text']:
fields.pop('exc_text')
def format(self, record):
"""
Format a log record to JSON, if the message is a dict
assume an empty message and use the dict as additional
fields.
"""
fields = record.__dict__.copy()
self._drop_some(fields)
self._filter_severity(fields)
self._filter_message(fields)
self._filter_exception(fields)
fields['@timestamp'] = self._make_timestamp(datetime.datetime.utcnow())
fields['@version'] = 1
logr = self.defaults.copy()
logr.update(fields)
return json.dumps(logr, default=self.json_default, cls=self.json_cls)
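# A minimal usage sketch (assumed wiring, not part of the original module):
# import logging, sys
# handler = logging.StreamHandler(sys.stdout)
# handler.setFormatter(LogstashFormatterV2())
# log = logging.getLogger("simurg")
# log.addHandler(handler)
# log.warning({"message": "crawl finished", "pages": 42})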
|
[
"[email protected]"
] | |
3b65c4d7657ad654a6586dcd3158a572adc38cb0
|
ea04e29123cdbe5f49be236ee1df8397da25b330
|
/reportcards/urls.py
|
8a6a5fc8ba1879bbb8b6c936ab33b55758529138
|
[
"MIT"
] |
permissive
|
Davy-71993/MySchool
|
bf187673e4796010dd8fd4bcbf27b0b87199794d
|
fa02c8ec19d71873fc0d714cf652d8ad05f2f0e7
|
refs/heads/master
| 2022-05-28T23:24:06.865580 | 2020-05-04T19:03:04 | 2020-05-04T19:03:04 | 261,251,617 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 606 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.creat_marklist, name='creat_marklist'),
path('<mkl>', views.setup_marklist, name='setup_marklist'),
path('marklist/<pk>/', views.marklist, name='marklist'),
path('marksbysubject/<mkl>/<sub>/', views.marks_by_subject, name='marks_by_subject'),
path('marksbystudent/<mkl>/<std>/', views.marks_by_student, name='marks_by_student'),
path('marklistbystream/<mkl>/', views.marks_by_stream, name='marks_by_stream'),
path('classmarklist/<klass>/<term>/', views.class_marklist, name='class_marklist'),
]
|
[
"[email protected]"
] | |
1158637ae1565bfda9ea360030dc766550d3b79e
|
734104aef567d19e2d7cdcf05f1f887bbd29c5eb
|
/hawkeye_autotest/selenium/crondns.py
|
54491f62e148582dbd986c3953dc1a83bc61c3bc
|
[
"Apache-2.0"
] |
permissive
|
miaolujing/python_script
|
4d7719bc9294013bacf261e03dd06aa216e389cc
|
57ccf89f53ce0ce551804b5693515d8a8db4ce78
|
refs/heads/master
| 2021-01-10T06:33:08.120663 | 2017-02-17T05:58:09 | 2017-02-17T05:58:09 | 47,525,529 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,501 |
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re, config, os, sys, db, addtask, assertresult, deletetask
class Crondns(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.base_url = config.baseurl
self.verificationErrors = []
self.accept_next_alert = True
def test_crondns(self):
        # Log in
driver = self.driver
driver.get(self.base_url + "/")
driver.implicitly_wait(30)
driver.switch_to_window(driver.window_handles[0])
driver.find_element_by_id("name").clear()
driver.find_element_by_id("name").send_keys(config.user)
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys(config.passwd)
driver.find_element_by_id("sign").click()
driver.implicitly_wait(30)
driver.find_element_by_link_text(u"监控配置").click()
time.sleep(5)
driver.switch_to_window(driver.window_handles[0])
driver.find_element_by_link_text(u"监控图管理").click()
time.sleep(5)
driver.switch_to_window(driver.window_handles[0])
driver.find_element_by_link_text(u"报表管理").click()
time.sleep(5)
driver.switch_to_window(driver.window_handles[1])
        # Add the scheduled DNS task
        if addtask.adddns('{ "hostName":"'+config.crondns+'","nameserver":""}',config.cronhost1,"300","dns") == False:
            print "post dnscrontask fail"
            driver.quit()
            os._exit(0)
        else:
            print "post dnscrontask success"
time.sleep(5)
        # Check whether the scheduled task was added successfully
        if len(db.readtaskid(config.crondns)) < 1:
            print "add dnscrontask fail"
            driver.quit()
            os._exit(0)
        else:
            print "add dnscrontask success"
        # Add the monitoring chart
datas = db.readtaskid(config.crondns)
data = ''
counts = ''
for i in range(0, len(datas)):
if i < len(datas)-1:
data += "metric="+config.crondns+" metric="+str(datas[i])+"\n"
if i == len(datas)-1:
data += "metric="+config.crondns+" metric="+str(datas[i])+""
if i < len(datas)-1:
counts +="metric="+config.crondns+" metric="+str(datas[i])+"|"
if i == len(datas)-1:
counts +="metric="+config.crondns+" metric="+str(datas[i])+""
        if addtask.addmonitor(config.dnsname,config.dashhost1,"3600",data,"h","true") == False:
            print "post dnsmonitortask fail"
            driver.quit()
            os._exit(0)
        else:
            print "post dnsmonitortask success"
time.sleep(5)
        # Verify the monitoring-graph task was created
if db.readmonitor(counts) == config.dnsname:
print "add dnsmonitortask suss"
else:
print "add dnsmonitortask fail"
driver.quit()
os._exit(0)
time.sleep(300)
        # Check the report display
driver.switch_to_window(driver.window_handles[3])
driver.find_element_by_xpath("//div[@id='container']/div/div/ul/li/div/button").click()
driver.find_element_by_css_selector("li.screen > a > small").click()
driver.find_element_by_xpath("//div[@id='container']/div/div/ul/li[2]/div/button").click()
driver.find_element_by_css_selector("div.btn-group.open > ul.dropdown-menu > li.screen > a > small").click()
assertresult.graphgetdns()
time.sleep(2)
        # Delete the monitoring-graph configuration
driver.switch_to_window(driver.window_handles[2])
time.sleep(2)
driver.find_element_by_id("g_name").clear()
driver.find_element_by_id("g_name").send_keys(config.dnsname)
driver.find_element_by_css_selector("button.btn.btn-info").click()
time.sleep(2)
driver.find_element_by_name("btSelectAll").click()
time.sleep(2)
driver.find_element_by_css_selector("button.btn.btn-warning").click()
self.assertRegexpMatches(self.close_alert_and_get_its_text(), u"确认删除1个图表配置?")
        # Delete the scheduled task
deletetask.crondel(config.crondns)
time.sleep(2)
        # Log out
driver.switch_to_window(driver.window_handles[0])
time.sleep(2)
driver.find_element_by_link_text(u"用户登录/退出").click()
driver.find_element_by_link_text(u"退出").click()
driver.quit()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__ == "__main__":
unittest.main()
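# Hedged run note: executes as `python crondns.py` and assumes the sibling
# modules config, db, addtask, assertresult and deletetask are importable;
# config must define baseurl, user, passwd, crondns, cronhost1, dnsname and
# dashhost1, all referenced above.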
|
[
"[email protected]"
] | |
eba8e549a12d0b22421b0407b63826019b87c74c
|
92ee5595f23ac1076b5e76d2679dd31ab89705fe
|
/sequencinator/matrix_tools.py
|
639e10407e45a3889629e62fe7023eb92b4c4843
|
[] |
no_license
|
DiogoM1/MBINF-AASB_Projeto
|
188f9c94968066186e98f773112adb7e81e4de00
|
144b6ebc18a4da4dd70f5d0d9ba9f1acdd768ae2
|
refs/heads/main
| 2023-01-27T11:53:01.331760 | 2020-12-02T18:17:38 | 2020-12-02T18:17:38 | 309,809,268 | 0 | 0 | null | 2020-12-02T15:27:38 | 2020-11-03T21:17:08 |
Python
|
UTF-8
|
Python
| false | false | 894 |
py
|
def max_matrix(matrix):
# finds max value
# https://www.educba.com/numpy-argmax/
max_list = []
for a in range(0, len(matrix)):
max_list.append(max(matrix[a]))
return max(max_list)
def find_last_max(matrix):
    # finds the coordinates of the last occurrence of the max
    # (ties broken in favour of the largest index sum)
    max_val = max_matrix(matrix)  # avoid shadowing the built-in max
    max_index_sum = 0
    index_a, index_b = 0, 0
    for a in range(0, len(matrix)):
        for b in range(0, len(matrix[0])):
            if matrix[a][b] == max_val and a + b >= max_index_sum:
                max_index_sum = a + b  # update resolves the tie-break ("desempate") TODO
                index_a, index_b = a, b
    return index_a, index_b
def find_all_max(matrix):
    # finds the coordinates of all occurrences of the max
    max_val = max_matrix(matrix)
    index_list = []
    for a in range(0, len(matrix)):
        for b in range(0, len(matrix[0])):
            if matrix[a][b] == max_val:
                index_list.append((a, b))
    return index_list
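# Minimal usage sketch (illustrative values only):
#   m = [[1, 5], [5, 2]]
#   max_matrix(m)     -> 5
#   find_all_max(m)   -> [(0, 1), (1, 0)]
#   find_last_max(m)  -> (1, 0)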
|
[
"[email protected]"
] | |
77415745cb1aa2ac72dc1415a343d7a3c84e3fa2
|
a6c86e21f66968b8ddce0382e7bcceb43765d29e
|
/src/serialio.py
|
63613d515ce5bcf7666aa8a76470af627b28e356
|
[] |
no_license
|
HenkVanAsselt/IFR1200
|
5ef5491d9d0077bfd5b2af9f98841d27b93d5863
|
8c39f7d6b590795fb45733c40cb03108c36822e5
|
refs/heads/master
| 2021-04-04T15:34:14.087285 | 2020-08-13T11:38:19 | 2020-08-13T11:38:19 | 248,463,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,451 |
py
|
"""IFR 1200 serial commands and tests
"""
# Global imports
import time
import serial
# Local imports
import src.lib.helper
from src.lib.helper import debug
# ============================================================================
class IFR1200Io:
"""Class representing the serial Interface to IFR 1200"""
# -------------------------------------------------------------------
def __init__(self):
"""Intialize class IfrInterface"""
self.comport = 'COM1:'
self.baudrate = 9600
self.serialport = None
debug('Intializing serial port')
try:
self.serialport = serial.Serial(self.comport, baudrate=self.baudrate, timeout=0.2)
debug('serial port is opened')
except Exception as e:
debug(f"EXCEPTION: {e}")
self.serialport = None
return
self.send_lf()
ret = self.ask_uok()
if ret == b'%':
debug(f"IFR1200Io is initialized")
else:
print('Error in initializing class IFR1200Io')
# -------------------------------------------------------------------
def send_command(self, cmd) -> bytes:
"""Send the given command to the IFR 1200 and wait for the response
:param cmd: The string with the command to send
:returns: The response (if any)
"""
if not self.serialport:
raise serial.SerialException
bytes_waiting = self.serialport.inWaiting()
if bytes_waiting:
debug(f"UNEXPECTED: Still {bytes_waiting} bytes waiting in serial port before sending command {cmd}")
s = self.serialport.read(bytes_waiting)
debug(f"RESPONSE1 = {s}")
if isinstance(cmd, bytes):
pass
elif isinstance(cmd, str):
cmd = cmd.encode('utf-8')
debug(f"Command: {cmd}")
self.serialport.write(cmd)
self.serialport.flush()
time.sleep(0.2)
# Only get response in the following cases:
if b"?" in cmd or b'DUMP' in cmd or b'MTR1' in cmd or b'MTR2' in cmd or b'DTME' in cmd:
ret = self.get_response(cmd)
return ret
bytes_waiting = self.serialport.inWaiting()
if bytes_waiting:
debug(f"UNEXPECTED: Still {bytes_waiting} bytes waiting in serial port after sending cmd {cmd}")
s = self.serialport.read(bytes_waiting)
debug(f"RESPONSE2 = {s}")
return s
# -------------------------------------------------------------------
def get_response(self, cmd='') -> bytes:
""" Wait for a response from the IFR.
:param cmd: The command to send
:returns: The stripped response
"""
if not self.serialport:
raise serial.SerialException
# Determine the number of waiting, incoming bytes
# debug(f"waiting for response for command [{cmd}]")
time.sleep(0.2) # Wait somewhat more time.
bytes_waiting = self.serialport.inWaiting()
# debug(f'bytes waiting = {bytes_waiting}')
# Read the bytes, and append them to the return string
s = self.serialport.read(bytes_waiting)
debug(f"Response: {s}")
if s.startswith(b"**"):
print(f"ERROR: INVALID COMMAND [{cmd.decode().strip()}]")
return s.strip()
# -------------------------------------------------------------------
    def send_lf(self) -> bytes:
        """Send linefeeds to wake the remote interface
        The 1200 should say on its display: RS-232 ENABLED
        :return: response of this command
        """
cmd = b"\n\n"
ret = self.send_command(cmd)
return ret
# -------------------------------------------------------------------
def ask_uok(self):
""" Check if the IFR can be reached and responds
:returns: The response, which should be b'%\r\n'
"""
cmd = b"UOK?\n"
response = self.send_command(cmd)
return response
# -------------------------------------------------------------------
def close(self):
"""Close the serial port"""
self.serialport.close()
# -------------------------------------------------------------------
if __name__ == "__main__":
"""The main of this module will perform some tests"""
# src.lib.helper.clear_debug_window()
debug(f"Testing {__file__}")
io = IFR1200Io()
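    # Hedged example of a round trip (requires the instrument on COM1):
    # response = io.ask_uok()   # expect b'%' when the IFR 1200 answers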
io.close()
|
[
"[email protected]"
] | |
1ec433659be952df3dbd83f186482baaffd5b2fe
|
6f5b206e416ba8a16d3a559ad9e9329fbaed672b
|
/pinballbase/fpsmeter.py
|
7c2adad96d5e64ebef736247099a6b62aff88bd4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
OpenDisneyGames/PiratesPinball
|
66a51935db81f4698d35e55f97ffddbb2a98cb8a
|
411728429083e2f36a691b8db7966f91a1ea6a1f
|
refs/heads/master
| 2022-10-06T00:58:58.861183 | 2020-06-08T07:34:38 | 2020-06-08T07:34:38 | 270,551,727 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,651 |
py
|
import time
from functools import reduce

import direct.directbase.DirectStart  # starts ShowBase and exposes the global taskMgr
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import *
from direct.task import Task  # binds the Task name used in enable()/fpsTask()
from panda3d.core import Vec4  # used in initLabel() but previously never imported
class FPSMeter(DirectObject):
def __init__(self, displayFPS=True, numSamples=20):
self.displayFPS = displayFPS
self.numSamples = numSamples
self.fps = 0.0
self.samples = []
if displayFPS:
self.initLabel()
def initLabel(self):
self.fpsLabel = DirectLabel(relief=None, pos=(-0.95, 0, 0.9), text='0.0 fps', color=Vec4(0, 0, 1, 1), text_scale=0.1)
self.fpsLabel.hide()
return
def enable(self):
self.disable()
self.samples = []
self.lastTime = time.time()
if self.displayFPS:
self.fpsLabel.show()
task = Task.Task(self.fpsTask)
taskMgr.add(task, 'fpsTask')
def disable(self):
if self.displayFPS:
self.fpsLabel.hide()
taskMgr.remove('fpsTask')
def fpsTask(self, task):
self.updateFPS()
if self.displayFPS:
self.updateDisplay()
return Task.cont
def updateFPS(self):
t = time.time()
dt = t - self.lastTime
self.samples.append(dt)
if len(self.samples) > self.numSamples:
self.samples.pop(0)
self.lastTime = t
denom = reduce(lambda x, y: x + y, self.samples)
if denom != 0:
self.fps = len(self.samples) / denom
else:
self.fps = 100.0
def updateDisplay(self):
self.fpsLabel['text'] = '% 3.1f fps' % self.fps
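# Minimal usage sketch (hedged: assumes the Panda3D main loop from
# DirectStart is running, so the global taskMgr exists):
#   meter = FPSMeter(displayFPS=True, numSamples=20)
#   meter.enable()    # starts the averaging task and shows the label
#   ...
#   meter.disable()   # hides the label and removes the task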
|
[
"[email protected]"
] | |
80dc412129683b4b0477fa6e4975e286fb8875a3
|
15703d435938a852e0ca586840e412c1e54740ce
|
/logistica/models.py
|
87df9929625f0d7d99bc2992bda31aa81373b9b5
|
[] |
no_license
|
fcaapps/dashcase
|
718f0b18545e27b302abfb41e0ce49ca77b185bf
|
e6d6e498783439ad78f666c7658b761ed30127e0
|
refs/heads/master
| 2022-12-10T09:59:23.683301 | 2018-10-16T13:10:41 | 2018-10-16T13:10:41 | 150,775,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 336 |
py
|
from django.db import models
class PermissoesLogistica(models.Model):
class Meta:
        managed = False  # No database table creation or deletion operations
                         # will be performed for this model.
permissions = (
('logistica_permissoes', 'Permissão Global de Logistica'),
)
|
[
"[email protected]"
] | |
5b451f8fca2b8cdfbc03a067f3d1368e8042e36c
|
056a1050e0e0bf78eeedaf978841a61e5203936a
|
/Python/Flask_Blog/05-Package-Structure/flaskblog/models.py
|
8dd3af1c5fdbfc8535f369b1cab4ddc0456a738a
|
[
"MIT"
] |
permissive
|
DSNR/snippets
|
cc3990f0ac467e19754f0a76598809eddede95fd
|
12006dd083be60c6444d8b5ca48fd917005e081b
|
refs/heads/master
| 2023-08-23T18:55:34.038472 | 2021-10-11T02:35:28 | 2021-10-11T02:35:28 | 415,750,242 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,017 |
py
|
from datetime import datetime
from flaskblog import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
|
[
"[email protected]"
] | |
b83dd742d29f7987ba26646427a8b0001749c6d4
|
90974492bf7b78969e976ba97f6a51510fb84fe8
|
/qlearning-3-qtable.py
|
4a1ce78aabee227a05b56f16bd170a2bacdee8b5
|
[] |
no_license
|
nexthybrid/Python-Q-Learning
|
82c719d475ad871fd6ed5648c7abddbea1fcc4dd
|
23d2392dbb379d2981b1845679f4cf7dacbe90ad
|
refs/heads/main
| 2023-01-01T22:36:43.105496 | 2020-10-08T02:21:07 | 2020-10-08T02:21:07 | 301,920,508 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 983 |
py
|
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
def get_q_color(value, vals):
if value == max(vals):
return "green", 1.0
else:
return "red", 0.3
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
i = 24900
q_table = np.load(f"qtables/{i}-qtable.npy")
for x, x_vals in enumerate(q_table):
for y, y_vals in enumerate(x_vals):
ax1.scatter(x, y, c=get_q_color(y_vals[0], y_vals)[0], marker="o", alpha=get_q_color(y_vals[0], y_vals)[1])
ax2.scatter(x, y, c=get_q_color(y_vals[1], y_vals)[0], marker="o", alpha=get_q_color(y_vals[1], y_vals)[1])
ax3.scatter(x, y, c=get_q_color(y_vals[2], y_vals)[0], marker="o", alpha=get_q_color(y_vals[2], y_vals)[1])
ax1.set_ylabel("Action 0")
ax2.set_ylabel("Action 1")
ax3.set_ylabel("Action 2")
plt.show()
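# Hedged note: this expects ./qtables/24900-qtable.npy on disk -- an array of
# shape (bins, bins, 3), one value per action, saved by an earlier training
# step in this series.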
|
[
"[email protected]"
] | |
68c69662eccb7e28430427119124efb2630482b3
|
04ab24e9e7742895771de803959d1167e416a57e
|
/app/main/services/category_service.py
|
4f566ef30aca6a112baede20ead7f70131098cf4
|
[] |
no_license
|
TTomas78/Taller-Python
|
2ee35c4a72d59b5e1b83a454d071ed40b547371d
|
2e9d72b9621c022cad916d6494528daee81182d2
|
refs/heads/master
| 2022-10-10T03:13:03.162248 | 2019-11-27T22:05:53 | 2019-11-27T22:05:53 | 211,518,544 | 1 | 0 | null | 2022-09-16T18:10:58 | 2019-09-28T15:09:18 |
Python
|
UTF-8
|
Python
| false | false | 180 |
py
|
from app.main.repositories.category_repository import CategoryRepository
class CategoryService():
@staticmethod
def get_all():
return CategoryRepository.get_all()
|
[
"[email protected]"
] | |
ca6d981f70f9f5e2d0d59bf1cec839c78115a1ef
|
a1bc22600af8889ea1b96b102a021a4a360654d5
|
/restapi/libs/ConnectionManager.py
|
d0eddd35935d2ac3a2b6e9da225c5336c50530e1
|
[] |
no_license
|
IndominusByte/hydro-tech-backend
|
940e32f3d4981ec92f78c7efb2f11add0fa17bf5
|
ac1ae3f05eb0b5a2c9da80560e7ee5e66e52848f
|
refs/heads/main
| 2023-05-04T14:11:12.267438 | 2021-05-30T01:08:30 | 2021-05-30T01:08:30 | 371,831,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,048 |
py
|
import logging, json
from fastapi import WebSocket
from fastapi_jwt_auth import AuthJWT
from fastapi_jwt_auth.exceptions import MissingTokenError
from controllers.ChatController import ChatCrud, ChatFetch
from controllers.UserController import UserFetch
from controllers.ReportController import ReportCrud
from controllers.SettingUserController import SettingUserFetch
from controllers.AlertController import AlertCrud, AlertLogic
from schemas.chats.ChatSchema import ChatData
from schemas.reports.ReportSchema import ReportCreate
from schemas.dashboard.DashboardSchema import (
DashboardSetValueServo,
DashboardSetValueHydro,
DashboardHydroData,
)
from pydantic import ValidationError
from user_agents import parse
from typing import List, Union
from config import settings
from redis import Redis
logger = logging.getLogger("uvicorn.info")
class ConnectionManager:
async def connect(
self,
websocket: WebSocket,
authorize: AuthJWT,
csrf_token: str = None,
token: str = None
):
await websocket.accept()
# user authentication
if csrf_token:
authorize.jwt_required("websocket",websocket=websocket,csrf_token=csrf_token) # check user login
decode_token = authorize.get_raw_jwt()
elif token:
authorize.jwt_required("websocket",token=token) # check IoT device login
decode_token = authorize.get_raw_jwt(token)
else:
raise MissingTokenError(status_code=1008,message="Missing access token from Query or Path")
# set state to websocket
user_agent = websocket.headers.get('user-agent')
if user_agent != 'arduino-WebSocket-Client':
websocket.state.type = "user"
websocket.state.device = str(parse(user_agent))
else:
websocket.state.type = "IoT"
websocket.state.device = user_agent
websocket.state.ip = websocket.client.host
websocket.state.user_id = decode_token['sub']
# remove all duplicate connection
for connection in self.active_connections:
if self.check_duplicate_connection(connection,websocket) is True:
await self.disconnect(connection,'duplicate')
self.active_connections.append(websocket)
def check_duplicate_connection(self, connection: WebSocket, websocket: WebSocket) -> bool:
return connection.state.type == websocket.state.type and \
connection.state.device == websocket.state.device and \
connection.state.ip == websocket.state.ip and \
connection.state.user_id == websocket.state.user_id
async def send_data(self, kind: str, connection: WebSocket, data: Union[str, bytes]) -> None:
try:
if kind == 'text': await connection.send_text(data)
if kind == 'bytes': await connection.send_bytes(data)
except RuntimeError:
await self.disconnect(connection,'invalid_data')
async def disconnect(self, websocket: WebSocket, msg: str):
try:
logger.info(f'{tuple(websocket.client)} - "WebSocket {websocket.url.path}" [disconnect-{msg}]')
self.active_connections.remove(websocket)
await websocket.close()
except ValueError:
pass
class ConnectionDashboard(ConnectionManager):
def __init__(self):
self.active_connections: List[WebSocket] = []
def set_type_device(self, kind: str, websocket: WebSocket) -> None:
if kind == "Hydro": websocket.state.type = "IoT:Hydro"
if kind == "Servo": websocket.state.type = "IoT:Servo"
if kind == "Camera": websocket.state.type = "IoT:Camera"
async def broadcast(self, msg_data: str, websocket: WebSocket, redis_conn: Redis) -> None:
try:
msg_data = dict(i.split(':') for i in msg_data.rstrip().split(','))
web_data = ",".join([":".join([key, str(val)]) for key, val in msg_data.items()])
# set type device IoT when make connection
self.set_type_device(msg_data['kind'], websocket)
# save data from hydro to db
if msg_data['kind'] == 'Hydro':
try:
m = ReportCreate(**msg_data)
user_id = int(websocket.state.user_id)
setting_user = await SettingUserFetch.filter_by_user_id(user_id) # get setting user
if setting_user is not None and setting_user['planted'] is True:
# create alert
if m.tank <= 50:
msg = f"Water remaining {m.tank}%"
if await AlertLogic.check_alert(user_id,'water_tank',msg) is False:
await AlertCrud.create_alert(**{
'type': 'water_tank',
'message': msg,
'user_id': user_id
})
if m.temp <= 15 or m.temp >= 30:
msg = "Oh no, your water temperature is abnormal," + \
f" water temperature right now {m.temp}°C"
if await AlertLogic.check_alert(user_id,'water_temp',msg) is False:
await AlertCrud.create_alert(**{
'type': 'water_temp',
'message': msg,
'user_id': user_id
})
# create report
await ReportCrud.create_report(**m.dict(),user_id=user_id)
except ValidationError:
await self.disconnect(websocket,'validation')
for connection in self.active_connections:
# send data web to camera for capture image & streaming
if (
msg_data['kind'] in ['live_cam_true', 'live_cam_false'] and
connection.state.type == "IoT:Camera" and
connection.state.user_id == websocket.state.user_id
):
await self.send_data('text', connection, web_data)
# send data web to camera for image calibration
if (
msg_data['kind'] in ['image_cal_true','image_cal_false'] and
connection.state.type == "IoT:Camera" and
connection.state.user_id == websocket.state.user_id
):
if msg_data['kind'] == 'image_cal_true':
redis_conn.set(f"camera_cal:{connection.state.user_id}","true",settings.image_cal_expires)
await self.send_data('text', connection, 'kind:capture_image')
if msg_data['kind'] == 'image_cal_false':
redis_conn.set(f"camera_cal:{connection.state.user_id}","false",settings.image_cal_expires)
# send data setting servo to Servo IoT
if (
msg_data['kind'] == 'set_value_servo' and
connection.state.type == "IoT:Servo" and
connection.state.user_id == websocket.state.user_id
):
# validation incoming data from user
try:
DashboardSetValueServo(**msg_data)
await self.send_data('text', connection, web_data)
except ValidationError:
await self.disconnect(websocket,'validation')
# send data hydro to user
if (
msg_data['kind'] == 'Hydro' and
connection.state.type == "user" and
connection.state.user_id == websocket.state.user_id
):
# validation incoming data from user
try:
DashboardHydroData(**msg_data)
await self.send_data('text', connection, web_data)
except ValidationError:
await self.disconnect(websocket,'validation')
# send data setting user to Hydro IoT
if (
msg_data['kind'] == 'set_hydro' and
connection.state.type == "IoT:Hydro" and
connection.state.user_id == websocket.state.user_id
):
# validation incoming data from user
try:
DashboardSetValueHydro(**msg_data)
await self.send_data('text', connection, web_data)
except ValidationError:
await self.disconnect(websocket,'validation')
except ValueError:
pass
async def streaming(self, stream: bytes, websocket: WebSocket) -> None:
# send data streaming to user and not device IoT
for connection in self.active_connections:
if (
connection.state.type == "user" and
connection.state.user_id == websocket.state.user_id
):
await self.send_data('bytes', connection, stream)
async def reset_servo(self) -> None:
for connection in self.active_connections:
if connection.state.type == "IoT:Servo":
user_id = int(connection.state.user_id)
setting_user = await SettingUserFetch.filter_by_user_id(user_id)
await self.send_data(
'text',
connection,
f"kind:set_value_servo,sh:{setting_user['servo_horizontal']},sv:{setting_user['servo_vertical']}"
)
async def capture_image(self) -> None:
for connection in self.active_connections:
if connection.state.type == "IoT:Camera":
await self.send_data('text', connection, 'kind:capture_image')
class ConnectionChat(ConnectionManager):
def __init__(self):
self.active_connections: List[WebSocket] = []
async def connect(
self,
websocket: WebSocket,
authorize: AuthJWT,
csrf_token: str = None,
):
await ConnectionManager.connect(self,websocket,authorize,csrf_token=csrf_token)
await self.list_user_status()
async def broadcast(self, msg_data: str, websocket: WebSocket) -> None:
if msg_data != "kind:get_list_user_status":
# save message to database
chat_id = await ChatCrud.create_chat(message=msg_data,user_id=int(websocket.state.user_id))
chat_db = await ChatFetch.filter_by_id(chat_id)
msg = ChatData(**{index:value for index,value in chat_db.items()}).dict()
# send message to all user
[
await self.send_data('text', connection, json.dumps(msg,default=str))
for connection in self.active_connections
]
else: await self.list_user_status()
async def list_user_status(self):
user_all = await UserFetch.get_user_id()
user_connection = [int(connection.state.user_id) for connection in self.active_connections]
online_user = [str(x) for x in user_all if x in user_connection]
offline_user = [str(x) for x in user_all if x not in user_connection]
total_online = len(online_user)
total_offline = len(offline_user)
msg = {
'total_online': total_online,
'total_offline': total_offline,
'online_user': online_user,
'offline_user': offline_user
}
[
await self.send_data('text', connection, json.dumps(msg,default=str))
for connection in self.active_connections
]
async def disconnect(self, websocket: WebSocket, msg: str):
await ConnectionManager.disconnect(self,websocket,msg)
await self.list_user_status()
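# Hedged illustration of the wire format parsed by ConnectionDashboard.broadcast:
# a comma-separated list of colon-separated pairs (field names beyond 'kind'
# are examples drawn from the handlers above), e.g.
#   "kind:Hydro,temp:25,tank:80"
# which the dict/split line turns into {'kind': 'Hydro', 'temp': '25', 'tank': '80'}.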
|
[
"[email protected]"
] | |
4ef1b4ad59a6038701ac54ce8f91f3c6bd2f7011
|
2e57fa141bd7fe4903ac5b31367e27686d343806
|
/code/run_pretrain_new.py
|
885233c44851b525d3eb589749f6c0d5ca45b472
|
[
"MIT"
] |
permissive
|
nikit91/ERNIE
|
8b9e9efcaa786dc69ce1a931173bef3890f2f942
|
a40e498e5b0adbfdce39a478737418199f773d96
|
refs/heads/master
| 2023-03-03T09:53:04.332667 | 2021-02-13T14:07:26 | 2021-02-13T14:07:26 | 311,699,328 | 0 | 0 |
MIT
| 2023-05-11T01:48:12 | 2020-11-10T15:21:19 |
Python
|
UTF-8
|
Python
| false | false | 21,283 |
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForPreTraining
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--init_fs_path',
type=str, default="./",
help="File system path for distributed training with init_method using shared file system \n"
"./ (default value): current directory\n")
parser.add_argument('--nodes_count',
type=int, default=1,
help="Number of nodes to determine the world size for distributed training \n"
"1 (default value): count of nodes\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = 1
torch.distributed.init_process_group(backend='gloo',
init_method='file://'+args.init_fs_path,
rank=args.local_rank,
world_size=args.nodes_count)
# torch.cuda.set_device(args.local_rank)
# device = torch.device("cuda", args.local_rank)
# n_gpu = 1
# # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
# torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
# random.seed(args.seed)
# np.random.seed(args.seed)
# torch.manual_seed(args.seed)
# keeping same seed value
torch.manual_seed(100)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
vecs = []
vecs.append([0]*100) # CLS
with open("kg_embed/entity2vec.vec", 'r') as fin:
for line in fin:
vec = line.strip().split('\t')
vec = [float(x) for x in vec]
vecs.append(vec)
embed = torch.FloatTensor(vecs)
embed = torch.nn.Embedding.from_pretrained(embed)
#embed = torch.nn.Embedding(5041175, 100)
logger.info("Shape of entity embedding: "+str(embed.weight.size()))
del vecs
train_data = None
num_train_steps = None
if args.do_train:
# TODO
import indexed_dataset
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler,BatchSampler
import iterators
#train_data = indexed_dataset.IndexedCachedDataset(args.data_dir)
train_data = indexed_dataset.IndexedDataset(args.data_dir, fix_lua_indexing=True)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_sampler = BatchSampler(train_sampler, args.train_batch_size, True)
def collate_fn(x):
x = torch.LongTensor([xx for xx in x])
entity_idx = x[:, 4*args.max_seq_length:5*args.max_seq_length]
# Build candidate
uniq_idx = np.unique(entity_idx.numpy())
ent_candidate = embed(torch.LongTensor(uniq_idx+1))
ent_candidate = ent_candidate.repeat([n_gpu, 1])
# build entity labels
d = {}
dd = []
for i, idx in enumerate(uniq_idx):
d[idx] = i
dd.append(idx)
ent_size = len(uniq_idx)-1
def map(x):
if x == -1:
return -1
else:
rnd = random.uniform(0, 1)
if rnd < 0.05:
return dd[random.randint(1, ent_size)]
elif rnd < 0.2:
return -1
else:
return x
ent_labels = entity_idx.clone()
d[-1] = -1
ent_labels = ent_labels.apply_(lambda x: d[x])
entity_idx.apply_(map)
ent_emb = embed(entity_idx+1)
mask = entity_idx.clone()
mask.apply_(lambda x: 0 if x == -1 else 1)
mask[:,0] = 1
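            # Each packed example is a flat LongTensor laid out in
            # max_seq_length-sized blocks, consumed by the return below:
            #   [0:L]  input_ids, [L:2L] input_mask, [2L:3L] segment_ids,
            #   [3L:4L] masked_lm_labels, [4L:5L] entity ids (read above),
            #   [6L:]  next_sentence_label; the [5L:6L] block is unused here.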
return x[:,:args.max_seq_length], x[:,args.max_seq_length:2*args.max_seq_length], x[:,2*args.max_seq_length:3*args.max_seq_length], x[:,3*args.max_seq_length:4*args.max_seq_length], ent_emb, mask, x[:,6*args.max_seq_length:], ent_candidate, ent_labels
train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
num_train_steps = int(
len(train_data) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model, missing_keys = BertForPreTraining.from_pretrained(args.bert_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
#if args.fp16:
# model.half()
model.to(device)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1', 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
no_linear = [x.replace('2', '11') for x in no_linear]
param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
#param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
#from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False)
#max_grad_norm=1.0)
if args.loss_scale == 0:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
# optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale)
# optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
#logger.info(dir(optimizer))
#op_path = os.path.join(args.bert_model, "pytorch_op.bin")
#optimizer.load_state_dict(torch.load(op_path))
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
if args.local_rank != -1:
try:
#from apex.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model, find_unused_parameters=True)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
global_step = 0
if args.do_train:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_data))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
model.train()
import datetime
fout = open(os.path.join(args.output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_iterator.next_epoch_itr(), desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels = batch
# if args.fp16:
# loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent.half(), ent_mask, next_sentence_label, ent_candidate.half(), ent_labels)
# else:
loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
original_loss = original_loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
fout.write("{} {}\n".format(loss.item()*args.gradient_accumulation_steps, original_loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
#if global_step % 1000 == 0:
# model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
# output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
# torch.save(model_to_save.state_dict(), output_model_file)
fout.close()
# Save a trained model
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
# Save the optimizer
#output_optimizer_file = os.path.join(args.output_dir, "pytorch_op.bin")
#torch.save(optimizer.state_dict(), output_optimizer_file)
# Load a trained model that you have fine-tuned
# model_state_dict = torch.load(output_model_file)
# model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict)
# model.to(device)
# if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# eval_examples = processor.get_dev_examples(args.data_dir)
# eval_features = convert_examples_to_features(
# eval_examples, label_list, args.max_seq_length, tokenizer)
# logger.info("***** Running evaluation *****")
# logger.info(" Num examples = %d", len(eval_examples))
# logger.info(" Batch size = %d", args.eval_batch_size)
# all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
# all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
# all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
# all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
# eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# # Run prediction for full data
# eval_sampler = SequentialSampler(eval_data)
# eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# model.eval()
# eval_loss, eval_accuracy = 0, 0
# nb_eval_steps, nb_eval_examples = 0, 0
# for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
# input_ids = input_ids.to(device)
# input_mask = input_mask.to(device)
# segment_ids = segment_ids.to(device)
# label_ids = label_ids.to(device)
# with torch.no_grad():
# tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
# logits = model(input_ids, segment_ids, input_mask)
# logits = logits.detach().cpu().numpy()
# label_ids = label_ids.to('cpu').numpy()
# tmp_eval_accuracy = accuracy(logits, label_ids)
# eval_loss += tmp_eval_loss.mean().item()
# eval_accuracy += tmp_eval_accuracy
# nb_eval_examples += input_ids.size(0)
# nb_eval_steps += 1
# eval_loss = eval_loss / nb_eval_steps
# eval_accuracy = eval_accuracy / nb_eval_examples
# result = {'eval_loss': eval_loss,
# 'eval_accuracy': eval_accuracy,
# 'global_step': global_step,
# 'loss': tr_loss/nb_tr_steps}
# output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
# with open(output_eval_file, "w") as writer:
# logger.info("***** Eval results *****")
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
f5ee8b5d8dd3b3a4a43097ae43519dc2bac3abce
|
45e56bbdf63bdefad218fe00de5441f06a5b6d30
|
/tests/test_models.py
|
b16bf4205e0ce2bfa5f917cfc1f3e6821b037acd
|
[] |
no_license
|
ssciolist/quantified-self-django
|
25768e7ebbbb4380f2303fdc627a3a9d33af930b
|
81cca994c1e30223b9fb925bc6db04b0bcbf9bd6
|
refs/heads/master
| 2020-03-22T10:21:40.125705 | 2018-07-12T13:20:17 | 2018-07-12T13:20:17 | 139,762,170 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,282 |
py
|
from django.test import TestCase
from qs_app.models import Food, Meal
class FoodTestClass(TestCase):
def setUp(self):
Food.objects.create(name="Croissant", calories='400')
Food.objects.create(name="Salad", calories='300')
def test_food_attrs(self):
buttery_goodness = Food.objects.get(name="Croissant")
salad = Food.objects.get(name="Salad")
self.assertEqual(buttery_goodness.calories, 400)
self.assertEqual(salad.calories, 300)
class MealTestClass(TestCase):
    def setUp(self):
        Food.objects.create(name="Croissant", calories='400')
        Food.objects.create(name="Salad", calories='300')
        # The test below fetches these meals by name, so create them here
        Meal.objects.create(name="Breakfast")
        Meal.objects.create(name="Lunch")
def test_meal_food_relation(self):
roll = Food.objects.get(name="Croissant")
salad = Food.objects.get(name="Salad")
breakfast = Meal.objects.get(name="Breakfast")
lunch = Meal.objects.get(name="Lunch")
breakfast.foods.add(roll)
lunch.foods.add(roll, salad)
self.assertEqual(breakfast.foods.count(), 1)
self.assertEqual(lunch.foods.count(), 2)
self.assertQuerysetEqual(breakfast.foods.all(), [f'{roll}'], transform=str)
self.assertQuerysetEqual(lunch.foods.all().order_by('id'), [f'{roll}', f'{salad}'], transform=str, ordered=True)
|
[
"[email protected]"
] | |
0b2a0b151096fb26a50ff2bcd2397014f5308370
|
5aa33c8438645d696c49c1a39d031f3f5eec0917
|
/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/build/config.gypi
|
35186ae6b05180060f69027c6d809a3434770e23
|
[
"MIT"
] |
permissive
|
alorr10/catch-of-the-day
|
0413087e3bee7d92399ab3100728d48159058e9d
|
575be4f7f20781bbbf8885a030e0580e60075967
|
refs/heads/master
| 2021-07-22T08:45:43.898457 | 2017-10-31T23:19:25 | 2017-10-31T23:19:25 | 108,476,163 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,110 |
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/8.4.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "8.1",
"nodedir": "/Users/aleclorraine/.node-gyp/8.4.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/aleclorraine/Engineering/React-For-Beginners-Starter-Files/catch-of-the-day/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v57-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/Users/aleclorraine/Engineering/React-For-Beginners-Starter-Files/catch-of-the-day/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v57-darwin-x64",
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/aleclorraine/.npm-init.js",
"userconfig": "/Users/aleclorraine/.npmrc",
"node_version": "8.4.0",
"user": "501",
"auth_type": "legacy",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/aleclorraine/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/4.6.1 node/v8.4.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/sw/50kvs6217hvdgxx7c93wvc7m0000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"[email protected]"
] | |
9e195b586132269338308ac6751fc54a8bb6c43f
|
e2b137653587d55ff954e59649ae40f7b344a15f
|
/csvjoin/csvjoin.py
|
8ddd5af3da7310a0547a95d5a43ff9604a5407ae
|
[
"MIT"
] |
permissive
|
lorcan/CSVJoin
|
f9d7050f1a97d2ad019aef9267b6989b60755fa7
|
b547dca4c7bd365896856a33d590645c8f472735
|
refs/heads/master
| 2016-09-05T12:50:07.595971 | 2014-01-01T10:53:17 | 2014-01-01T10:53:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,386 |
py
|
"""
csvjoin.py
Copyright (c) 2013 Lorcan Coyle, http://lorcancoyle.org
License: MIT License
Documentation: https://github.com/lorcan/CSVJoin
"""
import argparse
import csv
import redis
import sys
import ast
import copy
parser = argparse.ArgumentParser(description='Takes two CSV files and attempts to join them based on the key values. The headers from the first file will be retained unchanged but those from the second will be prefixed with the specified prefix value')
parser.add_argument("firstfile", help="This is the first CSV file.")
parser.add_argument("secondfile", help="This is the second CSV file.")
parser.add_argument("firstkey", help="This is the name of the header from the first file used for the join")
parser.add_argument("secondkey", help="This is the name of the header from the second file used for the join")
parser.add_argument("--keepsecondkey", help="Retain the second key rather than the first.", action="store_true")
parser.add_argument("--rightjoin", help="Retains the CSV from the first join and fills with blanks if there is no join present in the second file", action="store_true")
parser.add_argument("--firstprefix", default="", help="This is prefix to be used for headers in the first file.")
parser.add_argument("--secondprefix", default="", help="This is prefix to be used for headers in the second file.")
parser.add_argument("outputfile", help="This is the output file, where the joined file will be stored.")
args = parser.parse_args()
r = redis.StrictRedis(host='localhost', port=6379, db=0)
# Defensive Flush
r.flushdb()
outputheader = []
with open(args.firstfile, 'r') as csvfile:
reader = csv.reader(csvfile)
first = True
joinColumnNumber = -1
for row in reader:
if first:
first = False
firstheader = row
if args.firstkey not in firstheader:
print "There is no column called " + args.firstkey + " in the first files's header " + str(firstheader) + ". Unable to join. Exiting."
sys.exit()
joinColumnNumber = firstheader.index(args.firstkey)
for h in firstheader:
if not (args.keepsecondkey and h == args.firstkey):
outputheader.append(args.firstprefix + h)
else:
joinKey = row[joinColumnNumber]
firstJoin = []
for i in range(len(row)):
if not (args.keepsecondkey and i == joinColumnNumber):
firstJoin.append('' + row[i])
r.set(joinKey, str(firstJoin))
outputfile = csv.writer(open(args.outputfile, 'w'))
joinCount = 0
noJoinCount = 0
with open(args.secondfile, 'r') as csvfile:
reader = csv.reader(csvfile)
first = True
joinColumnNumber = -1
blankrow = []
for row in reader:
if first:
first = False
secondheader = row
if args.secondkey not in secondheader:
print "There is no column called " + args.secondkey + " in the second file's header " + str(secondheader) + ". Unable to join. Exiting."
sys.exit()
joinColumnNumber = secondheader.index(args.secondkey)
for h in secondheader:
if args.keepsecondkey or h != args.secondkey:
outputheader.append(args.secondprefix + h)
if(len(set(outputheader)) != len(outputheader)):
duplicates = list(set([x for x in outputheader if outputheader.count(x) > 1]))
print "There are duplicate headers " + str(duplicates) + " in the output. This won't do. Set a prefix to avoid this. Exiting."
sys.exit()
outputfile.writerow(outputheader)
for i in range(len(outputheader)):
blankrow.append("")
else:
secondFileKey = row[joinColumnNumber]
goodJoin = r.exists(secondFileKey)
if(goodJoin):
outputRow = ast.literal_eval(r.get(secondFileKey))
joinCount = joinCount + 1
else:
outputRow = copy.copy(blankrow)
noJoinCount = noJoinCount + 1
for i in range(len(row)):
if(args.keepsecondkey or i != joinColumnNumber):
outputRow.append(row[i])
if(goodJoin or args.rightjoin):
outputfile.writerow(outputRow)
# print "No " + args.firstkey + " value found in " + args.firstfile + " with " + args.secondkey + " " + str(secondFileKey)
if(args.rightjoin):
print "Joined " + str(joinCount) + " records and included " + str(noJoinCount) + " rows that could not be joined."
else:
print "Joined " + str(joinCount) + " records."
r.flushdb()
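# Hedged invocation sketch (file and column names are placeholders; a local
# Redis on localhost:6379 is required, since it is used as scratch space):
#   python csvjoin.py users.csv orders.csv id user_id joined.csv --secondprefix o_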
|
[
"[email protected]"
] | |
f17ce69e556c7992b8b72734be67d9a8c66a6a95
|
beab1ca3413c3397a4c5a3152b04a58585c75147
|
/algos/sorting/bubble_sort.py
|
0a542abbef1b7970fdc52fe0983e682342792879
|
[] |
no_license
|
john-a-m/snippets
|
60c3373e2ae9f4e2ea17884aac665e070e6783f8
|
90c6160220909a30a3503a4243d51d833330c49b
|
refs/heads/master
| 2021-01-21T04:25:28.507958 | 2019-08-05T16:00:15 | 2019-08-05T16:00:15 | 30,418,896 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
def bubble_sort(nums):
for passnum in range(len(nums) - 1, 0, -1):
for i in range(passnum):
if nums[i] > nums[i + 1]:
nums[i], nums[i + 1] = nums[i + 1], nums[i]
if __name__ == "__main__":
nums = [54,26,93,17,77,31,44,55,20]
bubble_sort(nums)
    print(nums)
|
[
"[email protected]"
] | |
3d6198b0abdc87164e432fd09c0390ecba72de19
|
de1abd0ebbb817aa5f23d369e7dda360fd6f1c32
|
/chapter8/7-NltkAnalysis.py
|
486c02f2c7559694ee722504c06720e50861ed6a
|
[] |
no_license
|
CodedQuen/Web-Scraping-with-Python-
|
33aaa2e3733aa1f2b8c7a533d74f5d08ac868197
|
67f2d5f57726d5a943f5f044480e68c36076965b
|
refs/heads/master
| 2022-06-13T01:34:39.764531 | 2020-05-05T11:07:01 | 2020-05-05T11:07:01 | 261,435,932 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
from nltk import word_tokenize, sent_tokenize, pos_tag
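# Hedged setup note: sent_tokenize/word_tokenize and pos_tag require the
# 'punkt' and 'averaged_perceptron_tagger' NLTK data packages, e.g.:
#   import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')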
sentences = sent_tokenize("Google is one of the best companies in the world. I constantly google myself to see what I'm up to.")
nouns = ['NN', 'NNS', 'NNP', 'NNPS']
for sentence in sentences:
if "google" in sentence.lower():
taggedWords = pos_tag(word_tokenize(sentence))
for word in taggedWords:
if word[0].lower() == "google" and word[1] in nouns:
print(sentence)
|
[
"[email protected]"
] | |
32b0d0a1e7c59df238be50af8ed751a950d96502
|
7ca55428503fc915fcffb8059d30654b625a6b26
|
/54-merge_sort.py
|
97a612c87752b25458be8094ab24483513f62345
|
[] |
no_license
|
Akshata2704/APS-2020
|
453e9eafb511e3e5fc73d939180c3402eb93134e
|
8f095ae1af9653499f1dedcdfe12b60b1ad1f65c
|
refs/heads/master
| 2020-12-21T03:10:52.043400 | 2020-05-15T18:51:48 | 2020-05-15T18:51:48 | 236,286,545 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 959 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 22:35:40 2020
@author: AKSHATA
"""
def merge(arr, l, m, r):
n1 = m - l + 1
n2 = r- m
L = [0] * (n1)
R = [0] * (n2)
for i in range(0 , n1):
L[i] = arr[l + i]
for j in range(0 , n2):
R[j] = arr[m + 1 + j]
i = 0
j = 0
k = l
while i < n1 and j < n2 :
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < n1:
arr[k] = L[i]
i += 1
k += 1
while j < n2:
arr[k] = R[j]
j += 1
k += 1
def mergeSort(arr,l,r):
if l < r:
m = (l+(r-1))//2
mergeSort(arr, l, m)
mergeSort(arr, m+1, r)
merge(arr, l, m, r)
arr = [12, 11, 13, 5, 6, 7]
n = len(arr)
print ("Given array is")
for i in range(n):
print ("%d" %arr[i]),
mergeSort(arr,0,n-1)
print ("\n\nSorted array is")
for i in range(n):
print ("%d" %arr[i]),
,
|
[
"[email protected]"
] | |
da6286df83fa7d22ced026b70d3ccc9e7a81e3c3
|
ad057daedceb42ac62c1aebca9271892d6e0d711
|
/lab5/zad1.py
|
ca076ff742d6d1d3d48f163c9b5faa814da64f40
|
[] |
no_license
|
Brouney/Vpython-university
|
5f311a02d0acdbdb51c840765888906591f97aef
|
b0ee5143c1c86f17ff651548e5360ee1fcffebd6
|
refs/heads/master
| 2022-11-13T05:46:11.764112 | 2020-07-09T11:44:53 | 2020-07-09T11:44:53 | 278,346,509 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,086 |
py
|
import numpy as np
import matplotlib.pyplot as plt
a = np.arange(1,367,1)
#print(sum(a==2))
file = open("zad1wyn.txt","w")
tab2 = []
tab3 = []
tab4 = []
for N in range(1,367):
liczba2 = 0
liczba3=0
liczba4=0
temp = np.zeros((1000,367))
for t in range(1000):
for i in range(N):
day = np.random.randint(1,367)
temp[t][day]+=1
for z in range(1000):
if sum(temp[z]>=2) > 0:
liczba2+=1
if sum(temp[z]>=3) > 0:
liczba3+=1
if sum(temp[z]>=4) > 0:
liczba4+=1
file.write(str(N)+") "+str(liczba2/1000)+"\n")
tab2.append(liczba2/1000)
tab3.append(liczba3/1000)
tab4.append(liczba4/1000)
plt.rcParams['font.size']=18
plt.rcParams['legend.fontsize']=18
plt.plot(a, tab2, '-', label='greater than 2')
#plt.savefig('myfig2.pdf', format='pdf')
#plt.show()
plt.plot(a, tab3, '-', label='greater than 3')
#plt.savefig('myfig3.pdf', format='pdf')
#plt.show()
plt.plot(a, tab4, '-', label='greater than 4')
plt.legend(loc='upper right')
plt.savefig('myfig.pdf', format='pdf')
plt.show()
|
[
"[email protected]"
] | |
40dd55c4845e2d28839d9fcd23150afb908217b7
|
f9f416b5e4f00c190e3c63546c1442588dd5a504
|
/chapter02/Test05-for.py
|
72a664e819728738c9c42a2bbef7eee39250d650
|
[] |
no_license
|
tongyaojun/corepython-tongyao
|
1f4649fa758da20474f0604943635a363d6ab76a
|
c5869dc29a42b83435b93c7125fcac1bf8d2e6b1
|
refs/heads/master
| 2020-05-04T04:43:22.233234 | 2014-07-22T12:53:57 | 2014-07-22T12:53:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 60 |
py
|
#!/usr/bin/python
for rag in range(11):
print(rag)
|
[
"[email protected]"
] | |
5554458edaa0679f072cf8de87922210dcb52f07
|
13d52041d7ba53ccc1708a5931b69f8b13ebd0f6
|
/1_cohesion_and_coupling/good_exemple.py
|
4d7b397a8b8aab350d579b329a7acd62613e4268
|
[] |
no_license
|
iemsec/DesginPattern
|
0967c5cae53e228311e3003900eeafd1bccc908d
|
80b5426f1f83b646819188c7fb22c207ba1d05c7
|
refs/heads/main
| 2023-07-12T19:44:08.563756 | 2021-08-06T05:46:28 | 2021-08-06T05:46:28 | 389,579,477 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,366 |
py
|
import string
import random
class VehicleInfo:
brand: str
catalogue_price: int
electric: bool
def __init__(self, brand, electric, catalogue_price) -> None:
self.brand = brand
self.catalogue_price = catalogue_price
self.electric = electric
def compute_tax(self):
tax_percentage = 0.05
if self.electric:
tax_percentage = 0.02
return tax_percentage * self.catalogue_price
def print(self):
print(f"Brand: {self.brand}")
print(f"Payable tax: {self.compute_tax()}")
class Vehicle:
id: str
license_plate: str
info: VehicleInfo
def __init__(self, id, license_plate, info) -> None:
self.id = id
self.license_plate = license_plate
self.info = info
def print(self):
print("Registration Complete. Vehicle information:")
print(f"Id: {self.id}")
print(f"Licence plate: {self.license_plate}")
self.info.print()
class VehicleRegistry:
vehicle_info = { }
def __init__(self) -> None:
self.add_vehicle_info("Tesla Model 3", True, 60000)
self.add_vehicle_info("VW ID3", True, 35000)
self.add_vehicle_info("BMW 5", False, 45000)
def add_vehicle_info(self, brand, electric, catalogue_price):
self.vehicle_info[brand] = VehicleInfo(brand, electric, catalogue_price)
def generate_vehicle_id(self, length):
return ''.join(random.choices(string.ascii_uppercase, k=length))
def generate_vehicle_license(self, id):
return f"{id[:2]}-{''.join(random.choices(string.digits, k=2))}-{''.join(random.choices(string.ascii_uppercase, k=2))}"
def create_vehicle(self,brand):
# generate a vehicle id of length 12
vehicle_id = self.generate_vehicle_id(12)
# now generate a license plate for the vehicle
# using the first two characters of the vehicle id
license_plate = self.generate_vehicle_license(vehicle_id)
return Vehicle(vehicle_id, license_plate, self.vehicle_info[brand])
class Application:
def register_vehicle(self, brand: string):
# create a registry instance
registry = VehicleRegistry()
# Create the vehicle
vehicle = registry.create_vehicle(brand)
vehicle.print()
app = Application()
vehicle = app.register_vehicle("BMW 5")
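# Design note: responsibilities are separated (VehicleInfo owns the tax
# rule, Vehicle owns identity and printing, VehicleRegistry owns id and
# license generation), so Application only orchestrates. That separation
# is the high-cohesion / low-coupling structure this example demonstrates.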
|
[
"[email protected]"
] | |
2db8223646d69dc09a8a240df878d1dec6ea74d1
|
3979b5ea212395fc66f17c1f8a94ebc00e18d004
|
/Part2/main.py
|
4e6a21a69924803c67892b306dc7d2a5f9f622bd
|
[] |
no_license
|
Ibasquare/Nvidia-TX2-LivePanorama
|
a822d9ee7b31c92396ee138d2a4f6024aafe6180
|
81a15e378773e493b1a741a3f872397a473133c1
|
refs/heads/master
| 2022-07-26T03:04:02.137700 | 2022-06-27T13:34:49 | 2022-06-27T13:34:49 | 216,547,939 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,827 |
py
|
import cv2
import json
import sys
import operator
import numpy as np
from Angle import *
from Panorama import *
from JetsonCam import *
from Reader import *
from Motion_Detection import *
from Util import *
from MaskComp import *
from PersonDetection import *
PROJECTION_MATRICE = None
IMPLEMENTED_MODE = ["panorama", "matching_demo", "motion_detection", "enhanced_panorama", "personn_detection"]
FRAME_NB_BTW_PANO = 15
RESOLUTION = (1280,720)
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 720
FRAME_RATE = 25
#PERSONN_DETECTION_ALGO = "Opencv"
PERSONN_DETECTION_ALGO = "Tensorflow"
MODEL_PATH = "models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb"
def get_cam_matrix(filename):
json_data = open(filename).read()
data = json.loads(json_data)
return np.array(data["Camera Matrix"])
def video_matching_demo(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
cap2 = cap.copy()
relative_angle = [0.0, 0.0, 0.0]
if(len(cap) > 0):
frame = cap.pop()
else:
print("Error: 0 frame in the video mentionned.")
exit(-1)
while(len(cap) > 0):
prec_frame = frame
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
angle = get_angle(prec_frame, frame, cam_matrix, True)
relative_angle = list(map(operator.add, relative_angle,angle))
cv2.putText(frame, ("angle:" + str(relative_angle[1])), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255, 255))
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("p"):
while(True):
key = cv2.waitKey(0) & 0xFF
if(key == ord("p")):
break
elif key == ord("r"):
cap = cap2.copy()
if(len(cap) > 0):
frame = cap.pop()
else:
print("Error: 0 frame in the video mentionned.")
exit(-1)
relative_angle = [0.0, 0.0, 0.0]
cv2.destroyAllWindows()
def video_panorama(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
init = cap
focal_length = cam_matrix[0][0]
scaling_factor = focal_length #Scaling Factor equal to focal length
PROJECTION_MATRICE = compute_projection_matrix(cam_matrix, scaling_factor, RESOLUTION)
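# Cylindrical-warp note (standard formula; assuming compute_projection_matrix
# follows it): with the scale set to the focal length f, a pixel (x, y)
# centered on the principal point maps to roughly (f*atan(x/f),
# f*y/sqrt(x^2 + f^2)), so a purely rotating camera yields frames that can
# be stitched by translation alone.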
relative_angle = [0.0, 0.0, 0.0]
panorama = None
nb_frame = FRAME_NB_BTW_PANO
last_frame_in_pano = None
trans = 0.0
if(len(cap) < 1):
print("Error: 0 frames in the video mentioned.")
exit(-1)
frame_buffer = list()
while(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if panorama is None:
panorama = get_cylindrical(frame, PROJECTION_MATRICE)
last_frame_in_pano = frame
panorama_to_display = cv2.cvtColor(panorama.copy(), cv2.COLOR_GRAY2BGR)
cv2.rectangle(panorama_to_display,(0,0),(RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
open_window("Panorama")
cv2.imshow("Panorama", panorama_to_display)
if(nb_frame > 0):
nb_frame = nb_frame - 1;
if(len(frame_buffer) > FRAME_NB_BTW_PANO - 1):
del(frame_buffer[0])
frame_buffer.append(frame)
else:
tmp = frame.copy()
prec_trans = trans
while(len(frame_buffer) > 0):
tmp_panorama, tmp_translation = get_panorama("cylindrical",panorama,tmp,last_frame_in_pano,trans,PROJECTION_MATRICE)
if tmp_translation is None:
tmp = frame_buffer.pop()
else:
angle = get_angle(last_frame_in_pano, tmp, cam_matrix)
relative_angle = list(map(operator.add, relative_angle,angle))
last_frame_in_pano = tmp
trans = tmp_translation
panorama = tmp_panorama
break
if(len(frame_buffer) < 1):
print("Error : The panorama can't be made on this Video Sequence (not enough matches could be made).")
exit(-1)
elif(len(frame_buffer) > FRAME_NB_BTW_PANO - 1):
del(frame_buffer[0])
else:
FRAME_NB_BTW_PANO = round(FRAME_NB_BTW_PANO/2 + 0.5)
print("Number of Frame between two panorama has been updated : nb_frame_btw_pano = " + str(FRAME_NB_BTW_PANO))
frame_buffer.append(frame)
nb_frame = FRAME_NB_BTW_PANO
panorama_to_display = cv2.cvtColor(panorama.copy(), cv2.COLOR_GRAY2BGR)
if(trans > 0):
cv2.rectangle(panorama_to_display,(int(trans),0),(int(trans) + RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (int(trans),20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
elif(trans < 0 and trans < prec_trans):
cv2.rectangle(panorama_to_display,(0,0),(RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (15, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
else:
cv2.rectangle(panorama_to_display,(int(panorama.shape[1] - (abs(trans) + RESOLUTION[0])),0),(int(panorama.shape[1] - (abs(trans) + RESOLUTION[0]) + RESOLUTION[0]),RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), ((int(panorama.shape[1] - (abs(trans) + RESOLUTION[0]))), 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
open_window("Panorama")
cv2.imshow("Panorama", panorama_to_display)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("p"):
while(True):
key = cv2.waitKey(0) & 0xFF
if(key == ord("p")):
break
elif key == ord("r"):
frame = None
relative_angle = [0.0, 0.0, 0.0]
panorama = None
nb_frame = FRAME_NB_BTW_PANO
cap = init
if panorama is not None:
panorama = cv2.cvtColor(panorama, cv2.COLOR_GRAY2BGR)
ret = cv2.imwrite(("Panorama.jpg") ,autocrop(panorama))
if ret is False:
print("Error: Fail to save the Panorama.")
else:
print("Panorama Saved")
else:
print("Error: The panorama has not been computed.")
cv2.destroyAllWindows()
def video_motion_detection_demo(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
cap2 = cap.copy()
relative_angle = [0.0, 0.0, 0.0]
focal_length = cam_matrix[0][0]
scaling_factor = focal_length #Scaling Factor equal to focal length
PROJECTION_MATRICE = compute_projection_matrix(cam_matrix, scaling_factor, RESOLUTION)
if(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
#Use grayscale ==> lighter computations
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# Blur footage to prevent artifacts
else:
print("Error: 0 frame in the video mentionned.")
exit(-1)
frame_counter = FRAME_NB_BTW_PANO
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
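# MOG2 is a Gaussian-mixture background/foreground segmenter; the 3x3
# elliptical kernel is presumably used by motion_detection for
# morphological cleanup of the resulting mask.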
while(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
if(frame_counter > 0):
frame_counter = frame_counter - 1
continue
#angle = get_angle(prec_frame, frame, cam_matrix, False)
#relative_angle = list(map(operator.add, relative_angle,angle))
prec_gray = gray
#Use grayscale ==> lighter computations
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
to_disp = frame.copy()
motion_mask = motion_detection(fgbg,kernel, prec_gray, gray, PROJECTION_MATRICE, to_disp)
#motion_mask = bad_motion_detection(fgbg,frame,to_disp)
open_window("frame")
cv2.imshow('frame',to_disp)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("p"):
while(True):
key = cv2.waitKey(0) & 0xFF
if(key == ord("p")):
break
elif key == ord("r"):
cap = cap2.copy()
if(len(cap) > 0):
frame = cap.pop()
else:
print("Error: 0 frame in the video mentionned.")
exit(-1)
relative_angle = [0.0, 0.0, 0.0]
cv2.destroyAllWindows()
def video_enhanced_panorama(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
init = cap
focal_length = cam_matrix[0][0]
scaling_factor = focal_length #Scaling Factor equal to focal length
PROJECTION_MATRICE = compute_projection_matrix(cam_matrix, scaling_factor, RESOLUTION)
relative_angle = [0.0, 0.0, 0.0]
panorama = None
nb_frame = FRAME_NB_BTW_PANO
last_frame_in_pano = None
trans = 0.0
if(len(cap) < 1):
print("Error: 0 frames in the video mentioned.")
exit(-1)
frame_buffer = list()
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
while(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if panorama is None:
panorama = get_cylindrical(frame, PROJECTION_MATRICE)
last_frame_in_pano = frame
panorama_to_display = cv2.cvtColor(panorama.copy(), cv2.COLOR_GRAY2BGR)
cv2.rectangle(panorama_to_display,(0,0),(RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
open_window("Panorama")
cv2.imshow("Panorama", panorama_to_display)
if(nb_frame > 0):
nb_frame = nb_frame - 1;
if(len(frame_buffer) > FRAME_NB_BTW_PANO - 1):
del(frame_buffer[0])
frame_buffer.append(frame)
else:
tmp = frame.copy()
prec_trans = trans
while(len(frame_buffer) > 0):
moving_fg_mask = motion_detection(fgbg, kernel, tmp,last_frame_in_pano,PROJECTION_MATRICE)
static_fg_mask = compute_foreground_mask(tmp,fgbg,kernel)
tmp_panorama, tmp_translation = get_enhanced_panorama("cylindrical",panorama,tmp,last_frame_in_pano,trans,PROJECTION_MATRICE, moving_fg_mask, static_fg_mask)
if tmp_translation is None:
tmp = frame_buffer.pop()
else:
angle = get_angle(last_frame_in_pano, tmp, cam_matrix)
relative_angle = list(map(operator.add, relative_angle,angle))
last_frame_in_pano = tmp
trans = tmp_translation
panorama = tmp_panorama
break
if(len(frame_buffer) < 1):
print("Error : The panorama can't be made on this Video Sequence (not enough matches could be made).")
exit(-1)
elif(len(frame_buffer) > FRAME_NB_BTW_PANO - 1):
del(frame_buffer[0])
else:
FRAME_NB_BTW_PANO = round(FRAME_NB_BTW_PANO/2 + 0.5)
print("Number of Frame between two panorama has been updated : nb_frame_btw_pano = " + str(FRAME_NB_BTW_PANO))
frame_buffer.append(frame)
nb_frame = FRAME_NB_BTW_PANO
panorama_to_display = cv2.cvtColor(panorama.copy(), cv2.COLOR_GRAY2BGR)
if(trans > 0):
cv2.rectangle(panorama_to_display,(int(trans),0),(int(trans) + RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (int(trans),20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
elif(trans < 0 and trans < prec_trans):
cv2.rectangle(panorama_to_display,(0,0),(RESOLUTION[0],RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), (15, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
else:
cv2.rectangle(panorama_to_display,(int(panorama.shape[1] - (abs(trans) + RESOLUTION[0])),0),(int(panorama.shape[1] - (abs(trans) + RESOLUTION[0]) + RESOLUTION[0]),RESOLUTION[1]),(0,0,255),10)
cv2.putText(panorama_to_display, ("angle:" + str(relative_angle[1])), ((int(panorama.shape[1] - (abs(trans) + RESOLUTION[0]))), 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0, 255))
open_window("Panorama")
cv2.imshow("Panorama", panorama_to_display)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("p"):
while(True):
key = cv2.waitKey(0) & 0xFF
if(key == ord("p")):
break
elif key == ord("r"):
frame = None
relative_angle = [0.0, 0.0, 0.0]
panorama = None
nb_frame = FRAME_NB_BTW_PANO
cap = init
if panorama is not None:
panorama = cv2.cvtColor(panorama, cv2.COLOR_GRAY2BGR)
ret = cv2.imwrite(("Panorama.jpg") ,autocrop(panorama))
if ret is False:
print("Error: Fail to save the Panorama.")
else:
print("Panorama Saved")
else:
print("Error: The panorama has not been computed.")
cv2.destroyAllWindows()
def video_personn_detection(video_path):
global MODEL_PATH
if(PERSONN_DETECTION_ALGO == "Opencv"):
detect_opcv(video_path)
elif(PERSONN_DETECTION_ALGO == "Tensorflow"):
detect_tf(video_path,MODEL_PATH)
else:
print("Error : Unknown Personn Detection algorithm")
exit(-1)
def motion_detection_assessment(cap, cam_matrix, video_nb):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
focal_length = cam_matrix[0][0]
scaling_factor = focal_length #Scaling Factor equal to focal length
PROJECTION_MATRICE = compute_projection_matrix(cam_matrix, scaling_factor, RESOLUTION)
if(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
#Use grayscale ==> lighter computations
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# Blur footage to prevent artifacts
else:
print("Error: 0 frame in the video mentionned.")
exit(-1)
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
if(video_nb == 1):
dir_annotation = "Annotation/In/"
list_id = getRefId(dir_annotation + "box_6_1.txt")
else:
dir_annotation = "Annotation/Out/"
list_id = getRefId(dir_annotation + "box_6_2.txt")
frame_id = 1
error_list = list()
while(len(cap) > 0):
frame = cap.pop()
frame = cv2.resize(frame, RESOLUTION)
prec_gray = gray
#Use grayscale ==> lighter computations
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
to_disp = frame.copy()
motion_mask = motion_detection(fgbg,kernel, prec_gray, gray, PROJECTION_MATRICE)
if(frame_id in list_id):
print("Image " + str(frame_id) + ":")
ref_mask = readMask(frame_id,dir_annotation,6,video_nb)
error_list.append(maskComp(ref_mask,motion_mask,True))
#motion_mask = bad_motion_detection(fgbg,frame,to_disp)
frame_id = frame_id + 1
mean_error = np.mean(np.array(error_list))
print("Motion Detection Assesment - Mean Error : " + str(mean_error))
cv2.destroyAllWindows()
def personn_detection_assesment(video_path, video_nb):
global PERSONN_DETECTION_ALGO
global MODEL_PATH
if(video_nb == 1):
dir_annotation = "Annotation/In/"
ann_path = dir_annotation + "box_6_1.txt"
else:
dir_annotation = "Annotation/Out/"
ann_path = dir_annotation + "box_6_2.txt"
if(PERSONN_DETECTION_ALGO == "Opencv"):
mean_error, computation_time = perf_ass_opcv(video_path,ann_path)
elif(PERSONN_DETECTION_ALGO == "Tensorflow"):
mean_error, computation_time = perf_ass_tf(video_path,ann_path, MODEL_PATH)
else:
print("Error : Unknown Personn Detection algorithm")
exit(-1)
print("Mean Error :" + str(mean_error))
print("Computation time : " + str(computation_time))
def live_matching_demo(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
ret = False
frame = None
relative_angle = [0.0, 0.0, 0.0]
start_live = False
while(cap.isOpened()):
prec_ret, prec_frame = (ret,frame)
ret, frame = cap.read()
if ret is True:
frame = cv2.resize(frame, RESOLUTION)
if prec_ret is True and ret is True and start_live:
angle = get_angle(prec_frame, frame, cam_matrix, start_live)
relative_angle = list(map(operator.add, relative_angle,angle))
cv2.putText(frame, ("angle:" + str(relative_angle[1])), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255, 255))
if ret is True:
open_window("Live")
cv2.imshow("Live", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("s"):
start_live = not start_live
if start_live is False:
cv2.destroyWindow("Feature Matcher - orb Flanner")
else:
relative_angle = [0.0, 0.0, 0.0]
cap.release()
cv2.destroyAllWindows()
def live_panorama(cap,cam_matrix):
global PROJECTION_MATRICE
global FRAME_NB_BTW_PANO
frame_buffer = list()
start_pano = False
while(cap.isOpened()):
ret, frame = cap.read()
if ret is True:
frame = cv2.resize(frame, RESOLUTION)
open_window("Live")
cv2.imshow("Live", frame)
if start_pano is True:
frame_buffer.append(frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("s"):
if start_pano is True:
print("Panorama being computed ...")
video_panorama(frame_buffer,cam_matrix)
start_pano = not start_pano
cap.release()
cv2.destroyAllWindows()
def live_motion_detection_demo(cap,cam_matrix):
global FRAME_NB_BTW_PANO
global PROJECTION_MATRICE
ret = False
frame = None
start_live = False
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
focal_length = cam_matrix[0][0]
scaling_factor = focal_length #Scaling Factor equal to focal length
PROJECTION_MATRICE = compute_projection_matrix(cam_matrix, scaling_factor, RESOLUTION)
while(cap.isOpened()):
prec_ret, prec_frame = (ret,frame)
ret, frame = cap.read()
if ret is True:
frame = cv2.resize(frame, RESOLUTION)
if prec_ret is True and ret is True and start_live:
prec_gray = cv2.cvtColor(prec_frame,cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
motion_mask = motion_detection(fgbg,kernel, prec_gray, gray, PROJECTION_MATRICE, frame)
if ret is True:
open_window("Live")
cv2.imshow("Live", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("s"):
start_live = not start_live
cap.release()
cv2.destroyAllWindows()
def live_enhanced_panorama(cap,cam_matrix):
global PROJECTION_MATRICE
global FRAME_NB_BTW_PANO
frame_buffer = list()
start_pano = False
while(cap.isOpened()):
ret, frame = cap.read()
if ret is True:
frame = cv2.resize(frame, RESOLUTION)
open_window("Live")
cv2.imshow("Live", frame)
if start_pano is True:
frame_buffer.append(frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("s"):
if start_pano is True:
print("Panorama being computed ...")
video_enhanced_panorama(frame_buffer,cam_matrix)
start_pano = not start_pano
cap.release()
cv2.destroyAllWindows()
def live_personn_detection(cap):
global MODEL_PATH
if(PERSONN_DETECTION_ALGO == "Opencv"):
detector = HumanDetectorOCV()
elif(PERSONN_DETECTION_ALGO == "Tensorflow"):
detector = HumanDetectorTF(MODEL_PATH)
else:
print("Error : Unknown Personn Detection algorithm")
exit(-1)
start_detect = False
while(cap.isOpened()):
ret, frame = cap.read()
if ret is True:
frame = cv2.resize(frame, RESOLUTION)
if start_detect is True:
boxes = detector.detect(frame)
for box in boxes:
cv2.rectangle(frame,box[0],box[1],(0,0,255),2)
open_window("Live")
cv2.imshow("Live", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("You quit.")
break
elif key == ord("s"):
start_detect = not start_detect
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
live = False
if(len(sys.argv) == 3):
cmatrix_filename = sys.argv[1]
cam_matrix = get_cam_matrix(cmatrix_filename)
mode = sys.argv[2]
if mode not in IMPLEMENTED_MODE:
print("Error: Implemented modes are " + str(IMPLEMENTED_MODE) + ".")
exit(-1)
live = True
#cap = cv2.VideoCapture(0)
cap = open_cam_onboard(WINDOW_WIDTH, WINDOW_HEIGHT, RESOLUTION,FRAME_RATE)
elif(len(sys.argv) == 4):
cmatrix_filename = sys.argv[1]
cam_matrix = get_cam_matrix(cmatrix_filename)
live = False
mode = sys.argv[2]
if mode not in IMPLEMENTED_MODE:
print("Error: Implemented modes are " + str(IMPLEMENTED_MODE) + ".")
exit(-1)
video_dirname = sys.argv[3]
video_nb = int(video_dirname.split("_")[1])
perf_assess = False
#cap = cv2.VideoCapture(video_filename)
cap = frameReadingFromImage(video_dirname)
if cap is None:
print("Error: Fail to read the Video Files.")
exit(-1)
elif(len(sys.argv) == 5):
cmatrix_filename = sys.argv[1]
cam_matrix = get_cam_matrix(cmatrix_filename)
live = False
mode = sys.argv[2]
if mode not in IMPLEMENTED_MODE:
print("Error: Implemented modes are " + str(IMPLEMENTED_MODE) + ".")
exit(-1)
if mode == "motion_detection" or mode == "personn_detection":
perf_assess = sys.argv[4].lower() in ('true', '1') # bool() of any non-empty string is True, so parse the flag explicitly
else:
print("Error: python3.6 main.py cam_matrix_filename.json mode=" + str(IMPLEMENTED_MODE) + " [video_dirname] [performance_assessment=True]")
exit(-1)
video_dirname = sys.argv[3]
video_nb = int(video_dirname.split("_")[1])
#cap = cv2.VideoCapture(video_filename)
cap = frameReadingFromImage(video_dirname)
if cap is None:
print("Error: Fail to read the Video Files.")
exit(-1)
else:
print("Error: python3.6 main.py cam_matrix_filename.json mode=" + str(IMPLEMENTED_MODE) + " [video_dirname] [performance_assessment=True]")
exit(-1)
if live is True and mode == "panorama":
live_panorama(cap,cam_matrix)
elif live is True and mode == "matching_demo":
live_matching_demo(cap,cam_matrix)
elif live is False and mode == "panorama":
video_panorama(cap,cam_matrix)
elif live is False and mode == "matching_demo":
video_matching_demo(cap,cam_matrix)
elif(live is False and mode == "motion_detection" and perf_assess == False):
video_motion_detection_demo(cap,cam_matrix)
elif(live is False and mode == "motion_detection" and perf_assess == True):
motion_detection_assessment(cap,cam_matrix, video_nb)
elif(live is True and mode == "motion_detection"):
live_motion_detection_demo(cap,cam_matrix)
elif(live is False and mode == "enhanced_panorama"):
video_enhanced_panorama(cap,cam_matrix)
elif(live is True and mode == "enhanced_panorama"):
live_enhanced_panorama(cap,cam_matrix)
elif(live is False and mode == "personn_detection" and perf_assess == False):
video_personn_detection(video_dirname)
elif(live is False and mode == "personn_detection" and perf_assess == True):
personn_detection_assesment(video_dirname, video_nb)
elif(live is True and mode == "personn_detection"):
live_personn_detection(cap)
else:
print("Error: python3.6 main.py cam_matrix_filename.json mode=" + str(IMPLEMENTED_MODE) + " [video_dirname] [performance_assessment=True]")
exit(-1)
|
[
"[email protected]"
] | |
2e5b24c6d45a87fa2c12e35d5b432da39edabc4a
|
82ebcc53d1c3e80b8960520f934004b945c84f70
|
/gumi/models/__init__.py
|
66dce650fdf02fea6bbc93b79f43d1dcb08f0817
|
[
"MIT"
] |
permissive
|
kumasento/gconv-prune
|
404fa15c508be017faf56a0e27edc5410fa459d9
|
f81c417d3754102c902bd153809130e12607bd7d
|
refs/heads/main
| 2021-10-06T12:54:49.626589 | 2021-10-04T16:13:59 | 2021-10-04T16:13:59 | 204,706,641 | 10 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 117 |
py
|
from .resnet import *
from .preresnet import *
from .densenet import *
from .vgg import *
from .condensenet import *
|
[
"[email protected]"
] | |
60360235f3c2a84e511dfb8b88931bf1b27d9951
|
d930be62c302a01ad1462180519a8646bbf78484
|
/my_templates/folder_monitor.py
|
add7a30f4db020762edaa61f1bf60c4893c37237
|
[] |
no_license
|
puneet3LOQ/my_templates
|
4e76489a7cd344408288727ec6514bf62e456d73
|
b9891d79030f82fd5195f7d089d3da7b624bbfe4
|
refs/heads/master
| 2021-01-23T03:22:07.259075 | 2015-04-23T12:06:21 | 2015-04-23T12:06:21 | 34,115,017 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,026 |
py
|
#/home/puneet/workspace/folder_monitor.py
'''
Platform independent file system monitor.
'''
from __future__ import nested_scopes
import os, time
def watch_directories(paths, func, delay=0.1):
'''
Watches a list of directories specified in 'paths' and runs func on each
new/changed file encountered in paths. Func can return True/False. If it
returns True, the paths are immediately rescanned without applying func.
This is for use with functions which may make changes which affect files
in 'paths'.
@param
paths (list) - A list of unix paths to monitor.
func (function) - Function variable to call on each new/changed file
found in paths.
delay (float) - Wait time between scans on paths.
@return
None
'''
##Dict to map files to modification time.
all_files = {}
def f(unused, dirname, files):
for filename in files:
path = os.path.join(dirname, filename)
try:
t = os.stat(path)
except os.error:
continue
mtime = remaining_files.get(path)
if mtime is not None:
del remaining_files[path]
if t.st_mtime > mtime:
changed_list.append(path)
else:
changed_list.append(path)
all_files[path] = t.st_mtime
rescan = False
while True:
changed_list = []
remaining_files = all_files.copy()
all_files = {}
for path in paths:
os.path.walk(path, f, None)
removed_list = remaining_files.keys()
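# If func requested a rescan on the previous pass, consume the flag and
# skip applying func once so that changes made by func itself are
# absorbed without re-triggering it (see the docstring above).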
if rescan:
rescan = False
elif changed_list or removed_list:
rescan = func(changed_list, removed_list)
time.sleep(delay)
if __name__ == '__main__':
def f(changed_files, removed_files):
print changed_files
print 'Removed: ', removed_files
watch_directories(['.'], f, delay=5)
|
[
"[email protected]"
] | |
48821661f53b8b2fd7a14393c175437f8f7231b3
|
cc856a6efb22c82eaa6bc9bcadb36ab519c2b3eb
|
/test.py
|
7de592f88c9b4629879bdd2da2c3554672ca5170
|
[] |
no_license
|
ajaybati/Past-Projects
|
59b861f76ca5cd5b1a7b85462b92666660263d57
|
204ada5e7f91cea4d8108c5f66f159b384d7dbdd
|
refs/heads/master
| 2020-12-21T10:05:49.450890 | 2020-01-31T07:43:02 | 2020-01-31T07:43:02 | 236,396,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 734 |
py
|
list=['1', '(8', 'ounce)', 'container', 'plain', 'yogurt', '1⁄3', 'cup', 'chopped', 'seeded', 'cucumber', '(thinly', 'slice', 'remainder', 'of', 'cucumber)', '2', 'tablespoons', 'finely', 'chopped', 'onions', '1', 'garlic', 'clove,', 'minced', '1', 'teaspoon', 'sugarFilling', '1', 'lb', 'lean', 'ground', 'beef', '(I', 'use', 'ground', 'turkey)', '1', '1⁄2', 'teaspoons', 'dried', 'oregano', '1', 'teaspoon', 'garlic', 'powder', '1', 'teaspoon', 'onion', 'powder', '1', 'teaspoon', 'salt', '(optional)', '3⁄4', 'teaspoon', 'pepper', '4', 'pita', 'breads', '3', 'cups', 'shredded', 'lettuce', '1', 'large', 'tomatoes,', 'chopped', '1', 'small', 'onion,', 'chopped']
start=0
starter=1
print("ok")
list[1:6] = [' '.join(list[1:6])]  # join elements 1-5 into one item; the original line had an unclosed paren, and slice assignment needs a list
|
[
"[email protected]"
] | |
78123c2af7bf0854263ba8aa6b53951d3161379b
|
b1da705aab90be29e17036d4495f945159ea0160
|
/bin/restore
|
a1ba719f2492a6b886fe99cc12b1808664df8d93
|
[
"Apache-2.0"
] |
permissive
|
dan-sullivan/mist.api
|
06e428fe82f817e79dd7068a205ba0ea6dfea334
|
82ea1780801429336e89be714131d69f4015768b
|
refs/heads/master
| 2022-04-20T03:38:21.191064 | 2020-04-22T17:02:46 | 2020-04-22T17:03:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,126 |
#!/usr/bin/env python
import os
import argparse
import requests
from mist.api import config
def main():
argparser = argparse.ArgumentParser(
description="Restore a mist backup"
)
argparser.add_argument('backup', help="Backup to restore.")
args = argparser.parse_args()
cmd = 's3cmd --access_key=%s --secret_key=%s get %s' % (
config.BACKUP['key'], config.BACKUP['secret'],
args.backup)
os.system(cmd)
dump_path = args.backup.split('/')[-1]
if dump_path.endswith('.gpg'):
new_path = dump_path.replace('.gpg', '')
cmd = 'gpg --pinentry-mode loopback -o %s -d %s' % (new_path, dump_path)
os.system(cmd)
dump_path = new_path
if 'mongo' in args.backup:
cmd = 'mongorestore -h %s --gzip --archive=%s' % (config.MONGO_URI,
dump_path)
os.system(cmd)
elif 'influx' in args.backup:
# Strip protocol prefix from influx backup uri
influx_backup_host = config.INFLUX.get('backup', '').replace(
'http://', '').replace('https://', '')
# Prepare base URL.
url = '%s/query' % config.INFLUX['host']
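# Restore flow per database: unpack the snapshot tar, restore it into a
# temporary <db>_bak database, then (after confirmation) copy every
# measurement back into <db> with SELECT INTO and drop the _bak copy.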
for db in ['telegraf', 'metering']:
cmd = 'rm -rf influx-snapshot && tar xvf %s && \
influxd restore -host %s -portable -db %s -newdb %s_bak \
influx-snapshot && echo "Restored database as %s_bak"' % (
dump_path, influx_backup_host, db,
db, db)
os.system(cmd)
resp = input("Move data from %s_bak to %s? [y/n] " % (db, db))  # input() keeps this runnable on Python 3, matching the print() calls used elsewhere in the script
if resp.lower() == 'y':
requests.post('%s?q=CREATE database %s' % (url, db))
query = "SELECT * INTO %s..:MEASUREMENT FROM /.*/ GROUP BY *;"
query += "DROP DATABASE %s_bak"
query = query % (db, db)
requests.post('%s?db=%s_bak&q=%s' % (url, db, query))
requests.post('%s?q=DROP database %s_bak' % (url, db))
else:
print('Unknown backup type')
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | ||
d0523a8eb2d8b4391e899985c3cf03af8655971e
|
648f96d16f5413ab4f9f52fd132b7371c39733e1
|
/local/get_metadata.py
|
0019173cd8ad496d28a06571533222092d5c1824
|
[
"MIT"
] |
permissive
|
Akash-Sharma-1/TweepyMR
|
f608c9862c6af2ff6af3ad803ccf5f82844a58e6
|
901af2014928691d62ad6d90810e3bb8f35b6fcb
|
refs/heads/master
| 2021-02-08T08:52:58.674947 | 2020-07-16T08:01:19 | 2020-07-16T08:01:19 | 244,132,519 | 2 | 0 | null | 2020-03-01T10:56:41 | 2020-03-01T10:56:41 | null |
UTF-8
|
Python
| false | false | 2,831 |
py
|
import tweepy
import json
import math
import csv
import zipfile
from time import sleep
# CHANGE THIS TO THE USER YOU WANT
user = 'narendramodi'
with open('api_keys.json') as f:
keys = json.load(f)
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)
user = user.lower()
output_file = '{}.json'.format(user)
output_file_short = '{}_short.json'.format(user)
compression = zipfile.ZIP_DEFLATED
with open('all_ids.json') as f:
ids = json.load(f)
print('total ids: {}'.format(len(ids)))
all_data = []
start = 0
end = 100
limit = len(ids)
i = math.ceil(limit / 100)
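# statuses_lookup accepts at most 100 ids per request, so the ids are
# processed in math.ceil(limit / 100) batches of up to 100
# (e.g. 3205 ids -> 33 requests).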
for go in range(i):
print('currently getting {} - {}'.format(start, end))
sleep(6) # needed to prevent hitting API rate limit
id_batch = ids[start:end]
start += 100
end += 100
tweets = api.statuses_lookup(id_batch)
for tweet in tweets:
all_data.append(dict(tweet._json))
print('metadata collection complete')
print('creating master json file')
with open(output_file, 'w') as outfile:
json.dump(all_data, outfile)
print('creating ziped master json file')
zf = zipfile.ZipFile('{}.zip'.format(user), mode='w')
zf.write(output_file, compress_type=compression)
zf.close()
results = []
def is_retweet(entry):
return 'retweeted_status' in entry.keys()
def get_source(entry):
if '<' in entry["source"]:
return entry["source"].split('>')[1].split('<')[0]
else:
return entry["source"]
with open(output_file) as json_data:
data = json.load(json_data)
for entry in data:
t = {
"created_at": entry["created_at"],
"text": entry["text"],
"in_reply_to_screen_name": entry["in_reply_to_screen_name"],
"retweet_count": entry["retweet_count"],
"favorite_count": entry["favorite_count"],
"source": get_source(entry),
"id_str": entry["id_str"],
"is_retweet": is_retweet(entry)
}
results.append(t)
print('creating minimized json master file')
with open(output_file_short, 'w') as outfile:
json.dump(results, outfile)
with open(output_file_short, encoding='utf-8') as master_file:
data = json.load(master_file)
fields = ["favorite_count", "source", "text", "in_reply_to_screen_name", "is_retweet", "created_at", "retweet_count", "id_str"]
print('creating CSV version of minimized json master file')
f = csv.writer(open('{}.csv'.format(user), 'w',encoding='utf-8'))
f.writerow(fields)
for x in data:
f.writerow([x["favorite_count"], x["source"], x["text"], x["in_reply_to_screen_name"], x["is_retweet"], x["created_at"], x["retweet_count"], x["id_str"]])
|
[
"[email protected]"
] | |
48351d6d1b511a8717bd34a114b6e54683357290
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/acllog/flowcounteraghist1d.py
|
2e2a886e4137ca0fffa75a3d90db0646a85fbed6
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,371 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FlowCounterAgHist1d(Mo):
"""
A class that represents historical aggregated statistics for Flow Record Counter in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.acllog.FlowCounterAgHist1d", "Flow Record Counter")
counter = CounterMeta("hitscount", CounterCategory.COUNTER, "hits", "Hits Counter")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "hitscountCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "hitscountPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "hitscountSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "hitscountThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "hitscountTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "hitscountRate"
meta._counters.append(counter)
meta.moClassName = "acllogFlowCounterAgHist1d"
meta.rnFormat = "HDacllogFlowCounterAg1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated Flow Record Counter stats in 1 day"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.acllog.PermitL3Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL2Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL2Flow")
meta.parentClasses.add("cobra.model.acllog.DropL3Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL3Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL2Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL3Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL2Pkt")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.acllog.FlowCounterAgHist")
meta.rnPrefixes = [
('HDacllogFlowCounterAg1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "hitscountCum", "hitscountCum", 25142, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Hits Counter cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountCum", prop)
prop = PropMeta("str", "hitscountPer", "hitscountPer", 25143, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Hits Counter periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountPer", prop)
prop = PropMeta("str", "hitscountRate", "hitscountRate", 25147, PropCategory.IMPLICIT_RATE)
prop.label = "Hits Counter rate"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountRate", prop)
prop = PropMeta("str", "hitscountSpct", "hitscountSpct", 25144, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Hits Counter suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountSpct", prop)
prop = PropMeta("str", "hitscountThr", "hitscountThr", 25145, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Hits Counter thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("hitscountThr", prop)
prop = PropMeta("str", "hitscountTr", "hitscountTr", 25146, PropCategory.IMPLICIT_TREND)
prop.label = "Hits Counter trend"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountTr", prop)
prop = PropMeta("str", "index", "index", 25066, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
e69cbe6f5d5c2767f1553eace21a5c5ab7485281
|
9df7a73ddb4237bbd752efdc6c7b03da5aa1fa40
|
/weiss/planner/liblinearutil.py
|
74db82a219a5e197dabfcccd8a4886a8c4e3ca05
|
[
"Apache-2.0"
] |
permissive
|
austinlostinboston/mitsWebApp
|
39f8ada858def608c2377bdea006590379896a5c
|
bf30e891f4b0985e636935a82b87e3b100e1fd64
|
refs/heads/master
| 2020-04-01T12:40:53.615324 | 2015-08-06T20:48:35 | 2015-08-06T20:48:35 | 37,482,873 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,549 |
py
|
#!/usr/bin/env python
import os
import sys
import time
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
from liblinear import *
from liblinear import __all__ as liblinear_all
from ctypes import c_double
__all__ = ['svm_read_problem', 'load_model', 'save_model', 'evaluations',
'train', 'predict'] + liblinear_all
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
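# Example (hypothetical file in LIBSVM format, one "label index:value ..."
# line per instance):
#   +1 1:0.7 3:1.2
#   -1 2:0.5
# y, x = svm_read_problem('data.txt')
# # -> y = [1.0, -1.0], x = [{1: 0.7, 3: 1.2}, {2: 0.5}]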
def load_model(model_file_name):
"""
load_model(model_file_name) -> model
Load a LIBLINEAR model from model_file_name and return.
"""
model = liblinear.load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
def save_model(model_file_name, model):
"""
save_model(model_file_name, model) -> None
Save a LIBLINEAR model to the file model_file_name.
"""
liblinear.save_model(model_file_name.encode(), model)
def evaluations(ty, pv):
"""
evaluations(ty, pv) -> (ACC, MSE, SCC)
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if len(ty) != len(pv):
raise ValueError("len(ty) must equal to len(pv)")
total_correct = total_error = 0
sumv = sumy = sumvv = sumyy = sumvy = 0
for v, y in zip(pv, ty):
if y == v:
total_correct += 1
total_error += (v-y)*(v-y)
sumv += v
sumy += y
sumvv += v*v
sumyy += y*y
sumvy += v*y
l = len(ty)
ACC = 100.0*total_correct/l
MSE = total_error/l
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (ACC, MSE, SCC)
def train(arg1, arg2=None, arg3=None):
"""
train(y, x [, options]) -> model | ACC
train(prob [, options]) -> model | ACC
train(prob, param) -> model | ACC
Train a model from data (y, x) or a problem prob using
'options' or a parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s type : set type of solver (default 1)
for multi-class classification
0 -- L2-regularized logistic regression (primal)
1 -- L2-regularized L2-loss support vector classification (dual)
2 -- L2-regularized L2-loss support vector classification (primal)
3 -- L2-regularized L1-loss support vector classification (dual)
4 -- support vector classification by Crammer and Singer
5 -- L1-regularized L2-loss support vector classification
6 -- L1-regularized logistic regression
7 -- L2-regularized logistic regression (dual)
for regression
11 -- L2-regularized L2-loss support vector regression (primal)
12 -- L2-regularized L2-loss support vector regression (dual)
13 -- L2-regularized L1-loss support vector regression (dual)
-c cost : set the parameter C (default 1)
-p epsilon : set the epsilon in loss function of SVR (default 0.1)
-e epsilon : set tolerance of termination criterion
-s 0 and 2
|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
where f is the primal function, (default 0.01)
-s 11
|f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)
-s 1, 3, 4, and 7
Dual maximal violation <= eps; similar to libsvm (default 0.1)
-s 5 and 6
|f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
where f is the primal function (default 0.01)
-s 12 and 13
|f'(alpha)|_1 <= eps |f'(alpha0)|,
where f is the dual function (default 0.1)
-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
-wi weight: weights adjust the parameter C of different classes (see README for details)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)):
assert isinstance(arg2, (list, tuple))
y, x, options = arg1, arg2, arg3
prob = problem(y, x)
param = parameter(options)
elif isinstance(arg1, problem):
prob = arg1
if isinstance(arg2, parameter):
param = arg2
else :
param = parameter(arg2)
if prob is None or param is None:
raise TypeError("Wrong types for the arguments")
prob.set_bias(param.bias)
liblinear.set_print_string_function(param.print_func)
err_msg = liblinear.check_parameter(prob, param)
if err_msg :
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
liblinear.cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
directory = os.getcwd()
result = open('training_log','a')
if param.solver_type in [L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
result.write("%s\nCross Validation Mean squared error : %g\nArguments: %s\n"
% (time.strftime("%x"), MSE, options))
result.write("%s\nCross Validation Squared correlation coefficient : %g\nArguments: %s\n"
% (time.strftime("%x"), SCC, options))
#return MSE
else:
result.write("%s\nCross Validation Accuracy : %g%%\nArguments: %s\n"
% (time.strftime("%x"), ACC, options))
#return ACC
#else :
m = liblinear.train(prob, param)
m = toPyModel(m)
return m
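# Minimal usage sketch (assumes a LIBSVM-format file 'heart_scale' exists):
# y, x = svm_read_problem('heart_scale')
# m = train(y[:200], x[:200], '-c 4')
# p_labels, p_vals = predict(y[200:], x[200:], m)
# # note: this module's predict() returns two values, not the usual three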
def predict(y, x, m, options=""):
"""
predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only
-q quiet mode (no outputs)
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k binary-class
SVMs. if k = 2 and solver is not MCSVM_CS, only one decision value
is returned. For probabilities, each element contains k values
indicating the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
def info(s):
print(s)
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
elif argv[i] == '-q':
info = print_null
else:
raise ValueError("Wrong options")
i+=1
solver_type = m.param.solver_type
nr_class = m.get_nr_class()
nr_feature = m.get_nr_feature()
is_prob_model = m.is_probability_model()
bias = m.bias
if bias >= 0:
biasterm = feature_node(nr_feature+1, bias)
else:
biasterm = feature_node(-1, bias)
pred_labels = []
pred_values = []
if predict_probability:
if not is_prob_model:
raise TypeError('probability output is only supported for logistic regression')
prob_estimates = (c_double * nr_class)()
for xi in x:
xi, idx = gen_feature_nodearray(xi, feature_max=nr_feature)
xi[-2] = biasterm
label = liblinear.predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if nr_class <= 2:
nr_classifier = 1
else:
nr_classifier = nr_class
dec_values = (c_double * nr_classifier)()
for xi in x:
xi, idx = gen_feature_nodearray(xi, feature_max=nr_feature)
xi[-2] = biasterm
label = liblinear.predict_values(m, xi, dec_values)
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
if len(y) == 0:
y = [0] * len(x)
#The following part is
#ACC, MSE, SCC = evaluations(y, pred_labels)
#l = len(y)
#if m.is_regression_model():
# info("Mean squared error = %g (regression)" % MSE)
# info("Squared correlation coefficient = %g (regression)" % SCC)
#else:
# info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
#return pred_labels, (ACC, MSE, SCC), pred_values
return pred_labels, pred_values
|
[
"[email protected]"
] | |
405d7228f719d71c4b76b31841b43bc34e7a2c0d
|
80d20d3edd874a011361800939f4ef6982673137
|
/balance.py
|
321ffc73e54d9c8b635da940facb59a3daaa9543
|
[] |
no_license
|
vincelwt/krakenoverview
|
2969d1ef9f4bd068521ffefc7421c2c0e414f43a
|
b7043a3852da866f4097323209b35807ccff9801
|
refs/heads/master
| 2021-01-22T08:09:47.603675 | 2017-05-27T14:20:10 | 2017-05-27T14:20:10 | 92,603,134 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,175 |
py
|
import krakenex, json, os, time
from termcolor import cprint, colored
from tabulate import tabulate
k = krakenex.API()
k.load_key('krakenvince.key')
liveValues = []
compound = []
# Updates liveValues array with current prices of all currencies
def updateLivevalues():
global liveValues
assets = k.query_public('Ticker', {'pair': 'ETHEUR,XBTEUR,LTCEUR,XMREUR,XRPEUR,DASHEUR,ETCEUR,ZECEUR,GNOEUR,REPEUR'})['result']
liveValues = []
for attr, value in assets.iteritems():
liveValues.append([attr.replace('ZEUR', '').replace('EUR', ''), float(value['a'][0]) ])
# Returns an array with the currency price a week ago & a day ago
def getOldvalues(pair):
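# The caller passes attr[-3:] + 'EUR', which truncates four-letter codes;
# map 'ASH' back to 'DASH', and 'XBT' to 'BTC' (apparently the pair
# spelling this OHLC query expects).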
pair = pair.replace('XBT', 'BTC').replace('ASH', 'DASH')
timeAgo = int(time.time()-3600*24*7)
spread = k.query_public('OHLC', {'pair': pair, 'interval': '1440', 'since': str(timeAgo)})
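# interval 1440 minutes = daily candles; querying since a week ago returns
# roughly seven entries, so value[0] is the week-old candle and value[5]
# approximately yesterday's.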
for attr, value in spread['result'].iteritems():
if not 'EUR' in attr: continue
weekAgo = value[0]
weekAgoAverage = ( float(weekAgo[1]) + float(weekAgo[2]) + float(weekAgo[3]) + float(weekAgo[4]) ) / 4
dayAgo = value[5]
dayAgoAverage = ( float(dayAgo[1]) + float(dayAgo[2]) + float(dayAgo[3]) + float(dayAgo[4]) ) / 4
return [weekAgoAverage, dayAgoAverage]
# Update total outcome of all made trades
def printTrades():
global compound
recentTrades = k.query_private('ClosedOrders', {})
compound = []
for attr, value in recentTrades['result']['closed'].iteritems():
type = value['descr']['type']
asset = value['descr']['pair'][:3]
source = value['descr']['pair'][-3:]
price = float( value['price'] )
vol = float( value['vol_exec'] )
cost = float( value['cost'] )
#print source, price
if type == 'sell':
vol = -vol
cost = -cost
touched = False
for e in compound:
if e[0] == asset:
e[1] += vol
e[2] += cost
touched = True
if not touched:
compound.append([asset, vol, cost])
#print '%15s %13s %12s' % (colored(type+asset, 'grey', 'on_yellow'), str(vol), colored(cost, 'white', 'on_magenta'))
#print value
#print '--------Total ---------'
#for e in compound:
# print '%6s %13s %12s' % (colored(e[0], 'grey', 'on_yellow'), str(e[1]), colored(e[2], 'white', 'on_magenta'))
def printBalance():
print colored('Updating data...', 'green')
table = [['Balance', 'Quantity', 'Euro amount', 'Net results', 'Last 24h', 'Last week']]
currencies = k.query_private('Balance')['result']
balance = k.query_private('TradeBalance', {'asset': 'ZEUR'})['result']
totalChange = 0
totalWeekChange = 0
totalDayChange = 0
# For each currency in Kraken "wallet"
for attr, pair in currencies.iteritems():
value = 0
change = 0
weekChange = 0
dayChange = 0
pair = float(pair)
valueStr = ''
for values in liveValues:
if values[0] == attr:
value = pair*values[1]
valueStr = str( int(value) )+' EUR'
if (attr != 'ZEUR'): # No need to calc changes for EUR fiat
oldData = getOldvalues(attr[-3:]+'EUR')
weekChange = value-(pair*oldData[0])
dayChange = value-(pair*oldData[1])
for e in compound:
if e[0] in attr:
change = float("%.2f" % float(value-e[2]))
totalChange += change
totalDayChange += dayChange
totalWeekChange += weekChange
changeStr = ''
if change > 0:
changeStr = colored(str("%.2f" % change)+' EUR', 'white', 'on_cyan')
elif change < 0:
changeStr = colored(str("%.2f" % change)+' EUR', 'white', 'on_red')
weekChangeStr = ''
if weekChange > 0:
weekChangeStr = colored(str("%.2f" % weekChange)+' EUR', 'white', 'on_cyan')
elif weekChange < 0:
weekChangeStr = colored(str("%.2f" % weekChange)+' EUR', 'white', 'on_red')
dayChangeStr = ''
if dayChange > 0:
dayChangeStr = colored(str("%.2f" % dayChange)+' EUR', 'white', 'on_cyan')
elif dayChange < 0:
dayChangeStr = colored(str("%.2f" % dayChange)+' EUR', 'white', 'on_red')
toPrint = [colored(attr, 'grey', 'on_yellow'), str(pair), colored(valueStr, 'white', 'on_magenta'), changeStr, dayChangeStr, weekChangeStr]
table.append(toPrint)
totalChangeStr = ''
if totalChange > 0:
totalChangeStr = colored(str("%.2f" % totalChange)+' EUR', 'white', 'on_cyan')
elif totalChange < 0:
totalChangeStr = colored(str("%.2f" % totalChange)+' EUR', 'white', 'on_red')
totalDayChangeStr = ''
if totalDayChange > 0:
totalDayChangeStr = colored(str("%.2f" % totalDayChange)+' EUR', 'white', 'on_cyan')
elif totalDayChange < 0:
totalDayChangeStr = colored(str("%.2f" % totalDayChange)+' EUR', 'white', 'on_red')
totalWeekChangeStr = ''
if totalWeekChange > 0:
totalWeekChangeStr = colored(str("%.2f" % totalWeekChange)+' EUR', 'white', 'on_cyan')
elif totalWeekChange < 0:
totalWeekChangeStr = colored(str("%.2f" % totalWeekChange)+' EUR', 'white', 'on_red')
table.append([colored('Total', 'white', 'on_blue'), 'x', colored( str(int(float( balance['eb'] )))+' EUR', 'white', 'on_green'), totalChangeStr, totalDayChangeStr, totalWeekChangeStr])
os.system('clear')
print tabulate(table, tablefmt="grid")
while 1:
try:
updateLivevalues()
printTrades()
printBalance()
except:
cprint('Error getting balance.', 'red')
time.sleep(60)
|
[
"[email protected]"
] | |
b9952317e4681c860e846a7e70a21548261b380d
|
ec762d868793b43301ac9f7b52e4ecdbe3512b55
|
/labs/week6/python/skinDetectionRuleBased.py
|
dff293930f1354937aa101f7c01f36d64c4ad5f1
|
[] |
no_license
|
payalbhatia/opencv_satyamallick
|
e92094273d89b8ebabc6025defad93f25b54988c
|
0a35bfa1a4593b05f633e7f6b0c48fa11e70177d
|
refs/heads/master
| 2021-10-16T16:28:31.652395 | 2019-02-12T05:46:41 | 2019-02-12T05:46:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,930 |
py
|
"""
Copyright 2017 BIG VISION LLC ALL RIGHTS RESERVED
This program is distributed WITHOUT ANY WARRANTY to the
Plus and Premium membership students of the online course
titled "Computer Visionfor Faces" by Satya Mallick for
personal non-commercial use.
Sharing this code is strictly prohibited without written
permission from Big Vision LLC.
For licensing and other inquiries, please email
[email protected]
"""
import cv2
import numpy as np
# (R, G, B) is classified as skin if
# R > 95 and G > 40 and B > 20 and
# R > G and R > B and |R-G| > 15 and
# max{R, G, B} - min{R, G, B} > 15
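# Illustrative helper (not part of the original course code): the same rule
# applied to a single (R, G, B) pixel, handy as a sanity check against the
# vectorised rules used below.
def isSkinPixel(r, g, b):
    r, g, b = int(r), int(g), int(b)
    return (r > 95 and g > 40 and b > 20 and
            r > g and r > b and abs(r - g) > 15 and
            max(r, g, b) - min(r, g, b) > 15)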
if __name__ == '__main__':
# Start webcam
cap = cv2.VideoCapture(0)
    # Check that the webcam opened successfully
    if not cap.isOpened():
print("Error opening video stream or file")
# Window for displaying output
cv2.namedWindow("Skin Detection")
while(1):
# Read frame
ret, image = cap.read()
# Split frame into r, g and b channels
b,g,r = cv2.split(image)
# Set output to all zeros
output = np.zeros(image.shape, dtype=np.uint8)
# Specifying the rules
rule1 = np.uint8(r>95) # R>95
rule2 = np.uint8(g>40) # G > 40
rule3 = np.uint8(b>20) # B > 20
rule4 = np.uint8(r>g) # R > G
rule5 = np.uint8(r>b) # R > B
        rule6 = np.uint8(abs(np.int16(r)-np.int16(g))>15) # |R-G| > 15 (cast first: uint8 subtraction would wrap around)
# max{R, G, B} - min{R, G, B} > 15
rule7 = np.uint8( ( np.maximum(np.maximum(b,g),r) - np.minimum(np.minimum(b,g),r) ) > 15)
# Apply (AND) all the rules to get the skin mask
skinMask = rule1 * rule2 * rule3 * rule4 * rule5 * rule6 * rule7
# Using the mask to get the skin pixels
output[ skinMask ==1 ] = image[ skinMask == 1]
# Display results
cv2.imshow("Skin Detection",output)
if cv2.waitKey(1) & 0xFF == 27:
break
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
00366ebb2a1b53335b701e3f22e14fd88941755c
|
bca25ecb5677b528cdf3d68b6ead3ef64290fdac
|
/Solid_rocket_motor_ignition_system_based_on_air-methane_mixture_-_Cantera (1).py
|
5874f2923ccf0ba1f6c7e483a53084c3b2415a3b
|
[] |
no_license
|
gibon1617/mkws
|
fe78eb8fd09dce38dc568001fab7fb21ef3e1ea2
|
0850f38503c9b664088387f9daa91f719fcf7823
|
refs/heads/master
| 2020-06-04T02:27:16.808515 | 2019-06-14T10:07:35 | 2019-06-14T10:07:35 | 191,833,938 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,334 |
py
|
import math as m
import cantera as ct
import numpy as np
import matplotlib.pyplot as plt
# Reaction mechanism GRI-Mech 3.0
gas = ct.Solution('gri30.xml')
# Energy that must be provided to ignite the rocket fuel of the A2 rocket motor
ignition_energy = 1.2*1550 # [kcal]
ign_energy = ignition_energy*4186.8 # [J]
# An outer combustion chamber (reactor) will be used for the ignition mixture
d = 40.0 # mm diameter of the reactor
l = 100.0 # mm length of the reactor
Vr = m.pi*d*d*0.25*l/1000000000 # [m^3] volume of the reactor
mdot = 0.025 # [kg/s] mass flow in the reactor
mt = []
Tt = []
Qet = []
Eqt = []
tsim = 0.005 # [s] time spent in the reactor by the flowing gases
eq_ratio = 0.6 # initial equivalence ratio
while eq_ratio < 1.6:
print(eq_ratio)
# gas definition, initial conditions and inlet
gas.TP = 300.0, ct.one_atm*10
gas.set_equivalence_ratio(eq_ratio, 'CH4:1.0', 'O2:1.0, N2:3.76')
inlet = ct.Reservoir(gas)
# filling combustor with a gas
gas.equilibrate('HP')
combustor = ct.IdealGasReactor(gas)
combustor.volume = Vr
# exhaust definition
exhaust = ct.Reservoir(gas)
# mass flow
inlet_mfc = ct.MassFlowController(inlet, combustor, mdot=mdot)
# simulation definition
sim = ct.ReactorNet([combustor])
# Reactor's states array
states = ct.SolutionArray(gas)
#Simulation
sim.set_initial_time(0.0) # reset the integrator
sim.advance(tsim)
states.append(combustor.thermo.state)
    V = mdot/combustor.density  # [m^3/s] volumetric flow rate through the reactor
    Q = -np.sum(states.net_production_rates * states.partial_molar_enthalpies)  # [W/m^3] volumetric heat release
    Qe = Q*V  # [W] heat release rate carried by the flow
    t = ign_energy/Qe  # [s] time needed to deliver the required ignition energy
    mpal = mdot*t  # [kg] mixture mass consumed in that time
print('masa = {:.2f}; T = {:.1f};'.format(mpal, combustor.T))
# writing results to arrays
mt.append(mpal)
Tt.append(t)
Qet.append(Qe)
Eqt.append(eq_ratio)
eq_ratio += 0.01
print('Qe = {:.2f}; mpal = {:.2f}; t = {:.2f}'.format(Qe, mpal, t))
Q=0.0
mpal=0.0
#plots
f, ax1 = plt.subplots(1,1)
ax1.plot(Eqt, mt, '.-', color='C0')
ax2 = ax1.twinx()
ax1.set_xlabel('equivalence ratio [-]')
ax1.set_ylabel('mixture mass [kg]', color='C0')
ax2.plot(Eqt,Tt, '.-', color='C0')
ax2.set_ylabel('t [s]', color='C0')
f.tight_layout()
plt.show()
|
[
"[email protected]"
] | |
8835421c4159ffa65247218af967d81b9433de71
|
4c4fdd5ce7cbf6bc4b0028c35d24f7b871a0b4d5
|
/guvipairsum.py
|
453e1dc0206c201cdf924864fe3e480e9b14b5fc
|
[] |
no_license
|
Ashiscodebat/lemmecode
|
7df1510e069e851e3f395efec83474813ca20b91
|
4a5dd5377d0c964d03f0e189d5142aece0c730e6
|
refs/heads/master
| 2020-04-01T16:55:00.731129 | 2018-10-22T15:52:27 | 2018-10-22T15:52:27 | 153,403,893 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 824 |
py
|
def checkrep(a, b, dic):
    # Return 1 if neither a nor b already appears as a key in dic, else 0.
    for key, vals in dic.items():
        if a == key or b == key:
            return 0
    return 1
n = int(input())
l= []
d = {}
dic = {}
count = 0
an = 0
minsum = 1000000
for i in range(0,n):
s =int(input())
l.append(s)
for i in range(0,n):
for j in range(i+1,n):
if l[i] + l[j] == 0 :
an = checkrep(l[i],l[j],dic)
if an == 1:
d = {l[i]:l[j]}
dic.update(d)
count = count + 1
an = 0
else:
continue
li = []
for key,val in dic.items():
li.extend([key,val])
if li == []:
print("No pair")
exit(1)
for i in range(0,len(li)):
print(li[i],end = " ")
|
[
"[email protected]"
] | |
ab454232840dbe7c1c6602b6ff4d5f9d48c09fe4
|
83d7003aa276549e563c614c59a70603c61402e1
|
/EXERCÍCIOS RESOLVIDOS/python/loops/alg4.py
|
75c0159296723ef6622922e3cbc9529dc21e78cc
|
[] |
no_license
|
mendelson/Algoritmos
|
66c67d6ccce99272da2d0e7bfd3fdee371e08275
|
68e90aa259464ada915e419fb26416203ce5f896
|
refs/heads/master
| 2021-01-19T16:01:31.962672 | 2017-08-21T19:11:04 | 2017-08-21T19:11:04 | 100,981,374 | 0 | 0 | null | 2017-08-21T18:35:24 | 2017-08-21T18:35:24 | null |
UTF-8
|
Python
| false | false | 373 |
py
|
# Algorithm 4
def main():
cont = final = 0
while cont < 2:
grade = float(input("Grade " + str(cont) + ": "))
if grade < 0 or grade > 10:
print("Invalid!")
else:
final = final + grade
cont = cont + 1
media = final / 2
print("Final grande = %.2f" % media)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
764fc0ffc334d6afffff8b3c00492773cafb869d
|
c1c0f44de906af90da1aeafa35c3780320b292ac
|
/build/lib/discogs_wrapper/__init__.py
|
2226d09879d2f6b94b11c62e69289d0b942c5956
|
[] |
no_license
|
Gargeebhase/Discogs-Python-Wrapper
|
696ca79c39d29323bf61635ecba1b7af8873ec67
|
67581337378346b308871310f862d93450e673b6
|
refs/heads/master
| 2023-05-25T05:13:26.132079 | 2020-06-09T04:01:26 | 2020-06-09T04:01:26 | 270,838,242 | 0 | 0 | null | 2023-05-22T20:44:19 | 2020-06-08T21:51:17 |
Python
|
UTF-8
|
Python
| false | false | 96 |
py
|
import os
import requests
session = requests.Session()
session.params = {}
from .dv import DV
|
[
"[email protected]"
] | |
b4ceba382671ec1f1b68a5f95a4f15ae4e570161
|
c428f065a7614751ba525599aa0595f02133fbee
|
/utils/exporters/blender/2.49/threejs_export.py
|
c69782d7b6eb6b498fd6472af0e6a08712e91a89
|
[
"MIT"
] |
permissive
|
jzmudzinski/three.js
|
b4251e7286e14471ae71224ba8db642de497c8f2
|
c186b37be3cd87a0f0aa8e98eb5d88e49b7d519b
|
refs/heads/master
| 2021-01-09T06:18:10.765230 | 2010-12-31T17:59:11 | 2010-12-31T17:59:11 | 1,213,135 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,801 |
py
|
#!BPY
# Based on Anthony D'Agostino (Scorpius)'s Raw Exported provided with Blender
# and on Mr.doob's and Kikko's Blender 2.5a2 exporter
# 'http://mrdoob.com', 'http://github.com/kikko'
"""
Name: 'three.js (.js)...'
Blender: 245
Group: 'Export'
Tooltip: 'Export selected mesh to three.js (.js)'
"""
__author__ = "George Profenza"
__url__ = ("disturb", "disturbmedia.com/blog",
"My blog, http://tomaterial.blogspot.com")
__version__ = "First File Exporter"
__bpydoc__ = """\
Export meshes to mr.doob's three.js 3D Engine.
Currently supports UVs. If the model doesn't display correctly
you might need to reverse some normals/do some cleanup.
More details on the engine here:
https://github.com/mrdoob/three.js
Have fun!
Usage:<br>
Select a mesh to be exported and go to "File->Export->three.js" .
"""
# $Id: raw_export.py 14597 2008-04-28 16:09:17Z campbellbarton $
#
# +---------------------------------------------------------+
# | Copyright (c) 2002 Anthony D'Agostino |
# | http://www.redrival.com/scorpius |
# | [email protected] |
# | April 28, 2002 |
# | Read and write RAW Triangle File Format (*.raw) |
# +---------------------------------------------------------+
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
import Blender
import BPyMesh
import re
clean = lambda varStr: re.sub('\W|^(?=\d)','_', varStr)
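# For example (illustrative): clean('my object.001') -> 'my_object_001',
# clean('1cube') -> '_1cube' (a leading digit gets an underscore prefix).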
def write(filename):
start = Blender.sys.time()
if not filename.lower().endswith('.js'):
filename += '.js'
scn= Blender.Scene.GetCurrent()
ob= scn.objects.active
if not ob:
Blender.Draw.PupMenu('Error%t|Select 1 active object')
return
mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
if not mesh:
Blender.Draw.PupMenu('Error%t|Could not get mesh data from active object')
return
mesh.transform(ob.matrixWorld)
#classname = clean(ob.name)
classname = filename.split('/')[-1].replace('.js','')
file = open(filename, "wb")
file.write('var %s = function () {\n\n' % classname)
file.write('\tvar scope = this;\n\n')
file.write('\tTHREE.Geometry.call(this);\n\n')
for v in mesh.verts:
file.write('\tv( %.6f, %.6f, %.6f );\n' % (v.co.x, v.co.z, -v.co.y)) # co
file.write('\n')
for f in mesh.faces:
if len(f.verts) == 3:
file.write('\tf3( %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.verts[0].index, f.verts[1].index, f.verts[2].index, f.verts[0].no.x, f.verts[0].no.z, -f.verts[0].no.y))
else:
file.write('\tf4( %d, %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.verts[0].index, f.verts[1].index, f.verts[2].index, f.verts[3].index, f.verts[0].no.x, f.verts[0].no.z, -f.verts[0].no.y))
face_index_pairs = [ (face, index) for index, face in enumerate(mesh.faces)]
file.write('\n')
'''
for f in me.faces:
if me.faceUV:
if len(f.verts) == 3:
file.write('\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f );\n' % (f.uv[0][0], 1.0-f.uv[0][1], f.uv[1][0], 1.0-f.uv[1][1], f.uv[2][0], 1.0-f.uv[2][1])
'''
for f in mesh.faces:
if mesh.faceUV:
if len(f.verts) == 3:
file.write('\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f );\n' % (f.uv[0].x, 1.0 - f.uv[0].y, f.uv[1].x, 1.0 - f.uv[1].y, f.uv[2].x, 1.0 - f.uv[2].y))
else:
file.write('\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (f.uv[0].x, 1.0 - f.uv[0].y, f.uv[1].x, 1.0 - f.uv[1].y, f.uv[2].x, 1.0 - f.uv[2].y, f.uv[3].x, 1.0 - f.uv[3].y))
file.write('\n')
file.write('\tfunction v( x, y, z ) {\n\n')
file.write('\t\tscope.vertices.push( new THREE.Vertex( new THREE.Vector3( x, y, z ) ) );\n\n')
file.write('\t}\n\n')
file.write('\tfunction f3( a, b, c, nx, ny, nz ) {\n\n')
file.write('\t\tscope.faces.push( new THREE.Face3( a, b, c, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n')
file.write('\t}\n\n')
file.write('\tfunction f4( a, b, c, d, nx, ny, nz ) {\n\n')
file.write('\t\tscope.faces.push( new THREE.Face4( a, b, c, d, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n')
file.write('\t}\n\n')
file.write('\tfunction uv( u1, v1, u2, v2, u3, v3, u4, v4 ) {\n\n')
file.write('\t\tvar uv = [];\n')
file.write('\t\tuv.push( new THREE.UV( u1, v1 ) );\n')
file.write('\t\tuv.push( new THREE.UV( u2, v2 ) );\n')
file.write('\t\tuv.push( new THREE.UV( u3, v3 ) );\n')
file.write('\t\tif ( u4 && v4 ) uv.push( new THREE.UV( u4, v4 ) );\n')
file.write('\t\tscope.uvs.push( uv );\n')
file.write('\t}\n\n')
file.write('}\n\n')
file.write('%s.prototype = new THREE.Geometry();\n' % classname)
file.write('%s.prototype.constructor = %s;' % (classname, classname))
file.close()
end = Blender.sys.time()
def main():
Blender.Window.FileSelector(write, 'three.js Export', Blender.sys.makename(ext='.js'))
if __name__=='__main__':
main()
|
[
"[email protected]"
] | |
7b44412ce11d8c6c342152422abcba093327737b
|
3a48cfb0b43fe61f52355a67b2b5700aa8c5ddf2
|
/src/som/interpreter/ast/nodes/message/generic_node.py
|
5cfc38a7257dfdd24617ab9116a1996177084454
|
[
"MIT"
] |
permissive
|
SOM-st/RTruffleSOM
|
ce380d02985b0ef1f41f400409f61377dc3a583e
|
1efc698577830ff3fcd1607e7155d9c6423e8804
|
refs/heads/master
| 2021-01-17T07:25:19.895376 | 2020-12-08T18:56:50 | 2020-12-08T18:56:50 | 17,311,290 | 9 | 2 |
MIT
| 2020-09-02T16:08:31 | 2014-03-01T08:45:25 |
Python
|
UTF-8
|
Python
| false | false | 2,256 |
py
|
from rpython.rlib.debug import make_sure_not_resized
from rpython.rlib.jit import we_are_jitted
from ..dispatch import SuperDispatchNode, UninitializedDispatchNode, send_does_not_understand
from .abstract_node import AbstractMessageNode
class GenericMessageNode(AbstractMessageNode):
_immutable_fields_ = ['_dispatch?']
_child_nodes_ = ['_dispatch']
def __init__(self, selector, universe, rcvr_expr, arg_exprs,
source_section = None):
AbstractMessageNode.__init__(self, selector, universe, rcvr_expr,
arg_exprs, source_section)
if rcvr_expr.is_super_node():
dispatch = SuperDispatchNode(selector, rcvr_expr.get_super_class(),
universe)
else:
dispatch = UninitializedDispatchNode(selector, universe)
self._dispatch = self.adopt_child(dispatch)
def replace_dispatch_list_head(self, node):
self._dispatch.replace(node)
def execute(self, frame):
rcvr, args = self._evaluate_rcvr_and_args(frame)
return self.execute_evaluated(frame, rcvr, args)
def execute_evaluated(self, frame, rcvr, args):
assert frame is not None
assert rcvr is not None
assert args is not None
make_sure_not_resized(args)
if we_are_jitted():
return self._direct_dispatch(rcvr, args)
else:
return self._dispatch.execute_dispatch(rcvr, args)
def _direct_dispatch(self, rcvr, args):
method = self._lookup_method(rcvr)
if method:
return method.invoke(rcvr, args)
else:
return send_does_not_understand(rcvr, self._selector, args, self._universe)
def _lookup_method(self, rcvr):
rcvr_class = self._class_of_receiver(rcvr)
return rcvr_class.lookup_invokable(self._selector)
def _class_of_receiver(self, rcvr):
if self._rcvr_expr.is_super_node():
return self._rcvr_expr.get_super_class()
return rcvr.get_class(self._universe)
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
self._selector,
self._source_section)
|
[
"[email protected]"
] | |
241dde377505cb18294f1a598634e2f265b9c22a
|
fb208d7a31c1cec334991328c32d57ecfcfbbd9e
|
/models/TFLite/saved_model_to_tflite_float_16.py
|
c52d3da297b40539a680738115de2f5fdfa60135
|
[] |
no_license
|
HanYangZhao/AnimeGAN
|
c9b8dc031098ef9e0af55fddb71886ee6ce8bb8f
|
394a9701af81b4656a9e76b567667ae30f4a0643
|
refs/heads/master
| 2022-09-13T06:16:12.030109 | 2020-05-30T01:14:11 | 2020-05-30T01:14:11 | 262,147,399 | 0 | 1 | null | 2020-05-07T20:05:21 | 2020-05-07T20:05:21 | null |
UTF-8
|
Python
| false | false | 589 |
py
|
# Run with TF 2.0+
import tensorflow as tf
# Convert the model.
model = tf.saved_model.load('./')
concrete_func = model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
# keep the ratio as close to 3/2 as possible while being divisible by 32
concrete_func.inputs[0].set_shape([1, 992, 1504, 3])
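# Informal check of the constraint above: 992 = 31*32 and 1504 = 47*32 are
# both divisible by 32, and 1504/992 ~= 1.516, close to the 3/2 target.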
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
# converter = tf.lite.TFLiteConverter.from_saved_model('./')
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # note: the attribute is 'optimizations'; 'optimization' was silently ignored
# The filename suggests float16 quantization was intended; this line (an
# addition, following the documented TFLite API) actually requests it:
converter.target_spec.supported_types = [tf.float16]
tflite_model = converter.convert()
open("./converted_model.tflite","wb").write(tflite_model)
|
[
"hanzhao"
] |
hanzhao
|
4840d8dc0bb9174550dbc6043c03fcfd6ee37c70
|
78f3af339af51102f9b1312a54ff79604f257e24
|
/ssh_remote.py
|
8cf925b4593f1868cbc66c3eb738da13587daca3
|
[] |
no_license
|
chemila/bin
|
c3b3b1aaf37619800d07d6708bfad61322a8a64a
|
7349e5a1ff0d5095ff65efdc0e663e9c8afeb471
|
refs/heads/master
| 2021-08-16T20:42:44.880705 | 2021-07-22T06:47:17 | 2021-07-22T06:47:17 | 1,201,992 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,559 |
py
|
#!/usr/bin/env python
import subprocess
import ConfigParser
from threading import Thread
from Queue import Queue
import time
"""
A threaded ssh-based command dispatch system
"""
def readConfig(file="/home/ethan/data/config.ini"):
"""Extract IP addresses and CMDS from config file and returns tuple"""
ips = []
cmds = []
Config = ConfigParser.ConfigParser()
Config.read(file)
machines = Config.items("MACHINES")
commands = Config.items("COMMANDS")
for ip in machines:
ips.append(ip[1])
for cmd in commands:
cmds.append(cmd[1])
return ips, cmds
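# Example layout of the config file parsed above (illustrative only; the
# section names must be MACHINES and COMMANDS, the option keys are arbitrary):
#
# [MACHINES]
# web1 = 192.168.0.10
# web2 = 192.168.0.11
#
# [COMMANDS]
# cmd1 = uptime
# cmd2 = df -h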
def launcher(i,q, cmd):
"""Spawns command in a thread to an ip"""
while True:
#grabs ip, cmd from queue
ip = q.get()
print "Thread %s: Running %s to %s" % (i, cmd, ip)
subprocess.call("ssh root@%s %s" % (ip, cmd), shell=True)
q.task_done()
if __name__ == '__main__':
start = time.time()
queue = Queue()
#grab ips and cmds from config
ips, cmds = readConfig()
#Determine Number of threads to use, but max out at 25
if len(ips) < 25:
num_threads = len(ips)
else:
num_threads = 25
#Start thread pool
for i in range(num_threads):
for cmd in cmds:
worker = Thread(target=launcher, args=(i, queue,cmd))
worker.setDaemon(True)
worker.start()
print "Main Thread Waiting"
for ip in ips:
queue.put(ip)
queue.join()
end = time.time()
print "Dispatch Completed in %s seconds" % end - start
|
[
"[email protected]"
] | |
efc4ec62a0ebada56728b089bbcba09c1c3a43e0
|
3dc3070b33f1355aa3fa589e3f029c45f2af14e2
|
/Chapter 2/02_05_hangman_play.py
|
a197409f3d8ca8aef75db094a596564c6608bf28
|
[] |
no_license
|
alec-rabold/Intro-Python-Projects
|
5a9f297585576d0bfd8688e70e0df469e78b786c
|
6d0766d97242e1056a07beeb693cda7178ea7184
|
refs/heads/master
| 2021-01-23T01:33:50.078305 | 2017-06-01T16:57:28 | 2017-06-01T16:57:28 | 92,879,268 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
import random
words = ['chicken', 'dog', 'cat', 'mouse', 'frog']
lives_remaining = 14
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess,word):
print('You win!')
break
if lives_remaining == 0:
print('You are hanged!')
print('The word was: ' + word)
break
def pick_a_word():
word_position = random.randint(0, len(words) - 1)
return words[word_position]
def get_guess(word):
return 'a'
def process_guess(guess,word):
global lives_remaining
lives_remaining = lives_remaining - 1
return False
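# A possible completion of the two stubs above (not part of this chapter's
# code, which deliberately leaves them as placeholders):
#
# def get_guess(word):
#     return input('Guess a letter: ')
#
# def process_guess(guess, word):
#     global lives_remaining
#     if guess in word:
#         return True  # simplistic: any letter in the word counts as a win here
#     lives_remaining = lives_remaining - 1
#     return False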
play()
|
[
"[email protected]"
] | |
e215e64f4961ac65de0386bc2f4edbb5f03c7cb4
|
ad432645cc8e08f5e4aeb24b70a075511e740cc5
|
/shp/views.py
|
1713a9305e5399c6d77f4852599a4ee92686564d
|
[] |
no_license
|
SamyakLuitel/geodjango
|
51bbf94bc23a072fe27eb3eb6b4be5694a16f7dc
|
c1f4a3e606c62c69a5401501b70a8d38e9ef352a
|
refs/heads/main
| 2023-04-28T06:20:30.587728 | 2021-05-26T15:27:07 | 2021-05-26T15:27:07 | 345,569,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
from django.shortcuts import render
from django.core.serializers import serialize
from django.http import HttpResponse
from .models import ShpNepal
# Create your views here.
def shp(request):
shpData = serialize('geojson',ShpNepal.objects.all())
    return HttpResponse(shpData, content_type='application/json')  # 'geojson' is not a registered content type
def index(request):
return render(request,'shp/index.html')
|
[
"[email protected]"
] | |
ff1f7ab5f78639ef2e3a0814d0360331c288f048
|
2d29197477e227070c07110183b31812f53c07a0
|
/books/apps.py
|
97ac2d91eab931ff613a714e85a82f6f15e90db3
|
[] |
no_license
|
manish2074/book_management
|
8d6a5fb9487a194cb5eb61948b6b09b626289a25
|
13fd5de24cc25bc82c9cd7a79f082d10e9f3edea
|
refs/heads/master
| 2021-09-10T06:00:17.355923 | 2020-03-30T04:34:55 | 2020-03-30T04:34:55 | 242,757,393 | 0 | 0 | null | 2021-09-08T01:48:07 | 2020-02-24T14:28:16 |
JavaScript
|
UTF-8
|
Python
| false | false | 157 |
py
|
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
    def ready(self):
        from books.signals import language_changed  # noqa: F401 -- importing wires up the signal receivers
|
[
"[email protected]"
] | |
b99ac23720342baf031a4813dfb3e5a9682e6cfa
|
57ab38dfeb1e6e5c66dddbb7dd5122ec69229c73
|
/meetings/forms.py
|
cfa5df507a79f57dbe7f654ef4eca7c04ce5ac2e
|
[] |
no_license
|
japial/meet-django
|
97ed37c217af3afb881f084aaaec432b4b435c81
|
4e02daa21079f385ff12537bd4de516da65e3f4e
|
refs/heads/master
| 2022-12-14T23:01:13.531260 | 2020-09-05T14:53:17 | 2020-09-05T14:53:17 | 292,758,322 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 686 |
py
|
from datetime import date
from django.core.exceptions import ValidationError
from django.forms import ModelForm, DateInput, TimeInput, TextInput
from meetings.models import Meeting
class MeetingForm(ModelForm):
class Meta:
model = Meeting
fields = '__all__'
widgets = {
'date': DateInput(attrs={'type': 'date'}),
'time': TimeInput(attrs={'type': 'time'}),
'duration': TextInput(attrs={'type': 'number', 'min': '1', 'max': '4'})
}
def clean_date(self):
d = self.cleaned_data.get('date')
if d < date.today():
            raise ValidationError('Meeting cannot be in the past')
return d
|
[
"[email protected]"
] | |
5c8711ec3d10513cade5507c7c98f752e3a04760
|
9b60c4cd95949db6771bcd5581b1207d06776cb0
|
/mdadm/source/default.py
|
0358f93b130e655301a5f87afc201a30bdbb1aed
|
[] |
no_license
|
OldSchoolOnline/openelec-addons
|
251e9112d825228d0362a5a67af3312f36db8f4e
|
377f46a63f6ca9cdc284fb9571b901308676a4fa
|
refs/heads/master
| 2020-06-01T18:33:37.340599 | 2013-04-05T15:19:34 | 2013-04-05T15:19:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,150 |
py
|
################################################################################
# This file is part of OpenELEC - http://www.openelec.tv
# Copyright (C) 2009-2011 Stephan Raue ([email protected])
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenELEC.tv; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
################################################################################
import os
import sys
import xbmcaddon
__scriptname__ = "mdadm is a Linux utility used to manage software RAID devices."
|
[
"[email protected]"
] | |
954a403a9df50306bafb2a28b887c0d65ca1b1b6
|
2b460c7598914e0fc70515619f19670a44f9ba90
|
/backend/test_flaskr.py
|
0a854efea85a581352710a1687150944f8f528b0
|
[] |
no_license
|
vijaybaliah/trivia
|
0fdd5e0c95031c96b58ddb58583355c1ee7371cf
|
a9396d54188517457e8844ba8d37a7e228101d9f
|
refs/heads/master
| 2023-02-07T17:47:20.443689 | 2019-11-12T19:57:17 | 2019-11-12T19:57:17 | 217,876,153 | 0 | 0 | null | 2023-02-02T06:40:41 | 2019-10-27T15:34:52 |
Python
|
UTF-8
|
Python
| false | false | 5,438 |
py
|
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_name = "trivia_test"
self.database_path = "postgres://{}:{}@{}/{}".format('postgres', 'password', 'localhost:5432', self.database_name)
setup_db(self.app, self.database_path)
self.code_success = 200
self.code_not_found = 404
self.code_unprocessable = 422
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
def format_response(self, response):
return json.loads(response.data)
"""
TODO
    Write at least one test for each endpoint: one for successful operation and one for expected errors.
"""
def test_get_categories(self):
res = self.client().get('/categories')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
def test_get_questions(self):
res = self.client().get('/questions')
data = self.format_response(res)
self.assertDictEqual(data['data']['categories'][0],
{'id': 1, 'type': 'Science'}
)
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
def test_404_get_questions(self):
res = self.client().get('/questions?page=100')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
def test_delete_question(self):
res = self.client().delete('/questions/3')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
def test_404_delete_question(self):
res = self.client().delete('/questions/24')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
def test_add_question(self):
mock_data = {
'question': 'test',
'answer': 'test2',
'difficulty': 1,
'category_id': 1
}
res = self.client().post('/questions', json=mock_data)
data = self.format_response(res)
self.assertEqual(data['data']['question'], mock_data['question'])
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
def test_404_add_question(self):
mock_data = {
'difficulty': 1,
'category_id': 1
}
res = self.client().post('/questions', json=mock_data)
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
def test_search_questions(self):
mock_data = {
'search_term': 'test'
}
res = self.client().post('/questions/search', json=mock_data)
data = self.format_response(res)
self.assertRegex(data['data'][0]['question'], mock_data['search_term'])
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
def test_404_search_questions(self):
mock_data = {
}
res = self.client().post('/questions/search', json=mock_data)
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
def test_get_questions_by_category_id(self):
res = self.client().get('/categories/1/questions')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
self.assertTrue(len(data['data']))
def test_404_get_questions_by_category_id(self):
res = self.client().get('/categories/100/questions')
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
def test_get_quiz_question(self):
mock_data = {
"quiz_category": {
"id": "1"
},
"previous_questions": []
}
res = self.client().post('/quizzes', json=mock_data)
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_success)
self.assertTrue(data['success'])
self.assertTrue(len(data['data']))
def test_404_get_quiz_question(self):
mock_data = {
"quiz_category": {
"id": "1"
}
}
res = self.client().post('/quizzes', json=mock_data)
data = self.format_response(res)
self.assertEqual(res.status_code, self.code_not_found)
self.assertFalse(data['success'])
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
0cb0301d89aba7935aed0f12f790f27edd0c395d
|
6afa8213eb2f583ce3d31d0b2f86adf783694c17
|
/dagsage.py
|
1a0b907eade51ea9ab242247c4b4e8be1da895b5
|
[] |
no_license
|
DLwbm123/GraphEnas
|
cd2e5523d8a67ac04b2671540affc9d45ae58915
|
cc8c0913832a33f7e730b1e7d0709300b8e7ac65
|
refs/heads/master
| 2022-01-07T16:30:06.684908 | 2019-08-11T20:53:05 | 2019-08-11T20:53:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,837 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from fuzzy_relu import fuzzy_relu
ATTENTION_ITERATIONS = 3
class DagSage(nn.Module):
"""
    A GraphSAGE-inspired convolution over a directed acyclic graph
"""
def __init__(self, input_dim, output_dim, representation_size, attention_iterations=ATTENTION_ITERATIONS):
# input_dim: size of vector representation of incoming nodes
# output_dim: size of node output dimension per node
# representation_size: size of internal hidden layers
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.representation_size = representation_size
self.attention_iterations = attention_iterations
self.node_self_rep = nn.Linear(input_dim, representation_size)
self.src_representation = nn.Linear(output_dim, representation_size)
self.src_keys = nn.Linear(output_dim, representation_size)
self.forget_trans = nn.Linear(representation_size*4, representation_size)
self.hidden_trans = nn.Linear(representation_size*4, representation_size)
self.query_gen = nn.Linear(representation_size*3, representation_size)
self.out_trans = nn.Linear(representation_size*3, output_dim)
def cuda(self):
self.node_self_rep = self.node_self_rep.cuda()
self.src_representation = self.src_representation.cuda()
self.src_keys = self.src_keys.cuda()
return self
def forward(self, nodes_adj):
# nodes_adj[0]: (batch, node, vector) - node representations
        # nodes_adj[1]: (batch, node, node)
        # nodes_adj[1] is a directed adjacency matrix
batch = nodes_adj[0].shape[0]
node_id_rep = self.node_self_rep(nodes_adj[0])
num_nodes = nodes_adj[0].shape[1]
out_nodes = torch.zeros((batch, num_nodes, self.output_dim))
src_conn = nodes_adj[1]
for i in range(num_nodes):
hidden = torch.zeros((batch, self.representation_size))
src_rep = self.src_representation(out_nodes)
src_keys = self.src_keys(out_nodes)
query = torch.zeros((batch, self.representation_size))
# Minimal gated unit
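            # As equations (descriptive comment, not in the original source):
            #   f = sigmoid(W_f [q_res; h; x_i; q])                  (forget gate)
            #   h = f * h + (1 - f) * tanh(W_h [q_res; f*h; x_i; q])
            #   q = W_q [h; x_i; q]                                  (next query)
            # where x_i is the node's own representation and q_res the attention readout.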
for j in range(self.attention_iterations):
inp_att = torch.einsum('bk,bik->bi', query, src_keys)
# Negate by max attention match to reduce chance of max clipping
inp_att = (inp_att.t()-torch.max(inp_att, dim=1).values).t()
inp_att = torch.exp(inp_att) # Softmax attention
inp_att *= src_conn[:,:,i]
inv_sum = 1/torch.sum(inp_att, dim=1)
inp_att = torch.einsum('bi,b->bi', inp_att, inv_sum)
query_res = torch.einsum('bi,biv->bv', inp_att, src_rep)
forget = self.forget_trans(torch.cat((query_res, hidden, node_id_rep[:,i], query), dim=1))
forget = torch.sigmoid(forget)
                hidden = forget*hidden + (1-forget)*torch.tanh(self.hidden_trans(torch.cat((query_res, forget*hidden, node_id_rep[:,i], query), dim=1)))
query = self.query_gen(torch.cat((hidden, node_id_rep[:,i], query), dim=1))
out_nodes[:,i] = self.out_trans(torch.cat((hidden, node_id_rep[:,i], query), dim=1))
return (out_nodes, nodes_adj[1])
class ReverseDagSage(nn.Module):
    def __init__(self, *args, **kwargs):
        super().__init__()  # required before assigning sub-modules
        self.dagsage = DagSage(*args, **kwargs)
def cuda(self):
self.dagsage = self.dagsage.cuda()
return self
def forward(self, node_adj):
reversed_nodes = torch.flip(node_adj[0], (1,))
        reversed_adjs = torch.flip(node_adj[1], (1, 2))  # flip both node dimensions (flipping dim 0 would reverse the batch)
res = self.dagsage((reversed_nodes, reversed_adjs))
return (torch.flip(res[0], (1,)), node_adj[1])
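# Minimal smoke test (an assumption, not part of the original module): runs
# DagSage on random inputs and checks output shapes. Note that nodes with no
# incoming edges divide by a zero attention sum in the code above, so their
# rows come out as nan; the shapes are still the point here.
if __name__ == '__main__':
    batch, nodes, in_dim, out_dim, rep = 2, 5, 8, 6, 16
    layer = DagSage(in_dim, out_dim, rep)
    node_feats = torch.randn(batch, nodes, in_dim)
    # Strictly upper-triangular adjacency: edges only run from earlier to
    # later nodes, i.e. the graph is a DAG in index order.
    adj = torch.triu(torch.ones(batch, nodes, nodes), diagonal=1)
    out, _ = layer((node_feats, adj))
    print(out.shape)  # expected: torch.Size([2, 5, 6])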
|
[
"[email protected]"
] | |
3d35ce4bb4ccdf9aece71f129c8f1245dec4ba05
|
e6787a3d92d3dd69cea480fef4ccf992cb46c8b1
|
/numguess.py
|
6cc2c998cba3fc81d997a08ce7578ba4c4cdb31e
|
[
"MIT"
] |
permissive
|
JuHyeong-K/numguess-flow
|
bc3f7ec339645c4ad54c9dac6ab57a6790efb989
|
ab6fdeda9a6dcc03f28d332098459e72143387f3
|
refs/heads/main
| 2023-02-22T21:40:28.775412 | 2021-01-29T05:42:54 | 2021-01-29T05:42:54 | 334,043,206 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
import random
num = random.randint(0, 50)
while True:
user_guess = int(input("Guess the number: "))
if user_guess == num:
print("Correct! Congraturation!")
break
else:
print("you are wrong! Try again!")
|
[
"[email protected]"
] | |
bf181f4074f25fc1a743940bbca6e1c67d5dd96d
|
219e279c1607256f27c0f44ddbd8ed86bf88b48e
|
/lesson4/string_functions.py
|
349bf118b9343352d81fb0cffc31dc701444145c
|
[] |
no_license
|
PanBohdan/ITEA_Python_Basics
|
dfdaee4b2b1388a75e0b9b38a5394d1d91a8f6b5
|
41be881fd00c247e15855b22110f6e4fb8900356
|
refs/heads/master
| 2020-06-01T20:00:27.693189 | 2019-07-21T13:01:39 | 2019-07-21T13:01:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,996 |
py
|
# Without using the built-in string methods, implement the following string
# methods: replace, split, find. Also write a remove function that works
# by index and by substring.
def find(inp_data, what_to_find):
index = 0
if what_to_find in inp_data:
c = what_to_find[0]
for i in inp_data:
if i == c:
if inp_data[index:index+len(what_to_find)] == what_to_find:
return index
index += 1
return -1
def split(inp_data, splitter):
list_of_data = []
temp_string = ''
for i in inp_data:
if i == splitter:
list_of_data.append(temp_string)
temp_string = ''
else:
temp_string += i
if temp_string:
list_of_data.append(temp_string)
return list_of_data
def replace(inp_data, what_to_replace, replace_with):
new_data = ''
index = 0
temp_index = 0
temp_list = []
for i in inp_data:
temp_list.append(i)
final_destination = len(what_to_replace)
for i in range(len(inp_data)-len(what_to_replace)+1):
temp_index += 1
for j in what_to_replace:
if inp_data[index+i] == j:
index += 1
if index >= final_destination:
if len(what_to_replace) == len(replace_with):
for l in range(final_destination):
temp_list[temp_index+l-1] = replace_with[l]
for temp in temp_list:
new_data += temp
return new_data
if len(what_to_replace) > len(replace_with):
for l in range(final_destination):
if l < len(replace_with):
temp_list[temp_index+l-1] = replace_with[l]
else:
temp_list.pop(l+index)
for temp in temp_list:
new_data += temp
return new_data
if len(what_to_replace) < len(replace_with):
for k in range(len(replace_with)):
if k < final_destination:
temp_list[temp_index+k-1] = replace_with[k]
elif k >= final_destination:
temp_list.insert(k+temp_index-1,
replace_with[k])
for temp in temp_list:
new_data += temp
return new_data
elif inp_data[index+i] != j:
index = 0
break
return new_data
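# The header exercise also asks for a remove function by index and by
# substring; the original file leaves them unwritten, so these are suggested
# sketches built on the find() helper above.
def remove_by_index(inp_data, index):
    return inp_data[:index] + inp_data[index + 1:]
def remove_by_substring(inp_data, what_to_remove):
    position = find(inp_data, what_to_remove)
    if position == -1:
        return inp_data
    return inp_data[:position] + inp_data[position + len(what_to_remove):]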
if __name__ == '__main__':
data = 'Hey! Hello world. Help! 1 2 3'
print(find(data, 'Hello'))
print(split(data, ' '))
print(replace(data, 'Hey!', 'Hello!'))
|
[
"[email protected]"
] | |
e1f076a8b40ac225debbdfe4c6812b58dabf08a9
|
ef74d9ad851021bcb0ed12880e14269b6ed7f617
|
/Sample/Doudizhu/Server/src/ZyGames.Doudizhu.HostServer/PyScript/Action/Action12001.py
|
7d60a50f0c7331ca1c254a61ca9b33c5de93279d
|
[
"BSD-2-Clause-Views",
"MIT"
] |
permissive
|
sunyuping/Scut
|
b5e5798e9b519941f0ac3a08a3263dc0f45beb47
|
ec2ea35c0e4de1f2da49c50d14e119a4f17cd93a
|
refs/heads/master
| 2020-12-25T23:19:26.597830 | 2013-11-16T07:50:01 | 2013-11-16T07:50:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,188 |
py
|
import clr, sys
from action import *
from System import *
from mathUtils import MathUtils
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Doudizhu.Bll');
clr.AddReference('ZyGames.Doudizhu.Model');
clr.AddReference('ZyGames.Doudizhu.Lang');
from System.Collections.Generic import *
from ZyGames.Framework.SyncThreading import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Com.Rank import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Bll import *
from ZyGames.Doudizhu.Bll.Logic import *
from ZyGames.Doudizhu.Bll.Com.Chat import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Cache import *
# 12001 - dial (prize wheel) screen API
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.IsFree = 0
self.FreeNum = 0
        self.DailList = List[DialInfo]()  # instantiate the default list (the original assigned the bare type)
self.UserCoin = 0
self.UserGold = 0
def getUrlElement(httpGet, parent):
urlParam = UrlParam()
if True:
urlParam.Result = True
else:
urlParam.Result = False
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
userId = parent.Current.User.PersonalId;
user = parent.Current.User
gameRoom = GameRoom.Current
dailyFreeNum = ConfigEnvSet.GetInt("User.DailyFreeNum", 3);
useNum = 0
userRestrain = GameDataCacheSet[UserDailyRestrain]().FindKey(userId)
if userRestrain!=None:
gameRoom.RefreshRestrain(userRestrain)
if userRestrain.RestrainProperty!= None:
useNum = userRestrain.RestrainProperty.DialFreeNum
if dailyFreeNum > useNum:
actionResult.FreeNum = MathUtils.Subtraction(dailyFreeNum,useNum)
else:
actionResult.IsFree = 1;
actionResult.DailList = ConfigCacheSet[DialInfo]().FindAll();
actionResult.UserCoin = user.GameCoin
gameHall = GameHall(user)
actionResult.UserGold = gameHall.UserGold
    # TODO: needs implementation
return actionResult
def buildPacket(writer, urlParam, actionResult):
postion = 0
writer.PushShortIntoStack(actionResult.IsFree)
writer.PushIntoStack(actionResult.FreeNum)
writer.PushIntoStack(len(actionResult.DailList))
for info in actionResult.DailList:
postion = MathUtils.Addition(postion, 1);
Probability = PythonHelper.TransformString(info.Probability)
dsItem = DataStruct()
dsItem.PushIntoStack(postion)
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.HeadID))
dsItem.PushIntoStack(MathUtils.ToNotNullString(Probability))
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.ItemDesc))
dsItem.PushIntoStack(info.GameCoin)
writer.PushIntoStack(dsItem)
writer.PushIntoStack(actionResult.UserCoin)
writer.PushIntoStack(actionResult.UserGold)
return True
|
[
"[email protected]"
] | |
c7b9c6378a3dd842cabaa7d5fb31214631d710ee
|
c02b157399f2ede41abf5119e57f94bfe18c713d
|
/merc/__init__.py
|
d8fed445f0d9439793cf5d9c80b0de7600943748
|
[
"MIT"
] |
permissive
|
merc-devel/merc
|
b366befb6285af984c2da7eabdd1063f16e0414e
|
15e010db2474b5d9f9720fc83983b03c95063a02
|
refs/heads/master
| 2021-01-18T17:15:33.553125 | 2014-11-08T03:02:30 | 2014-11-08T03:02:30 | 25,289,852 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 57 |
py
|
from merc import util
__version__ = util.get_version()
|
[
"[email protected]"
] | |
20a27700d4d3cca86f523401878a5f6df4caa327
|
5a5b66513e6b8e77d49662392868452deafeeef3
|
/reasoner/rl_agent/write_test_triplestore_1.py
|
914a126cb3b9968a6f4464166069d7821e66759c
|
[] |
no_license
|
broadinstitute/reasoner
|
e844c6dabbb9524e9b060b0f48095ecb628498c8
|
f78fffb771c482bd65d14814bff39e85b4cbac17
|
refs/heads/master
| 2021-06-06T23:50:36.183081 | 2020-06-19T17:31:06 | 2020-06-19T17:31:06 | 111,457,986 | 5 | 1 | null | 2020-06-19T17:31:07 | 2017-11-20T20:12:42 |
Python
|
UTF-8
|
Python
| false | false | 2,140 |
py
|
import os
import csv
import re
import numpy as np
from reasoner.knowledge_graph.KnowledgeGraph import KnowledgeGraph
np.random.seed(439572)
def get_preferred_label(labels):
if 'Drug' in labels:
return('Drug')
elif 'Disease' in labels:
return('Disease')
else:
return("ChebiTerm")
outfolder = 'translator_test_1'
kg = KnowledgeGraph()
result = kg.query("""
MATCH path = (dr:Drug)-[:HAS_ROLE]->(t:ChebiTerm)--(dis:Disease)--(dr)
UNWIND relationships(path) as r
RETURN startNode(r) as start, r, endNode(r) as end
""")
graph_triples = []
target_triples = []
for record in result:
start_term = get_preferred_label(record['start'].labels) + '_' + re.sub(r'[ ,\'-]', "",record['start']['name'])
end_term = get_preferred_label(record['end'].labels) + '_' + re.sub(r'[ ,\'-]', "",record['end']['name'])
relation = record['r'].type
if relation == 'HAS_INDICATION':
target_triples.append([start_term, relation, end_term])
else:
graph_triples.append([start_term, relation, end_term])
# split target triples into training and test set
n_targets = len(target_triples)
randidx = np.random.permutation(n_targets)
training_idx, test_idx = randidx[:np.floor(n_targets/2).astype(int)], randidx[np.floor(n_targets/2).astype(int):]
train_triples = [target_triples[i] for i in training_idx]
test_triples = [target_triples[i] for i in test_idx]
dev_triples = train_triples[1:50]
os.mkdir(outfolder)
graphfile = os.path.join(outfolder, 'graph.txt')
trainfile = os.path.join(outfolder, 'train.txt')
testfile = os.path.join(outfolder, 'test.txt')
devfile = os.path.join(outfolder, 'dev.txt')
with open(graphfile, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(graph_triples + train_triples)
with open(trainfile, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(train_triples)
with open(testfile, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(test_triples)
with open(devfile, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(dev_triples)
|
[
"[email protected]"
] | |
d90edc51fc306fd126b52ad7d183e14880a9e586
|
ec985f74562c245c5c7b0772536c723df5d53b1f
|
/wordcount/views.py
|
c0bb1a7282b8583203f18619e35aab5c572d3901
|
[] |
no_license
|
joycezhao23/wordcount-project
|
b4212b2f482b23a9f16d59368a78c2a2a2194fa4
|
965ce988b75a65af1058e0ba9047781060e88317
|
refs/heads/master
| 2020-03-14T15:00:01.318324 | 2018-05-01T01:48:43 | 2018-05-01T01:48:43 | 131,666,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 660 |
py
|
from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
return render(request, 'home.html')
def about(request):
return render(request, 'about.html')
def count(request):
fulltext = request.GET['fulltext']
wordList = fulltext.split()
wordDict = {}
for word in wordList:
if word in wordDict:
wordDict[word] += 1
else:
wordDict[word] = 1
sortedWords = sorted(wordDict.items(), key=operator.itemgetter(1), reverse=True)
return render(request, 'count.html', {'fulltext': fulltext, 'count': len(wordList), 'sortedWords': sortedWords})
|
[
"[email protected]"
] | |
904780b13a2b9bff1a93d620fd8790a37652d91f
|
d822501d09895216da854f2db8227164775970bd
|
/Functions/palindrome.py
|
ff198055c64c23fc99c5310034c3c4b9e3b3a87c
|
[] |
no_license
|
Jitendrap1702/Coding_Ninjas_Intro_to_Python
|
8bf7418b53d9e4ab1146950d4fef46a51cc33157
|
44371c5595c7507de0a1b4c0e596664d34c026c4
|
refs/heads/master
| 2022-12-10T22:07:27.213614 | 2020-09-06T16:00:24 | 2020-09-06T16:00:24 | 293,270,812 | 1 | 1 | null | 2020-09-06T13:48:35 | 2020-09-06T12:19:30 |
Python
|
UTF-8
|
Python
| false | false | 225 |
py
|
def checkPalindrome(num):
    k = str(num)
    return k == k[::-1]
num = int(input())
isPalindrome = checkPalindrome(num)
if(isPalindrome):
print('true')
else:
print('false')
|
[
"[email protected]"
] | |
e2953a819236184ccc205f4be1d4e16384d5849d
|
6725e264bbeaa56e7da830d6f13f60d7aa2607a3
|
/main.py
|
31ee3e2ef38ffd98c318427187043862ee6f737b
|
[] |
no_license
|
nixiaocang/uploadapp
|
5403d1ef4b24088c4e16dafd4762764458685758
|
24587d7fe3833eed6ce83d418b20575eace13bb7
|
refs/heads/master
| 2021-01-17T11:56:35.625505 | 2017-03-06T09:12:58 | 2017-03-06T09:12:58 | 84,052,720 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,849 |
py
|
#!/usr/bin/env python
#coding=utf-8
import wx
from login import LoginHelper
from upload import UploadHelper
from add import Addhelper
class MyFrame(wx.Frame):
def __init__(self, parent=None, title=u'BDP文件传输工具 v0.0.1'):
wx.Frame.__init__(self, parent, -1, title=title)
self.panel = wx.Panel(self, style=wx.TAB_TRAVERSAL | wx.CLIP_CHILDREN | wx.FULL_REPAINT_ON_RESIZE)
        # Add the username/password controls and manage them with a GridBagSizer
self.label1=wx.StaticText(self.panel,-1,label=u'企业域:')
self.label2=wx.StaticText(self.panel,-1,label=u'用户名:')
self.label3=wx.StaticText(self.panel,-1,label=u'密 码:')
self.domainText=wx.TextCtrl(self.panel,-1,size=(200,25))
self.userText=wx.TextCtrl(self.panel,-1,size=(200,25))
self.passText=wx.TextCtrl(self.panel,-1,size=(200,25), style=wx.TE_PASSWORD)
self.loginBtn=wx.Button(self.panel,1,label=u'登录')
self.gbsizer1=wx.GridBagSizer(hgap=10, vgap=10)
self.gbsizer1.Add(self.label1,pos=(2,5),span=(1,1),flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL)
self.gbsizer1.Add(self.domainText,pos=(2,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer1.Add(self.label2,pos=(3,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer1.Add(self.userText,pos=(3,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer1.Add(self.label3,pos=(4,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer1.Add(self.passText,pos=(4,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer1.Add(self.loginBtn,pos=(5,6),span=(1,1),flag=wx.EXPAND)
        # Add the bottom row of buttons and manage them with a horizontal BoxSizer
        # Bind an event handler to the "server settings" (login) button
self.loginBtn.Bind(wx.EVT_BUTTON,self.OnTouch)
        # Define the second panel
self.dlabel=wx.StaticText(self.panel,-1,label=u'数据源名称:')
self.dtext=wx.TextCtrl(self.panel,-1,size=(200,25))
self.tlabel=wx.StaticText(self.panel,-1,label=u'表名称:')
self.ttext=wx.TextCtrl(self.panel,-1,size=(200,25))
self.flabel=wx.StaticText(self.panel,-1,label=u'文件路径:')
self.ftext=wx.TextCtrl(self.panel,-1,size=(200,25))
self.slabel=wx.StaticText(self.panel,-1,label=u'分隔符:')
self.stext=wx.TextCtrl(self.panel,-1,size=(200,25))
self.nlabel=wx.StaticText(self.panel,-1,label=u'空值:')
self.ntext=wx.TextCtrl(self.panel,-1,size=(200,25))
self.putBtn=wx.Button(self.panel,1,label=u'上传')
self.rlabel=wx.StaticText(self.panel,-1,label=u'处理结果:')
self.rtext=wx.TextCtrl(self.panel,-1,size=(200,100), style=wx.TE_MULTILINE)
self.gbsizer2=wx.GridBagSizer(hgap=10, vgap=10)
self.gbsizer2.Add(self.dlabel,pos=(1,5),span=(1,1),flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL)
self.gbsizer2.Add(self.dtext,pos=(1,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.tlabel,pos=(2,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.ttext,pos=(2,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.flabel,pos=(3,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.ftext,pos=(3,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.slabel,pos=(4,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.stext,pos=(4,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.nlabel,pos=(5,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.ntext,pos=(5,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.putBtn,pos=(6,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.rlabel,pos=(8,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer2.Add(self.rtext,pos=(8,6),span=(1,1),flag=wx.EXPAND)
self.putBtn.Bind(wx.EVT_BUTTON,self.OnPut)
self.gbsizer3=wx.GridBagSizer(hgap=10, vgap=10)
self.retryBtn=wx.Button(self.panel,1,label=u'重试')
self.gbsizer3.Add(self.retryBtn,pos=(1,10),span=(1,1),flag=wx.EXPAND)
self.retryBtn.Bind(wx.EVT_BUTTON,self.RetyPut)
self.gbsizer4=wx.GridBagSizer(hgap=10, vgap=10)
self.createBtn=wx.Button(self.panel,1,label=u'新建')
self.appendBtn=wx.Button(self.panel,1,label=u'追加')
self.gbsizer4.Add(self.createBtn,pos=(1,10),span=(1,1),flag=wx.EXPAND)
self.gbsizer4.Add(self.appendBtn,pos=(2,10),span=(1,1),flag=wx.EXPAND)
self.createBtn.Bind(wx.EVT_BUTTON,self.create)
self.appendBtn.Bind(wx.EVT_BUTTON,self.append)
        # Define the append-data panel
self.dsid_label=wx.StaticText(self.panel,-1,label=u'数据源id:')
self.dsid_text=wx.TextCtrl(self.panel,-1,size=(200,25))
self.tbid_label=wx.StaticText(self.panel,-1,label=u'工作表id:')
self.tbid_text=wx.TextCtrl(self.panel,-1,size=(200,25))
self.adpath_label=wx.StaticText(self.panel,-1,label=u'文件路径:')
self.adpath_text=wx.TextCtrl(self.panel,-1,size=(200,25))
self.ads_label=wx.StaticText(self.panel,-1,label=u'分隔符:')
self.ads_text=wx.TextCtrl(self.panel,-1,size=(200,25))
self.adn_label=wx.StaticText(self.panel,-1,label=u'空值:')
self.adn_text=wx.TextCtrl(self.panel,-1,size=(200,25))
self.addBtn=wx.Button(self.panel,1,label=u'上传')
self.arlabel=wx.StaticText(self.panel,-1,label=u'处理结果:')
self.artext=wx.TextCtrl(self.panel,-1,size=(200,100), style=wx.TE_MULTILINE)
self.gbsizer5=wx.GridBagSizer(hgap=10, vgap=10)
self.gbsizer5.Add(self.dsid_label,pos=(1,5),span=(1,1),flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL)
self.gbsizer5.Add(self.dsid_text,pos=(1,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.tbid_label,pos=(2,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.tbid_text,pos=(2,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.adpath_label,pos=(3,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.adpath_text,pos=(3,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.ads_label,pos=(4,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.ads_text,pos=(4,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.adn_label,pos=(5,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.adn_text,pos=(5,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.addBtn,pos=(6,6),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.arlabel,pos=(8,5),span=(1,1),flag=wx.EXPAND)
self.gbsizer5.Add(self.artext,pos=(8,6),span=(1,1),flag=wx.EXPAND)
self.addBtn.Bind(wx.EVT_BUTTON,self.adata)
        # Add a BoxSizer to manage gbsizer1 (the username/password part),
        # the server-settings sizers, and the bsizer at the bottom
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.gbsizer1, 0, wx.EXPAND, 20)
self.sizer.Add(self.gbsizer2, 0, wx.EXPAND, 20)
self.sizer.Add(self.gbsizer5, 0, wx.EXPAND, 20)
self.sizer.Add(self.gbsizer3, 0, wx.EXPAND, 20)
self.sizer.Add(self.gbsizer4, 0, wx.EXPAND, 20)
self.sizer.Hide(self.gbsizer2)
self.sizer.Hide(self.gbsizer3)
self.sizer.Hide(self.gbsizer4)
self.sizer.Hide(self.gbsizer5)
        self.isShown = False  # this flag indicates whether the controls are currently hidden
        self.SetClientSize((600,480))  # resize the frame
self.panel.SetSizerAndFit(self.sizer)
self.sizer.SetSizeHints(self.panel)
def create(self, event):
self.sizer.Hide(self.gbsizer4)
self.sizer.Show(self.gbsizer2)
self.sizer.Layout()
return True
def append(self, event):
self.sizer.Hide(self.gbsizer4)
self.sizer.Remove(self.gbsizer2)
self.sizer.Show(self.gbsizer5)
self.sizer.Layout()
return True
def OnTouch(self, event):
domain = self.domainText.GetValue()
username = self.userText.GetValue()
password = self.passText.GetValue()
if (str(domain)=='' or str(password)=='' or str(username)==''):
wx.MessageBox("请输入完整的参数", caption="Message", style=wx.OK)
return True
try:
res = LoginHelper(domain, username, password).login()
except Exception, e:
dlg = wx.MessageDialog(None, "%s" % e.message, u"错误信息", wx.YES_NO | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
dlg.Destroy()
return True
self.sizer.Hide(self.gbsizer1)
self.sizer.Show(self.gbsizer4)
        self.sizer.Layout()  # key step: force the sizer to recompute and lay out its controls
self.user_id = res
return True
def OnPut(self, event):
ds_name = self.dtext.GetValue().encode('utf-8')
tbname = self.ttext.GetValue().encode('utf-8')
path = self.ftext.GetValue().encode('utf-8')
separator = self.stext.GetValue().encode('utf-8')
null_holder = self.ntext.GetValue().encode('utf-8')
if (str(ds_name)=='' or str(tbname)=='' or str(path)=='' or str(separator)=='' or str(null_holder)==''):
wx.MessageBox("请输入完整的参数", caption="Message", style=wx.OK)
return True
if path.endswith('/'):
path = path[:-1]
        self.last_path = path  # remembered so RetyPut can re-use it
        status, res = UploadHelper().do_action(self.user_id, path, tbname, ds_name, separator, null_holder)
self.rtext.SetValue(res)
if status != 0:
self.sizer.Show(self.gbsizer3)
self.sizer.Layout()
return True
def adata(self, event):
ds_id = self.dsid_text.GetValue().encode('utf-8')
tb_id = self.tbid_text.GetValue().encode('utf-8')
path = self.adpath_text.GetValue().encode('utf-8')
separator = self.ads_text.GetValue().encode('utf-8')
null_holder = self.adn_text.GetValue().encode('utf-8')
if (str(ds_id)=='' or str(tb_id)=='' or str(path)=='' or str(separator)=='' or str(null_holder)==''):
wx.MessageBox("请输入完整的参数", caption="Message", style=wx.OK)
return True
if path.endswith('/'):
path = path[:-1]
        self.last_path = path  # remembered so RetyPut can re-use it
        status, res = Addhelper().do_action(path, separator, null_holder, ds_id, tb_id, self.user_id)
self.artext.SetValue(res)
if status != 0:
self.sizer.Show(self.gbsizer3)
self.sizer.Layout()
return True
    def RetyPut(self, event):
        # 'path' was undefined here in the original; retry the most recent upload
        status, res = UploadHelper().retry(self.last_path)
self.rtext.SetValue(res)
self.artext.SetValue(res)
if status == 0:
self.sizer.Hide(self.gbsizer3)
self.sizer.Layout()
return True
if __name__ == "__main__":
app = wx.PySimpleApp()
frame = MyFrame(None)
frame.Show(True)
app.MainLoop()
|
[
"[email protected]"
] | |
c77a177a70bdda413b7873d59fb732ca95df4c26
|
767318c4ddf2713a8a035aa3bf68cd8260409aa0
|
/travellow/migrations/0001_initial.py
|
b7e329a337e095194ed93a271f0ab43896b0f234
|
[] |
no_license
|
sag-coder/travelbooking
|
704573b145ca04587bbaf2415f4bbdb6ad50b26f
|
dfc482ca01d1be324aba900075b2a64dc2fd1d88
|
refs/heads/master
| 2023-06-11T23:22:44.114545 | 2021-07-10T23:47:37 | 2021-07-10T23:47:37 | 384,562,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 558 |
py
|
# Generated by Django 3.2.3 on 2021-05-25 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Destination',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.ImageField(upload_to='pix')),
('offer', models.BooleanField(default=False)),
],
),
]
|
[
"[email protected]"
] | |
b3c22a904dac91d8b29c6d27d6ce97e5e99f49d8
|
a034d4ba39789e4a351112c46dd04a38180cd06c
|
/appengine/monorail/framework/sql.py
|
41fb66b26a0bd748c5788f67fa37cb9b6da157a5
|
[
"BSD-3-Clause"
] |
permissive
|
asdfghjjklllllaaa/infra
|
050ad249ab44f264b4e2080aa9537ce74aafb022
|
8f63af54e46194cd29291813f2790ff6e986804d
|
refs/heads/master
| 2023-01-10T21:55:44.811835 | 2019-07-01T14:03:32 | 2019-07-01T14:03:32 | 194,691,941 | 1 | 0 |
BSD-3-Clause
| 2023-01-07T07:12:37 | 2019-07-01T14:45:29 |
Python
|
UTF-8
|
Python
| false | false | 37,456 |
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A set of classes for interacting with tables in SQL."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import random
import re
import sys
import time
import settings
if not settings.unit_test_mode:
import MySQLdb
from framework import exceptions
from framework import framework_helpers
from infra_libs import ts_mon
from Queue import Queue
class ConnectionPool(object):
"""Manage a set of database connections such that they may be re-used.
"""
def __init__(self, poolsize=1):
self.poolsize = poolsize
self.queues = {}
@framework_helpers.retry(3, delay=0.1, backoff=2)
def get(self, instance, database):
"""Retun a database connection, or throw an exception if none can
be made.
"""
key = instance + '/' + database
    if key not in self.queues:
queue = Queue(self.poolsize)
self.queues[key] = queue
queue = self.queues[key]
if queue.empty():
cnxn = cnxn_ctor(instance, database)
else:
cnxn = queue.get()
# Make sure the connection is still good.
cnxn.ping()
cnxn.commit()
return cnxn
def release(self, cnxn):
    if cnxn.pool_key not in self.queues:
raise BaseException('unknown pool key: %s' % cnxn.pool_key)
q = self.queues[cnxn.pool_key]
if q.full():
cnxn.close()
else:
q.put(cnxn)
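# Typical usage of the pool (illustrative, not from the original file):
#   pool = ConnectionPool(poolsize=4)
#   cnxn = pool.get(settings.db_instance, settings.db_database_name)
#   try:
#       ...  # run queries on cnxn
#   finally:
#       pool.release(cnxn)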
@framework_helpers.retry(2, delay=1, backoff=2)
def cnxn_ctor(instance, database):
logging.info('About to connect to SQL instance %r db %r', instance, database)
if settings.unit_test_mode:
raise ValueError('unit tests should not need real database connections')
try:
if settings.local_mode:
start_time = time.time()
cnxn = MySQLdb.connect(
host='127.0.0.1', port=3306, db=database, user='root', charset='utf8')
else:
start_time = time.time()
cnxn = MySQLdb.connect(
unix_socket='/cloudsql/' + instance, db=database, user='root',
charset='utf8')
duration = int((time.time() - start_time) * 1000)
DB_CNXN_LATENCY.add(duration)
CONNECTION_COUNT.increment({'success': True})
except MySQLdb.OperationalError:
CONNECTION_COUNT.increment({'success': False})
raise
cnxn.pool_key = instance + '/' + database
cnxn.is_bad = False
return cnxn
# One connection pool per database instance (master, replicas are each an
# instance). We'll have four connections per instance because we fetch
# issue comments, stars, spam verdicts and spam verdict history in parallel
# with promises.
cnxn_pool = ConnectionPool(settings.db_cnxn_pool_size)
# MonorailConnection maintains a dictionary of connections to SQL databases.
# Each is identified by an int shard ID.
# And there is one connection to the master DB identified by key MASTER_CNXN.
MASTER_CNXN = 'master_cnxn'
CONNECTION_COUNT = ts_mon.CounterMetric(
'monorail/sql/connection_count',
'Count of connections made to the SQL database.',
[ts_mon.BooleanField('success')])
DB_CNXN_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_cnxn_latency',
'Time needed to establish a DB connection.',
None)
DB_QUERY_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_query_latency',
'Time needed to make a DB query.',
[ts_mon.StringField('type')])
DB_COMMIT_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_commit_latency',
'Time needed to make a DB commit.',
None)
DB_ROLLBACK_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_rollback_latency',
'Time needed to make a DB rollback.',
None)
DB_RETRY_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_retry_count',
'Count of queries retried.',
None)
DB_QUERY_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_query_count',
'Count of queries sent to the DB.',
[ts_mon.StringField('type')])
DB_COMMIT_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_commit_count',
'Count of commits sent to the DB.',
None)
DB_ROLLBACK_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_rollback_count',
'Count of rollbacks sent to the DB.',
None)
DB_RESULT_ROWS = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_result_rows',
'Number of results returned by a DB query.',
None)
def RandomShardID():
"""Return a random shard ID to load balance across replicas."""
return random.randint(0, settings.num_logical_shards - 1)
class MonorailConnection(object):
"""Create and manage connections to the SQL servers.
We only store connections in the context of a single user request, not
across user requests. The main purpose of this class is to make using
sharded tables easier.
"""
def __init__(self):
self.sql_cnxns = {} # {MASTER_CNXN: cnxn, shard_id: cnxn, ...}
def GetMasterConnection(self):
"""Return a connection to the master SQL DB."""
if MASTER_CNXN not in self.sql_cnxns:
self.sql_cnxns[MASTER_CNXN] = cnxn_pool.get(
settings.db_instance, settings.db_database_name)
logging.info(
'created a master connection %r', self.sql_cnxns[MASTER_CNXN])
return self.sql_cnxns[MASTER_CNXN]
def GetConnectionForShard(self, shard_id):
"""Return a connection to the DB replica that will be used for shard_id."""
if shard_id not in self.sql_cnxns:
physical_shard_id = shard_id % settings.num_logical_shards
replica_name = settings.db_replica_names[
physical_shard_id % len(settings.db_replica_names)]
shard_instance_name = (
settings.physical_db_name_format % replica_name)
self.sql_cnxns[shard_id] = cnxn_pool.get(
shard_instance_name, settings.db_database_name)
logging.info('created a replica connection for shard %d', shard_id)
return self.sql_cnxns[shard_id]
def Execute(self, stmt_str, stmt_args, shard_id=None, commit=True, retries=1):
"""Execute the given SQL statement on one of the relevant databases."""
if shard_id is None:
# No shard was specified, so hit the master.
sql_cnxn = self.GetMasterConnection()
else:
sql_cnxn = self.GetConnectionForShard(shard_id)
try:
return self._ExecuteWithSQLConnection(
sql_cnxn, stmt_str, stmt_args, commit=commit)
except MySQLdb.OperationalError as e:
logging.exception(e)
logging.info('retries: %r', retries)
if retries > 0:
DB_RETRY_COUNT.increment()
self.sql_cnxns = {} # Drop all old mysql connections and make new.
return self.Execute(
stmt_str, stmt_args, shard_id=shard_id, commit=commit,
retries=retries - 1)
else:
raise e
def _ExecuteWithSQLConnection(
self, sql_cnxn, stmt_str, stmt_args, commit=True):
"""Execute a statement on the given database and return a cursor."""
if stmt_str.startswith('INSERT') or stmt_str.startswith('REPLACE'):
logging.info('SQL stmt_str: \n%s', stmt_str)
logging.info('SQL stmt_args: %r', stmt_args)
else:
logging.info('SQL stmt: \n%s', (stmt_str % tuple(stmt_args)))
start_time = time.time()
cursor = sql_cnxn.cursor()
cursor.execute('SET NAMES utf8mb4')
logging.info('made cursor on %r in %d ms',
sql_cnxn, int((time.time() - start_time) * 1000))
if stmt_str.startswith('INSERT') or stmt_str.startswith('REPLACE'):
cursor.executemany(stmt_str, stmt_args)
duration = (time.time() - start_time) * 1000
DB_QUERY_LATENCY.add(duration, {'type': 'write'})
DB_QUERY_COUNT.increment({'type': 'write'})
else:
cursor.execute(stmt_str, args=stmt_args)
duration = (time.time() - start_time) * 1000
DB_QUERY_LATENCY.add(duration, {'type': 'read'})
DB_QUERY_COUNT.increment({'type': 'read'})
DB_RESULT_ROWS.add(cursor.rowcount)
logging.info('%d rows in %d ms', cursor.rowcount,
int(duration))
if commit and not stmt_str.startswith('SELECT'):
try:
sql_cnxn.commit()
duration = (time.time() - start_time) * 1000
DB_COMMIT_LATENCY.add(duration)
DB_COMMIT_COUNT.increment()
except MySQLdb.DatabaseError:
sql_cnxn.rollback()
duration = (time.time() - start_time) * 1000
DB_ROLLBACK_LATENCY.add(duration)
DB_ROLLBACK_COUNT.increment()
return cursor
def Commit(self):
"""Explicitly commit any pending txns. Normally done automatically."""
sql_cnxn = self.GetMasterConnection()
try:
sql_cnxn.commit()
except MySQLdb.DatabaseError:
logging.exception('Commit failed for cnxn, rolling back')
sql_cnxn.rollback()
def Close(self):
"""Safely close any connections that are still open."""
for sql_cnxn in self.sql_cnxns.values():
try:
sql_cnxn.rollback() # Abandon any uncommitted changes.
cnxn_pool.release(sql_cnxn)
except MySQLdb.DatabaseError:
# This might happen if the cnxn is somehow already closed.
logging.exception('ProgrammingError when trying to close cnxn')
class SQLTableManager(object):
"""Helper class to make it easier to deal with an SQL table."""
def __init__(self, table_name):
self.table_name = table_name
def Select(
self, cnxn, distinct=False, cols=None, left_joins=None,
joins=None, where=None, or_where_conds=False, group_by=None,
order_by=None, limit=None, offset=None, shard_id=None, use_clause=None,
having=None, **kwargs):
"""Compose and execute an SQL SELECT statement on this table.
Args:
cnxn: MonorailConnection to the databases.
distinct: If True, add DISTINCT keyword.
cols: List of columns to retrieve, defaults to '*'.
left_joins: List of LEFT JOIN (str, args) pairs.
joins: List of regular JOIN (str, args) pairs.
where: List of (str, args) for WHERE clause.
or_where_conds: Set to True to use OR in the WHERE conds.
group_by: List of strings for GROUP BY clause.
order_by: List of (str, args) for ORDER BY clause.
limit: Optional LIMIT on the number of rows returned.
offset: Optional OFFSET when using LIMIT.
shard_id: Int ID of the shard to query.
use_clause: Optional string USE clause to tell the DB which index to use.
having: List of (str, args) for Optional HAVING clause
**kwargs: WHERE-clause equality and set-membership conditions.
Keyword args are used to build up more WHERE conditions that compare
    column values to constants. Keyword argument foo='bar' translates to
    'foo = "bar"', and foo=[3, 4, 5] translates to 'foo IN (3, 4, 5)'.
Returns:
A list of rows, each row is a tuple of values for the requested cols.
"""
cols = cols or ['*'] # If columns not specified, retrieve all columns.
stmt = Statement.MakeSelect(
self.table_name, cols, distinct=distinct,
or_where_conds=or_where_conds)
if use_clause:
stmt.AddUseClause(use_clause)
if having:
stmt.AddHavingTerms(having)
stmt.AddJoinClauses(left_joins or [], left=True)
stmt.AddJoinClauses(joins or [])
stmt.AddWhereTerms(where or [], **kwargs)
stmt.AddGroupByTerms(group_by or [])
stmt.AddOrderByTerms(order_by or [])
stmt.SetLimitAndOffset(limit, offset)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, shard_id=shard_id)
rows = cursor.fetchall()
cursor.close()
return rows
def SelectRow(
self, cnxn, cols=None, default=None, where=None, **kwargs):
"""Run a query that is expected to return just one row."""
rows = self.Select(cnxn, distinct=True, cols=cols, where=where, **kwargs)
if len(rows) == 1:
return rows[0]
elif not rows:
logging.info('SelectRow got 0 results, so using default %r', default)
return default
else:
      raise ValueError('SelectRow got %d results, expected only 1' % len(rows))
def SelectValue(self, cnxn, col, default=None, where=None, **kwargs):
"""Run a query that is expected to return just one row w/ one value."""
row = self.SelectRow(
cnxn, cols=[col], default=[default], where=where, **kwargs)
return row[0]
def InsertRows(
self, cnxn, cols, row_values, replace=False, ignore=False,
commit=True, return_generated_ids=False):
"""Insert all the given rows.
Args:
cnxn: MonorailConnection object.
cols: List of column names to set.
row_values: List of lists with values to store. The length of each
nested list should be equal to len(cols).
replace: Set to True if inserted values should replace existing DB rows
that have the same DB keys.
ignore: Set to True to ignore rows that would duplicate existing DB keys.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
return_generated_ids: Set to True to return a list of generated
autoincrement IDs for inserted rows. This requires us to insert rows
one at a time.
Returns:
If return_generated_ids is set to True, this method returns a list of the
auto-increment IDs generated by the DB. Otherwise, [] is returned.
"""
if not row_values:
      return []  # Nothing to insert; the docstring promises a list.
generated_ids = []
if return_generated_ids:
# We must insert the rows one-at-a-time to know the generated IDs.
for row_value in row_values:
stmt = Statement.MakeInsert(
self.table_name, cols, [row_value], replace=replace, ignore=ignore)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
if cursor.lastrowid:
generated_ids.append(cursor.lastrowid)
cursor.close()
return generated_ids
stmt = Statement.MakeInsert(
self.table_name, cols, row_values, replace=replace, ignore=ignore)
stmt_str, stmt_args = stmt.Generate()
cnxn.Execute(stmt_str, stmt_args, commit=commit)
return []
def InsertRow(
self, cnxn, replace=False, ignore=False, commit=True, **kwargs):
"""Insert a single row into the table.
Args:
cnxn: MonorailConnection object.
replace: Set to True if inserted values should replace existing DB rows
that have the same DB keys.
ignore: Set to True to ignore rows that would duplicate existing DB keys.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
**kwargs: column=value assignments to specify what to store in the DB.
Returns:
The generated autoincrement ID of the key column if one was generated.
Otherwise, return None.
"""
cols = sorted(kwargs.keys())
row = tuple(kwargs[col] for col in cols)
generated_ids = self.InsertRows(
cnxn, cols, [row], replace=replace, ignore=ignore,
commit=commit, return_generated_ids=True)
if generated_ids:
return generated_ids[0]
else:
return None
def Update(self, cnxn, delta, where=None, commit=True, limit=None, **kwargs):
"""Update one or more rows.
Args:
cnxn: MonorailConnection object.
delta: Dictionary of {column: new_value} assignments.
where: Optional list of WHERE conditions saying which rows to update.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
limit: Optional LIMIT on the number of rows updated.
**kwargs: WHERE-clause equality and set-membership conditions.
Returns:
Int number of rows updated.
"""
if not delta:
return 0 # Nothing is being changed
stmt = Statement.MakeUpdate(self.table_name, delta)
stmt.AddWhereTerms(where, **kwargs)
stmt.SetLimitAndOffset(limit, None)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
result = cursor.rowcount
cursor.close()
return result
def IncrementCounterValue(self, cnxn, col_name, where=None, **kwargs):
"""Atomically increment a counter stored in MySQL, return new value.
Args:
cnxn: MonorailConnection object.
col_name: int column to increment.
where: Optional list of WHERE conditions saying which rows to update.
**kwargs: WHERE-clause equality and set-membership conditions. The
where and kwargs together should narrow the update down to exactly
one row.
Returns:
The new, post-increment value of the counter.
"""
stmt = Statement.MakeIncrement(self.table_name, col_name)
stmt.AddWhereTerms(where, **kwargs)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args)
assert cursor.rowcount == 1, (
'missing or ambiguous counter: %r' % cursor.rowcount)
result = cursor.lastrowid
cursor.close()
return result
def Delete(self, cnxn, where=None, or_where_conds=False, commit=True,
limit=None, **kwargs):
"""Delete the specified table rows.
Args:
cnxn: MonorailConnection object.
where: Optional list of WHERE conditions saying which rows to update.
or_where_conds: Set to True to use OR in the WHERE conds.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
limit: Optional LIMIT on the number of rows deleted.
**kwargs: WHERE-clause equality and set-membership conditions.
Returns:
Int number of rows updated.
"""
# Deleting the whole table is never intended in Monorail.
assert where or kwargs
stmt = Statement.MakeDelete(self.table_name, or_where_conds=or_where_conds)
stmt.AddWhereTerms(where, **kwargs)
stmt.SetLimitAndOffset(limit, None)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
result = cursor.rowcount
cursor.close()
return result
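
# Hedged usage sketch (not part of the original module); the table and
# column names are illustrative:
#
#   issue_tbl = SQLTableManager('Issue')
#   rows = issue_tbl.Select(cnxn, cols=['id', 'summary'], project_id=789)
#   issue_tbl.Update(cnxn, {'summary': 'new text'}, id=1)
#   issue_tbl.Delete(cnxn, id=1)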
class Statement(object):
"""A class to help build complex SQL statements w/ full escaping.
Start with a Make*() method, then fill in additional clauses as needed,
then call Generate() to return the SQL string and argument list. We pass
the string and args to MySQLdb separately so that it can do escaping on
the arg values as appropriate to prevent SQL-injection attacks.
The only values that are not escaped by MySQLdb are the table names
and column names, and bits of SQL syntax, all of which is hard-coded
in our application.
"""
@classmethod
def MakeSelect(cls, table_name, cols, distinct=False, or_where_conds=False):
"""Constuct a SELECT statement."""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
main_clause = 'SELECT%s %s FROM %s' % (
(' DISTINCT' if distinct else ''), ', '.join(cols), table_name)
return cls(main_clause, or_where_conds=or_where_conds)
@classmethod
def MakeInsert(
cls, table_name, cols, new_values, replace=False, ignore=False):
"""Constuct an INSERT statement."""
if replace == True:
return cls.MakeReplace(table_name, cols, new_values, ignore)
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
ignore_word = ' IGNORE' if ignore else ''
main_clause = 'INSERT%s INTO %s (%s)' % (
ignore_word, table_name, ', '.join(cols))
return cls(main_clause, insert_args=new_values)
@classmethod
def MakeReplace(
cls, table_name, cols, new_values, ignore=False):
"""Construct an INSERT...ON DUPLICATE KEY UPDATE... statement.
Uses the INSERT/UPDATE syntax because REPLACE is literally a DELETE
followed by an INSERT, which doesn't play well with foreign keys.
INSERT/UPDATE is an atomic check of whether the primary key exists,
followed by an INSERT if it doesn't or an UPDATE if it does.
"""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
ignore_word = ' IGNORE' if ignore else ''
main_clause = 'INSERT%s INTO %s (%s)' % (
ignore_word, table_name, ', '.join(cols))
return cls(main_clause, insert_args=new_values, duplicate_update_cols=cols)
@classmethod
def MakeUpdate(cls, table_name, delta):
"""Constuct an UPDATE statement."""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in delta.keys())
update_strs = []
update_args = []
for col, val in delta.items():
update_strs.append(col + '=%s')
update_args.append(val)
main_clause = 'UPDATE %s SET %s' % (
table_name, ', '.join(update_strs))
return cls(main_clause, update_args=update_args)
@classmethod
def MakeIncrement(cls, table_name, col_name, step=1):
"""Constuct an UPDATE statement that increments and returns a counter."""
assert _IsValidTableName(table_name)
assert _IsValidColumnName(col_name)
main_clause = (
'UPDATE %s SET %s = LAST_INSERT_ID(%s + %%s)' % (
table_name, col_name, col_name))
update_args = [step]
return cls(main_clause, update_args=update_args)
@classmethod
def MakeDelete(cls, table_name, or_where_conds=False):
"""Constuct a DELETE statement."""
assert _IsValidTableName(table_name)
main_clause = 'DELETE FROM %s' % table_name
return cls(main_clause, or_where_conds=or_where_conds)
def __init__(
self, main_clause, insert_args=None, update_args=None,
duplicate_update_cols=None, or_where_conds=False):
self.main_clause = main_clause # E.g., SELECT or DELETE
self.or_where_conds = or_where_conds
self.insert_args = insert_args or [] # For INSERT statements
for row_value in self.insert_args:
if not all(_IsValidDBValue(val) for val in row_value):
raise exceptions.InputException('Invalid DB value %r' % (row_value,))
self.update_args = update_args or [] # For UPDATEs
for val in self.update_args:
if not _IsValidDBValue(val):
raise exceptions.InputException('Invalid DB value %r' % val)
self.duplicate_update_cols = duplicate_update_cols or [] # For REPLACE-ish
self.use_clauses = []
self.join_clauses, self.join_args = [], []
self.where_conds, self.where_args = [], []
self.having_conds, self.having_args = [], []
self.group_by_terms, self.group_by_args = [], []
self.order_by_terms, self.order_by_args = [], []
self.limit, self.offset = None, None
def Generate(self):
"""Return an SQL string having %s placeholders and args to fill them in."""
clauses = [self.main_clause] + self.use_clauses + self.join_clauses
if self.where_conds:
if self.or_where_conds:
clauses.append('WHERE ' + '\n OR '.join(self.where_conds))
else:
clauses.append('WHERE ' + '\n AND '.join(self.where_conds))
if self.group_by_terms:
clauses.append('GROUP BY ' + ', '.join(self.group_by_terms))
if self.having_conds:
assert self.group_by_terms
clauses.append('HAVING %s' % ','.join(self.having_conds))
if self.order_by_terms:
clauses.append('ORDER BY ' + ', '.join(self.order_by_terms))
if self.limit and self.offset:
clauses.append('LIMIT %d OFFSET %d' % (self.limit, self.offset))
elif self.limit:
clauses.append('LIMIT %d' % self.limit)
elif self.offset:
clauses.append('LIMIT %d OFFSET %d' % (sys.maxint, self.offset))
if self.insert_args:
clauses.append('VALUES (' + PlaceHolders(self.insert_args[0]) + ')')
args = self.insert_args
if self.duplicate_update_cols:
clauses.append('ON DUPLICATE KEY UPDATE %s' % (
', '.join(['%s=VALUES(%s)' % (col, col)
for col in self.duplicate_update_cols])))
assert not (self.join_args + self.update_args + self.where_args +
self.group_by_args + self.order_by_args + self.having_args)
else:
args = (self.join_args + self.update_args + self.where_args +
self.group_by_args + self.having_args + self.order_by_args)
assert not (self.insert_args + self.duplicate_update_cols)
args = _BoolsToInts(args)
stmt_str = '\n'.join(clause for clause in clauses if clause)
assert _IsValidStatement(stmt_str), stmt_str
return stmt_str, args
def AddUseClause(self, use_clause):
"""Add a USE clause (giving the DB a hint about which indexes to use)."""
assert _IsValidUseClause(use_clause), use_clause
self.use_clauses.append(use_clause)
def AddJoinClauses(self, join_pairs, left=False):
"""Save JOIN clauses based on the given list of join conditions."""
for join, args in join_pairs:
assert _IsValidJoin(join), join
assert join.count('%s') == len(args), join
self.join_clauses.append(
' %sJOIN %s' % (('LEFT ' if left else ''), join))
self.join_args.extend(args)
def AddGroupByTerms(self, group_by_term_list):
"""Save info needed to generate the GROUP BY clause."""
assert all(_IsValidGroupByTerm(term) for term in group_by_term_list)
self.group_by_terms.extend(group_by_term_list)
def AddOrderByTerms(self, order_by_pairs):
"""Save info needed to generate the ORDER BY clause."""
for term, args in order_by_pairs:
assert _IsValidOrderByTerm(term), term
assert term.count('%s') == len(args), term
self.order_by_terms.append(term)
self.order_by_args.extend(args)
def SetLimitAndOffset(self, limit, offset):
"""Save info needed to generate the LIMIT OFFSET clause."""
self.limit = limit
self.offset = offset
def AddWhereTerms(self, where_cond_pairs, **kwargs):
"""Generate a WHERE clause."""
where_cond_pairs = where_cond_pairs or []
for cond, args in where_cond_pairs:
assert _IsValidWhereCond(cond), cond
assert cond.count('%s') == len(args), cond
self.where_conds.append(cond)
self.where_args.extend(args)
for col, val in sorted(kwargs.items()):
assert _IsValidColumnName(col), col
eq = True
if col.endswith('_not'):
col = col[:-4]
eq = False
if isinstance(val, set):
        val = list(val)  # MySQL interface cannot handle sets.
if val is None or val == []:
op = 'IS' if eq else 'IS NOT'
self.where_conds.append(col + ' ' + op + ' NULL')
elif isinstance(val, list):
op = 'IN' if eq else 'NOT IN'
# Sadly, MySQLdb cannot escape lists, so we flatten to multiple "%s"s
self.where_conds.append(
col + ' ' + op + ' (' + PlaceHolders(val) + ')')
self.where_args.extend(val)
else:
op = '=' if eq else '!='
self.where_conds.append(col + ' ' + op + ' %s')
self.where_args.append(val)
def AddHavingTerms(self, having_cond_pairs):
"""Generate a HAVING clause."""
for cond, args in having_cond_pairs:
assert _IsValidHavingCond(cond), cond
assert cond.count('%s') == len(args), cond
self.having_conds.append(cond)
self.having_args.extend(args)
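
# Hedged usage sketch (not part of the original module): start with a
# Make*() factory, add clauses, then Generate() the SQL string plus the
# escaped argument list.
#
#   stmt = Statement.MakeSelect('Issue', ['id', 'summary'])
#   stmt.AddWhereTerms([('status = %s', ['New'])])
#   stmt_str, args = stmt.Generate()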
def PlaceHolders(sql_args):
"""Return a comma-separated list of %s placeholders for the given args."""
return ','.join('%s' for _ in sql_args)
TABLE_PAT = '[A-Z][_a-zA-Z0-9]+'
COLUMN_PAT = '[a-z][_a-z]+'
COMPARE_OP_PAT = '(<|>|=|!=|>=|<=|LIKE|NOT LIKE)'
SHORTHAND = {
'table': TABLE_PAT,
'column': COLUMN_PAT,
'tab_col': r'(%s\.)?%s' % (TABLE_PAT, COLUMN_PAT),
'placeholder': '%s', # That's a literal %s that gets passed to MySQLdb
'multi_placeholder': '%s(, ?%s)*',
'compare_op': COMPARE_OP_PAT,
'opt_asc_desc': '( ASC| DESC)?',
'opt_alias': '( AS %s)?' % TABLE_PAT,
'email_cond': (r'\(?'
r'('
r'(LOWER\(Spare\d+\.email\) IS NULL OR )?'
r'LOWER\(Spare\d+\.email\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'hotlist_cond': (r'\(?'
r'('
r'(LOWER\(Cond\d+\.name\) IS NULL OR )?'
r'LOWER\(Cond\d+\.name\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'phase_cond': (r'\(?'
r'('
r'(LOWER\(Phase\d+\.name\) IS NULL OR )?'
r'LOWER\(Phase\d+\.name\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))?'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'approval_cond': (r'\(?'
r'('
r'(LOWER\(Cond\d+\.status\) IS NULL OR )?'
r'LOWER\(Cond\d+\.status\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
}
def _MakeRE(regex_str):
"""Return a regular expression object, expanding our shorthand as needed."""
return re.compile(regex_str.format(**SHORTHAND))
TABLE_RE = _MakeRE('^{table}$')
TAB_COL_RE = _MakeRE('^{tab_col}$')
USE_CLAUSE_RE = _MakeRE(
r'^USE INDEX \({column}\) USE INDEX FOR ORDER BY \({column}\)$')
HAVING_RE_LIST = [
_MakeRE(r'^COUNT\(\*\) {compare_op} {placeholder}$')]
COLUMN_RE_LIST = [
TAB_COL_RE,
_MakeRE(r'\*'),
_MakeRE(r'COUNT\(\*\)'),
_MakeRE(r'COUNT\({tab_col}\)'),
_MakeRE(r'COUNT\(DISTINCT\({tab_col}\)\)'),
_MakeRE(r'MAX\({tab_col}\)'),
_MakeRE(r'MIN\({tab_col}\)'),
_MakeRE(r'GROUP_CONCAT\((DISTINCT )?{tab_col}( ORDER BY {tab_col})?' \
r'( SEPARATOR \'.*\')?\)'),
]
JOIN_RE_LIST = [
TABLE_RE,
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?'
r'( AND {tab_col} = {tab_col})?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?'
r'( AND {tab_col} IS NULL)?'
r'( AND \({tab_col} IS NULL'
r' OR {tab_col} NOT IN \({multi_placeholder}\)\))?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND \(?{tab_col} {compare_op} {placeholder}\)?'
r'( AND {tab_col} = {tab_col})?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND {tab_col} = {tab_col}$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND \({tab_col} IS NULL OR'
r' {tab_col} != {placeholder}\)$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r' AND LOWER\({tab_col}\) = LOWER\({placeholder}\)'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col} AND {email_cond}$'),
_MakeRE(
r'^{table}{opt_alias} ON {email_cond}$'),
_MakeRE(
r'^{table}{opt_alias} ON '
r'\({tab_col} = {tab_col} OR {tab_col} = {tab_col}\)$'),
_MakeRE(
r'^\({table} AS {table} JOIN User AS {table} '
r'ON {tab_col} = {tab_col} AND {email_cond}\) '
r'ON Issue(Snapshot)?.id = {tab_col}'
r'( AND {tab_col} IS NULL)?'),
_MakeRE(
r'^\({table} JOIN Hotlist AS {table} '
r'ON {tab_col} = {tab_col} AND {hotlist_cond}\) '
r'ON Issue.id = {tab_col}?'),
_MakeRE(
r'^\({table} AS {table} JOIN IssuePhaseDef AS {table} '
r'ON {tab_col} = {tab_col} AND {phase_cond}\) '
r'ON Issue.id = {tab_col}?'),
_MakeRE(
r'^IssuePhaseDef AS {table} ON {phase_cond}'),
_MakeRE(
r'^Issue2ApprovalValue AS {table} ON {tab_col} = {tab_col} '
r'AND {tab_col} = {placeholder} AND {approval_cond}'),
_MakeRE(
r'^{table} AS {table} ON {tab_col} = {tab_col} '
r'LEFT JOIN {table} AS {table} ON {tab_col} = {tab_col}'),
]
ORDER_BY_RE_LIST = [
_MakeRE(r'^{tab_col}{opt_asc_desc}$'),
_MakeRE(r'^LOWER\({tab_col}\){opt_asc_desc}$'),
_MakeRE(r'^ISNULL\({tab_col}\){opt_asc_desc}$'),
_MakeRE(r'^\(ISNULL\({tab_col}\) AND ISNULL\({tab_col}\)\){opt_asc_desc}$'),
_MakeRE(r'^FIELD\({tab_col}, {multi_placeholder}\){opt_asc_desc}$'),
_MakeRE(r'^FIELD\(IF\(ISNULL\({tab_col}\), {tab_col}, {tab_col}\), '
r'{multi_placeholder}\){opt_asc_desc}$'),
_MakeRE(r'^CONCAT\({tab_col}, {tab_col}\){opt_asc_desc}$'),
]
GROUP_BY_RE_LIST = [
TAB_COL_RE,
]
WHERE_COND_RE_LIST = [
_MakeRE(r'^TRUE$'),
_MakeRE(r'^FALSE$'),
_MakeRE(r'^{tab_col} IS NULL$'),
_MakeRE(r'^{tab_col} IS NOT NULL$'),
_MakeRE(r'^{tab_col} {compare_op} {tab_col}$'),
_MakeRE(r'^{tab_col} {compare_op} {placeholder}$'),
_MakeRE(r'^{tab_col} %% {placeholder} = {placeholder}$'),
_MakeRE(r'^{tab_col} IN \({multi_placeholder}\)$'),
_MakeRE(r'^{tab_col} NOT IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) IS NULL$'),
_MakeRE(r'^LOWER\({tab_col}\) IS NOT NULL$'),
_MakeRE(r'^LOWER\({tab_col}\) {compare_op} {placeholder}$'),
_MakeRE(r'^LOWER\({tab_col}\) IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) NOT IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) LIKE {placeholder}$'),
_MakeRE(r'^LOWER\({tab_col}\) NOT LIKE {placeholder}$'),
_MakeRE(r'^timestep < \(SELECT MAX\(j.timestep\) FROM Invalidate AS j '
r'WHERE j.kind = %s '
r'AND j.cache_key = Invalidate.cache_key\)$'),
    _MakeRE(r'^\({tab_col} IS NULL OR {tab_col} {compare_op} {placeholder}\) '
            r'AND \({tab_col} IS NULL OR {tab_col} {compare_op} {placeholder}'
            r'\)$'),
    _MakeRE(r'^\({tab_col} IS NOT NULL AND {tab_col} {compare_op} '
            r'{placeholder}\) OR \({tab_col} IS NOT NULL AND {tab_col} '
            r'{compare_op} {placeholder}\)$'),
]
# Note: We never use ';' for multiple statements, '@' for SQL variables, or
# any quoted strings in stmt_str (quotes are put in by MySQLdb for args).
STMT_STR_RE = re.compile(
r'\A(SELECT|UPDATE|DELETE|INSERT|REPLACE) [\'-+=!<>%*.,()\w\s]+\Z',
re.MULTILINE)
def _IsValidDBValue(val):
if isinstance(val, basestring):
return '\x00' not in val
return True
def _IsValidTableName(table_name):
return TABLE_RE.match(table_name)
def _IsValidColumnName(column_expr):
return any(regex.match(column_expr) for regex in COLUMN_RE_LIST)
def _IsValidUseClause(use_clause):
return USE_CLAUSE_RE.match(use_clause)
def _IsValidHavingCond(cond):
if cond.startswith('(') and cond.endswith(')'):
cond = cond[1:-1]
if ' OR ' in cond:
return all(_IsValidHavingCond(c) for c in cond.split(' OR '))
if ' AND ' in cond:
return all(_IsValidHavingCond(c) for c in cond.split(' AND '))
return any(regex.match(cond) for regex in HAVING_RE_LIST)
def _IsValidJoin(join):
return any(regex.match(join) for regex in JOIN_RE_LIST)
def _IsValidOrderByTerm(term):
return any(regex.match(term) for regex in ORDER_BY_RE_LIST)
def _IsValidGroupByTerm(term):
return any(regex.match(term) for regex in GROUP_BY_RE_LIST)
def _IsValidWhereCond(cond):
if cond.startswith('NOT '):
cond = cond[4:]
if cond.startswith('(') and cond.endswith(')'):
cond = cond[1:-1]
if any(regex.match(cond) for regex in WHERE_COND_RE_LIST):
return True
if ' OR ' in cond:
return all(_IsValidWhereCond(c) for c in cond.split(' OR '))
if ' AND ' in cond:
return all(_IsValidWhereCond(c) for c in cond.split(' AND '))
return False
def _IsValidStatement(stmt_str):
"""Final check to make sure there is no funny junk sneaking in somehow."""
return (STMT_STR_RE.match(stmt_str) and
'--' not in stmt_str)
def _BoolsToInts(arg_list):
"""Convert any True values to 1s and Falses to 0s.
Google's copy of MySQLdb has bool-to-int conversion disabled,
and yet it seems to be needed otherwise they are converted
to strings and always interpreted as 0 (which is FALSE).
Args:
arg_list: (nested) list of SQL statment argument values, which may
include some boolean values.
Returns:
The same list, but with True replaced by 1 and False replaced by 0.
"""
result = []
for arg in arg_list:
if isinstance(arg, (list, tuple)):
result.append(_BoolsToInts(arg))
elif arg is True:
result.append(1)
elif arg is False:
result.append(0)
else:
result.append(arg)
return result
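
# For example (illustration, not in the original module):
#   _BoolsToInts([True, [False, 3]]) == [1, [0, 3]]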
|
[
"[email protected]"
] | |
9cca188f479ea7db11a4f564b341db58ed2c0248
|
5c11238d2798aaccdabfd09102a5839b23619c39
|
/game_object.py
|
ace9b4f9e9c2f6abc2795cc975ce7e27e680e6e8
|
[] |
no_license
|
ko-marinov/2048
|
e17a5724c45c60b695413c6a97923b7db1d9de6b
|
e5bdb641064f2532a15fca6d753d1e3ace8de2c9
|
refs/heads/master
| 2022-04-18T19:19:23.941842 | 2020-04-19T07:17:14 | 2020-04-19T07:17:14 | 255,260,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,522 |
py
|
from vec2 import Vec2
game_objects = []
class GameObject:
next_proc_id = 0
def __init__(self, pos=Vec2(0, 0), parent=None):
self.parent = parent
self._pos = Vec2(pos)
self.processes = {}
self.register()
def destroy(self):
game_objects.remove(self)
def register(self):
game_objects.append(self)
@property
def gpos(self):
parent_pos = self.parent.gpos if self.parent else Vec2(0, 0)
return self.pos + parent_pos
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, value):
self._pos = Vec2(value)
def animate_transition(self, dest, duration):
velocity = (dest - self.pos) / duration
time_left = duration
def transpose_process(dtime):
nonlocal dest
nonlocal velocity
nonlocal time_left
self.pos += velocity * dtime
time_left -= dtime
if time_left <= 0:
self.pos = dest
return "DONE"
return "INPROGRESS"
GameObject.next_proc_id += 1
self.processes[GameObject.next_proc_id] = transpose_process
def draw(self, surface):
pass
def update(self, dtime):
finished_procs = []
for id, proc in self.processes.items():
status = proc(dtime)
if status == "DONE":
finished_procs.append(id)
for id in finished_procs:
del self.processes[id]
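
if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); it assumes Vec2
    # supports +, -, and scalar * and /, as the code above already implies.
    obj = GameObject(pos=Vec2(0, 0))
    obj.animate_transition(Vec2(100, 0), 0.5)
    for _ in range(5):
        obj.update(0.1)  # pump the transition with 0.1 s frame deltas
    print(obj.pos)       # lands exactly on the destination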
|
[
"[email protected]"
] | |
b15a144177a3426684ef389cecaaf365fc24dcb7
|
f54070cd3048a3645cb25f301592a904d387a1c9
|
/python_prgrams/testpython/file7.py
|
d8e98c05bbd14af3e9bf261e2d23c7dc207b2a22
|
[] |
no_license
|
mak705/Python_interview
|
02bded60417f1e6e2d81e1f6cde6961d95da2a8e
|
aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d
|
refs/heads/master
| 2020-03-22T21:03:34.018919 | 2019-11-15T08:51:34 | 2019-11-15T08:51:34 | 140,653,056 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
## Count (and print) the lines in a file, skipping '#From' header lines.
count = 0
fhand = open('mul.py')
for line in fhand:
    line = line.rstrip()
    if not line.startswith('#From'):
        count += 1
        print(line)
fhand.close()
print('Line count:', count)
|
[
"[email protected]"
] | |
96874c2a3103ac1f32ede6d5e220bb4334df45cb
|
f2a3c3800b8dc9dbf6548bf17f7847b6435ef3bc
|
/test/bridge_init.py
|
7a67bd362458b88bdfb7ec99735c5ece8a7c38b0
|
[
"BSD-3-Clause"
] |
permissive
|
slongfield/pyfmt
|
f69f6cb3d5eae283b86c055dd813585e5d225e78
|
ea85ff4c361f54bcfc91ede46f88de49eb5f92e7
|
refs/heads/master
| 2022-03-06T02:39:36.556939 | 2022-02-22T01:26:16 | 2022-02-22T01:26:16 | 105,104,916 | 74 | 3 |
BSD-3-Clause
| 2022-02-22T01:26:17 | 2017-09-28T05:13:48 |
Go
|
UTF-8
|
Python
| false | false | 883 |
py
|
import os
from _bridge import ffi
lib = ffi.dlopen(os.path.join(os.path.dirname(__file__), "libbridge.so"))
def FormatOneInt(fmt, a):
return ffi.string(lib.FormatOneInt(fmt, a))
def FormatOneFloat(fmt, a):
return ffi.string(lib.FormatOneFloat(fmt, a))
def FormatOneDouble(fmt, a):
return ffi.string(lib.FormatOneDouble(fmt, a))
def FormatOneString(fmt, a):
return ffi.string(lib.FormatOneString(fmt, a))
def FormatNothing(fmt):
return ffi.string(lib.FormatNothing(fmt))
def FormatOneIntError(fmt, a):
return lib.FormatOneIntError(fmt, a)
def FormatOneFloatError(fmt, a):
return lib.FormatOneFloatError(fmt, a)
def FormatOneDoubleError(fmt, a):
return lib.FormatOneDoubleError(fmt, a)
def FormatOneStringError(fmt, a):
return lib.FormatOneStringError(fmt, a)
def FormatNothingError(fmt):
return lib.FormatNothingError(fmt)
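
# Hedged usage sketch (not part of the original module); cffi char*
# arguments generally want byte strings, so the literals below are bytes:
#
#   print(FormatOneInt(b"{:d}", 42))
#   print(FormatNothing(b"plain text, no fields"))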
|
[
"[email protected]"
] | |
c3b3a5014f07cb69548dc3237c5d9b5300052ae6
|
35bb7981230e8a0500a64a07665542cb889064cb
|
/mysite/polls/models.py
|
d2d7a7a86901799a3a9db0fb620fa86e2b0ffb90
|
[] |
no_license
|
shineping/question_choice_vote
|
53019cf630556d2968e9d1ef990397994b355e81
|
57f2aa36bd0a44b40ee5fe9e306fcc1db9c3ee56
|
refs/heads/master
| 2020-04-12T22:23:50.783893 | 2018-12-22T06:49:04 | 2018-12-22T06:49:04 | 162,788,626 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 695 |
py
|
import datetime

from django.db import models
from django.utils import timezone

# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
    def was_published_recently(self):
        # Future-dated questions should not count as "recently published".
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
[
"[email protected]"
] | |
5bc9f7cb725e608b584db5bb260968104795a451
|
8aefdf04c115c6c6ab64997576ced97d4727dd06
|
/curation-api/src/users/migrations/0003_auto_20170809_0921.py
|
b1d063c42c10db300647e9e67f63a3b2095bfcd5
|
[] |
no_license
|
mohanj1919/django_app_test
|
a0d47bc98c604d81253c74488dcdbc2ccd039863
|
5d5bc4c1eecbf627d38260e4d314d8451d67a4f5
|
refs/heads/master
| 2021-05-08T06:01:21.712986 | 2017-10-11T12:12:07 | 2017-10-11T12:12:07 | 106,544,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 485 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-09 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170703_1555'),
]
operations = [
migrations.AlterField(
model_name='curationuser',
name='phone_number',
field=models.CharField(max_length=15, null=True, unique=True),
),
]
|
[
"[email protected]"
] | |
0dca24b9c6f29b16158f770d50c5117e462afa62
|
568b99142e5a9a748ad86d81b84efe4fc97a7228
|
/untitled1.py
|
0fb9b1bf83295d4bb749879857c8c0c9d4815b14
|
[] |
no_license
|
WillAlex2017/-offerPython-
|
75814a84d602c8e1ebc914a089ab988d60accdc1
|
0ea94c1bda038060e44f7088d857259b147563d6
|
refs/heads/master
| 2020-04-05T17:56:40.602129 | 2018-11-11T13:27:53 | 2018-11-11T13:27:53 | 157,082,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 25 15:43:17 2018
@author: willalex
"""
class Solution:
    def GetLeastNumbers_Solution(self, tinput, k):
        # write code here
        if k > len(tinput):
            return []
        # sorted() alone keeps duplicates; wrapping tinput in set() would
        # drop them and return the wrong k smallest numbers.
        return sorted(tinput)[:k]
|
[
"[email protected]"
] | |
15758ecc7348e2cb18036fa725588ad5e88e4386
|
5829f36c3d24a8ba3d814288ee45f4ee34e52f74
|
/week_4/day_3/daily_challenge/daily_challenge.py
|
2c4a45fddf10118c722b1ba29cae96a213b22f9d
|
[] |
no_license
|
TyranSchmidt/developers_institute
|
3c71441e55b9d525f6375c1a66ee67110fee195a
|
9cf2b7bcc2a946267c554e3f9763668ea661f6d4
|
refs/heads/master
| 2021-04-21T11:05:21.085274 | 2020-06-18T14:15:54 | 2020-06-18T14:15:54 | 249,774,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
user_input = input("Do you want to encrypt or decrypt (text-sensitive)?: ")
user_text = input("Please enter the text: ")
enc_text = ""
dec_text = ""
for i in range(len(user_text)):
character = user_text[i]
if user_input == "encrypt":
enc_text += chr(ord(character) + 3)
elif user_input == "decrypt":
dec_text += chr(ord(character) - 3)
else:
print("That is not a valid argument.")
if user_input == "encrypt":
print(enc_text)
elif user_input == "decrypt":
print(dec_text)
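
# A hedged generalization (not in the original): both loops are a Caesar
# shift of +3/-3, so one helper covers encryption and decryption.
def caesar(text, shift):
    return "".join(chr(ord(ch) + shift) for ch in text)

# e.g. caesar("abc", 3) == "def" and caesar("def", -3) == "abc"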
|
[
"[email protected]"
] | |
59919a9d9900991467fcaabb4cc8e2acaff0e9e0
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/aphotomanager/testcase/firstcases/testcase5_028.py
|
6856a16cc6fb6a518aa1c467766e72d1e3596a1c
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,391 |
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.k3b.android.androFotoFinder',
'appActivity' : 'de.k3b.android.androFotoFinder.FotoGalleryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.k3b.android.androFotoFinder/de.k3b.android.androFotoFinder.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase028
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Show in new gallery\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/Pictures/Wikipedia/Michael Mosman District Judge.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/pic4.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/action_edit\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/menu_item_share\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_028\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.k3b.android.androFotoFinder'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"[email protected]"
] | |
323ec850c34bcc51687b6833ac475f05d6a343e0
|
260bc967aa27a6cfa57e8729bf6fa51322e774ec
|
/mysite/polls/views.py
|
d3f1eab59a200c5569b82021815b4291c3b2555a
|
[] |
no_license
|
mawrobel/django-polls
|
136ea97ebf0bb858078f076afd0dbe4a9db3fa3d
|
4e9314a8edde9fdc114a635952009d855eb9b027
|
refs/heads/master
| 2022-01-17T06:07:02.740872 | 2019-07-22T19:49:55 | 2019-07-22T19:49:55 | 198,291,991 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,155 |
py
|
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except(KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question':question,
'error_message':"You didn't select a choice",
})
else:
        selected_choice.votes += 1
        selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',args=(question.id, )))
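
# A hedged note (not in the original view): the read-increment-save above
# can race under concurrent votes; Django's F() expression pushes the
# increment into the database instead:
#
#   from django.db.models import F
#   selected_choice.votes = F('votes') + 1
#   selected_choice.save()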
|
[
"[email protected]"
] | |
92f52ac25e6d526b6ee75266d8e6e3dcecf56332
|
bd8136c3ec76ab2d85db9be6cf8ecbd7db36f325
|
/split_train_test.py
|
a83a9f2ecc320ab556826689f75f50f4880e744d
|
[] |
no_license
|
ViewFuture/Fs-net
|
52b5e0510bc137e11adac56eb4511637da741dda
|
e8e41b47ea00e4d1029d82133caef992f73ba255
|
refs/heads/master
| 2022-12-01T13:20:05.827782 | 2020-08-10T02:33:34 | 2020-08-10T02:33:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
py
|
import random
f1 = open("NIMS.arff","r")
f_train = open("NIMS_train.arff","w")
f_test = open("NIMS_test.arff","w")
lines = f1.readlines()
for i in range(28, len(lines)):      # skip the 28-line ARFF header
    x = random.randint(0, 20)        # 1 in 21 rows (~4.8%) go to the test set
    line = lines[i]
    if x == 0:
        f_test.write(line)
    else:
        f_train.write(line)
f1.close()
f_train.close()
f_test.close()
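
# Hedged note (not in the original script): calling random.seed(42) before
# the loop would make the train/test split reproducible across runs.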
|
[
"[email protected]"
] | |
33984d775374f698a16233b294ee3e505d447c22
|
75519d2a9bf55e2d9376ea08a36676948a8b232c
|
/ui/uikits/TextSteam.py
|
222dfb2dcd7959a0cc728b523b9bf881ec8afbf0
|
[
"MIT"
] |
permissive
|
CGFanTuan/damgteam
|
9c32d59cbd0ecb9d3acffd9b902b918c40797e14
|
aec414f084f6ab6ec5897314390605aaa8380d62
|
refs/heads/master
| 2020-09-17T00:29:24.832648 | 2019-11-25T09:51:13 | 2019-11-25T09:51:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,118 |
py
|
# -*- coding: utf-8 -*-
"""
Script Name: TextSteam.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
from PyQt5.QtCore import QTextStream
from appData import __copyright__
class TextStream(QTextStream):
Type = 'DAMGSTREAM'
key = 'TextStream'
_name = 'DAMG Text Stream'
_copyright = __copyright__
@property
def copyright(self):
return self._copyright
@property
def name(self):
return self._name
@name.setter
def name(self, newName):
self._name = newName
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 15/11/2019 - 5:43 PM
# © 2017 - 2018 DAMGteam. All rights reserved
|
[
"[email protected]"
] | |
8ea3b5fe878c8992db43d66125d44a9c5a56b3e4
|
c4755fc30069506249cea50240a42f9765c9a87d
|
/13_dqn_keras_type_c/05_0_Keras_type_c_frozen_lake_dueling_GREEN.py
|
b32bc6c23f081ba316f5a235a07cd0aa0f85d058
|
[] |
no_license
|
RichardMinsooGo-RL-Single-agent/2_frozen_lake
|
32e5ea206b2d7d20bfc226748f1e30fe54def378
|
680810a9ff162cdce646d08bb016cc0661db397f
|
refs/heads/master
| 2022-12-14T17:32:50.642594 | 2020-09-07T05:16:30 | 2020-09-07T05:16:30 | 277,676,117 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,601 |
py
|
import random
import numpy as np
import time, datetime
from collections import deque
import pylab
import sys
import pickle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.layers import *
from keras.models import Sequential,Model
import keras
from keras import backend as K_back
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import Adam
state_size = 64
action_size = 5
n_rows = 9
n_cols = 9
model_path = "save_model/"
graph_path = "save_graph/"
# Make folder for save data
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(graph_path):
os.makedirs(graph_path)
load_model = True
class Frozen_Lake:
def __init__(self):
# player velocity, max velocity, downward accleration, accleration on flap
self.agent_row = 0
self.agent_col = 0
self.rand_init = np.random.randint(low=0, high=3)
def reset_env(self):
self.agent_row = 0
self.agent_col = 0
self.rand_init = np.random.randint(low=0, high=3)
state = np.zeros((n_rows,n_cols))
# rand_init = 0
state[2][(self.rand_init+1)%9] = 2
state[4][(self.rand_init+4)%9] = 2
state[6][(self.rand_init+7)%9] = 2
state[8][8] = 4
state[0][0] = 5
return state
def frame_step(self, action, ep_step):
if action == 0:
if (self.agent_row + 1) < 9:
self.agent_row += 1
if action == 1:
if self.agent_row > 0:
self.agent_row -= 1
if action == 2:
if self.agent_col > 0:
self.agent_col -= 1
if action == 3:
if (self.agent_col+1) < 9:
self.agent_col += 1
agent_pos = np.zeros((9,9))
agent_pos[self.agent_row][self.agent_col] = 5
ice_lake = np.zeros((9,9))
hole_1_col = int((self.rand_init+ep_step+1)%9)
hole_2_col = int((self.rand_init+ep_step+4)%9)
hole_3_col = int((self.rand_init+ep_step+7)%9)
ice_lake[2][hole_1_col] = 2
ice_lake[4][hole_2_col] = 2
ice_lake[6][hole_3_col] = 2
ice_lake[8][8] = 4
next_state = agent_pos + ice_lake
# print(next_state)
# reward = agent_row - 8 + agent_col - 8
reward = -1
done = False
        # Cell codes: agent=5, hole=2, goal=4; overlaps sum, so 7 means the
        # agent is on a hole and 9 means the agent reached the goal.
        if np.count_nonzero(next_state == 7) > 0:
            if ep_step < 15:
                reward = reward - 200
            else:
                reward = reward - 100
            # done = True
        if np.count_nonzero(next_state == 9) > 0:
            done = True
            reward = 500
if ep_step == 500:
done = True
return next_state, reward, done
# it uses Neural Network to approximate q function
# and replay memory & target q network
class DQN_agent:
def __init__(self):
# get size of state and action
self.progress = " "
self.state_size = state_size
self.action_size = action_size
# train time define
self.training_time = 20*60
# These are hyper parameters for the DQN
self.learning_rate = 0.001
self.discount_factor = 0.99
self.epsilon_max = 0.2
# final value of epsilon
self.epsilon_min = 0.0001
self.epsilon_decay = 0.0005
self.epsilon = self.epsilon_max
self.step = 0
self.score = 0
self.episode = 0
self.hidden1, self.hidden2 = 251, 251
self.ep_trial_step = 500
# Parameter for Experience Replay
self.size_replay_memory = 10000
self.batch_size = 64
self.input_shape = (n_rows,n_cols,1)
# Experience Replay
self.memory = deque(maxlen=self.size_replay_memory)
# Parameter for Target Network
self.target_update_cycle = 100
# create main model and target model
self.model = self.build_model()
self.target_model = self.build_model()
# approximate Q function using Neural Network
# state is input and Q Value of each action is output of network
def build_model(self):
state = Input(shape=self.input_shape)
net1 = Convolution2D(32, kernel_size=(3, 3),activation='relu', \
padding = 'valid', input_shape=self.input_shape)(state)
net2 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding = 'valid')(net1)
net3 = MaxPooling2D(pool_size=(2, 2))(net2)
net4 = Flatten()(net3)
lay_2 = Dense(units=self.hidden2,activation='relu',kernel_initializer='he_uniform',\
name='hidden_layer_1')(net4)
value_= Dense(units=1,activation='linear',kernel_initializer='he_uniform',\
name='Value_func')(lay_2)
ac_activation = Dense(units=self.action_size,activation='linear',\
kernel_initializer='he_uniform',name='action')(lay_2)
#Compute average of advantage function
avg_ac_activation = Lambda(lambda x: K_back.mean(x,axis=1,keepdims=True))(ac_activation)
#Concatenate value function to add it to the advantage function
concat_value = Concatenate(axis=-1,name='concat_0')([value_,value_])
concat_avg_ac = Concatenate(axis=-1,name='concat_ac_{}'.format(0))([avg_ac_activation,avg_ac_activation])
for i in range(1,self.action_size-1):
concat_value = Concatenate(axis=-1,name='concat_{}'.format(i))([concat_value,value_])
concat_avg_ac = Concatenate(axis=-1,name='concat_ac_{}'.format(i))([concat_avg_ac,avg_ac_activation])
#Subtract concatenated average advantage tensor with original advantage function
ac_activation = Subtract()([ac_activation,concat_avg_ac])
#Add the two (Value Function and modified advantage function)
merged_layers = Add(name='final_layer')([concat_value,ac_activation])
model = Model(inputs = state,outputs=merged_layers)
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
# pick samples randomly from replay memory (with batch_size)
def train_model(self):
minibatch = random.sample(self.memory, self.batch_size)
states = np.zeros((self.batch_size, n_rows, n_cols, 1))
next_states = np.zeros((self.batch_size, n_rows, n_cols, 1))
actions, rewards, dones = [], [], []
for i in range(self.batch_size):
states[i] = minibatch[i][0]
actions.append( minibatch[i][1])
rewards.append( minibatch[i][2])
next_states[i] = minibatch[i][3]
dones.append( minibatch[i][4])
q_value = self.model.predict(states)
tgt_q_value_next = self.target_model.predict(next_states)
for i in range(self.batch_size):
# Q Learning: get maximum Q value at s' from target model
if dones[i]:
q_value[i][actions[i]] = rewards[i]
else:
q_value[i][actions[i]] = rewards[i] + self.discount_factor * (np.amax(tgt_q_value_next[i]))
# Decrease epsilon while training
if self.epsilon > self.epsilon_min:
self.epsilon -= self.epsilon_decay
else :
self.epsilon = self.epsilon_min
# make minibatch which includes target q value and predicted q value
# and do the model fit!
self.model.fit(states, q_value, batch_size=self.batch_size, epochs=1, verbose=0)
# get action from model using epsilon-greedy policy
def get_action(self, state):
# choose an action_arr epsilon greedily
action_arr = np.zeros(self.action_size)
action = 0
if random.random() < self.epsilon:
# print("----------Random action_arr----------")
action = random.randrange(self.action_size)
action_arr[action] = 1
else:
# Predict the reward value based on the given state
Q_value = self.model.predict(state)
action = np.argmax(Q_value[0])
action_arr[action] = 1
return action_arr, action
# save sample <s,a,r,s'> to the replay memory
def append_sample(self, state, action, reward, next_state, done):
#in every action put in the memory
self.memory.append((state, action, reward, next_state, done))
# after some time interval update the target model to be same with model
def Copy_Weights(self):
self.target_model.set_weights(self.model.get_weights())
def save_model(self):
# Save the variables to disk.
self.model.save_weights(model_path+"/model.h5")
save_object = (self.epsilon, self.episode, self.step)
with open(model_path + '/epsilon_episode.pickle', 'wb') as ggg:
pickle.dump(save_object, ggg)
print("\n Model saved in file: %s" % model_path)
def main():
agent = DQN_agent()
game = Frozen_Lake()
# Initialize variables
# Load the file if the saved file exists
if os.path.isfile(model_path+"/Model_dueling_0.h5"):
agent.model.load_weights(model_path+"/Model_dueling_0.h5")
if os.path.isfile(model_path + '/epsilon_episode.pickle'):
with open(model_path + '/epsilon_episode.pickle', 'rb') as ggg:
agent.epsilon, agent.episode, agent.step = pickle.load(ggg)
print('\n\n Variables are restored!')
else:
print('\n\n Variables are initialized!')
agent.epsilon = agent.epsilon_max
avg_score = 0
episodes, scores = [], []
# start training
# Step 3.2: run the game
display_time = datetime.datetime.now()
# print("\n\n",game_name, "-game start at :",display_time,"\n")
start_time = time.time()
# initialize target model
agent.Copy_Weights()
while time.time() - start_time < agent.training_time and avg_score < 470:
# while agent.episode < 1:
state = game.reset_env()
done = False
agent.score = 0
ep_step = 0
rewards = 0
# if agent.progress == "Training":
# print(state)
state = state.reshape(1,n_rows,n_cols,1)
while not done and ep_step < agent.ep_trial_step:
if len(agent.memory) < agent.size_replay_memory:
agent.progress = "Exploration"
else:
agent.progress = "Training"
ep_step += 1
agent.step += 1
action_arr, action = agent.get_action(state)
next_state, reward, done = game.frame_step(action, ep_step)
rewards += reward
next_state = next_state.reshape(1,n_rows,n_cols,1)
agent.append_sample(state, action, reward, next_state, done)
# print("next_state_shape :\n", next_state.shape)
state = next_state
# sys.exit()
if agent.progress == "Training":
agent.train_model()
if done or ep_step % agent.target_update_cycle == 0:
# return# copy q_net --> target_net
agent.Copy_Weights()
agent.score = rewards
if done:
if agent.progress == "Training":
agent.episode += 1
scores.append(agent.score)
episodes.append(agent.episode)
avg_score = np.mean(scores[-min(30, len(scores)):])
print('episode :{:>6,d}'.format(agent.episode),'/ ep step :{:>5,d}'.format(ep_step), \
'/ rewards :{:>4.1f}'.format(rewards),'/ status :', agent.progress, \
'/ epsilon :{:>1.4f}'.format(agent.epsilon),'/ last 30 avg :{:> 4.1f}'.format(avg_score) )
break
# Save model
agent.save_model()
pylab.plot(episodes, scores, 'b')
pylab.savefig("./save_graph/cartpole_duelingdqn.png")
e = int(time.time() - start_time)
    print(' Elapsed time :{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60), e % 60))
sys.exit()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
2e46169ee7920f10d3f35283121fc25deec3598f
|
7d82a28dd3b30f147ce0469f8245ee73f5ae15f1
|
/Oop/practiseseconf.py
|
639a89a1d280982cad70919c93bb51278eb93ab0
|
[] |
no_license
|
Shreejan-git/allpy
|
e9d8530606be18fecb710cb759da9ae03b1e0a7a
|
f245c34b371090bdb530c25b77337882a3acacf6
|
refs/heads/master
| 2023-04-20T22:24:26.688556 | 2021-05-12T04:33:38 | 2021-05-12T04:33:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from abc import ABC, abstractmethod
class Shape(ABC):
@abstractmethod
def printacom(self):
return True
class Rectangle(Shape):
    # A concrete subclass must implement every abstract method;
    # otherwise instantiating it raises TypeError.
    def printacom(self):
        print("Hahaha")

a = Rectangle()
a.printacom()
|
[
"[email protected]"
] | |
4f96b620da95552342807310090b498dadae38b1
|
9975aae590a527597627715975e2b09b59a06476
|
/lab6/lab6_cp2.py
|
3a019f1aaeec7247efec14d132724c3f6bd7c4e2
|
[] |
no_license
|
LuHaofan/ECE437
|
0ea636cb17711b53abfc5af0f769b656f59ddcd9
|
85a814ffe0accdcfd02e83dfd7c2803be7b37ab3
|
refs/heads/master
| 2021-01-06T19:41:58.272961 | 2020-05-13T15:48:14 | 2020-05-13T15:48:14 | 241,496,709 | 0 | 1 | null | 2020-04-06T20:54:45 | 2020-02-19T00:21:06 |
Verilog
|
UTF-8
|
Python
| false | false | 11,385 |
py
|
# NOTE 1
# If your power supply goes into an error state (i.e., the word
# error is printed on the front of the device), use this command
# power_supply.write("*CLS")
# to clear the error so that you can rerun your code. The supply
# typically beeps after an error has occurred.
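# A minimal recovery sketch (assumes `power_supply` is an open pyvisa handle,
# as created further down in this script; the APPLy command shown is illustrative):
#     try:
#         power_supply.write("APPLy P6V, 1.0, 0.13")
#     except visa.VisaIOError:
#         power_supply.write("*CLS")  # clear the error state, then retry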
import visa
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import time
# import various libraries necessery to run your Python code
import sys # system related library
ok_loc = 'C:\\Program Files\\Opal Kelly\\FrontPanelUSB\\API\\Python\\3.6\\x64'
sys.path.append(ok_loc) # add the path of the OK library
import ok # OpalKelly library
import pickle
mpl.style.use('ggplot')
#%%
# Define FrontPanel device variable, open USB communication and
# load the bit file in the FPGA
dev = ok.okCFrontPanel() # define a device for FrontPanel communication
SerialStatus=dev.OpenBySerial("") # open USB communicaiton with the OK board
ConfigStatus=dev.ConfigureFPGA("JTEG_Test_File.bit") # Configure the FPGA with this bit file
# Check if FrontPanel is initialized correctly and if the bit file is loaded.
# Otherwise terminate the program
print("----------------------------------------------------")
if SerialStatus == 0:
print ("FrontPanel host interface was successfully initialized.")
else:
print ("FrontPanel host interface not detected. The error code number is:" + str(int(SerialStatus)))
print("Exiting the program.")
sys.exit ()
if ConfigStatus == 0:
print ("Your bit file is successfully loaded in the FPGA.")
else:
print ("Your bit file did not load. The error code number is:" + str(int(ConfigStatus)))
print ("Exiting the progam.")
sys.exit ()
print("----------------------------------------------------")
print("----------------------------------------------------")
#%%
# This section of the code cycles through all USB connected devices to the computer.
# The code figures out the USB port number for each instrument.
# The port number for each instrument is stored in a variable named “instrument_id”
# If the instrument is turned off or if you are trying to connect to the
# keyboard or mouse, you will get a message that you cannot connect on that port.
device_manager = visa.ResourceManager()
devices = device_manager.list_resources()
number_of_device = len(devices)
power_supply_id = -1
waveform_generator_id = -1
digital_multimeter_id = -1
oscilloscope_id = -1
# assumes only the DC power supply is connected
for i in range (0, number_of_device):
# check that it is actually the power supply
try:
device_temp = device_manager.open_resource(devices[i])
print("Instrument connect on USB port number [" + str(i) + "] is " + device_temp.query("*IDN?"))
if (device_temp.query("*IDN?") == 'HEWLETT-PACKARD,E3631A,0,3.2-6.0-2.0\r\n'):
power_supply_id = i
if (device_temp.query("*IDN?") == 'HEWLETT-PACKARD,E3631A,0,3.0-6.0-2.0\r\n'):
power_supply_id = i
if (device_temp.query("*IDN?") == 'Agilent Technologies,33511B,MY52301259,3.03-1.19-2.00-52-00\n'):
waveform_generator_id = i
if (device_temp.query("*IDN?") == 'Agilent Technologies,34461A,MY53207918,A.01.10-02.25-01.10-00.35-01-01\n'):
digital_multimeter_id = i
if (device_temp.query("*IDN?") == 'Keysight Technologies,34461A,MY53213065,A.02.08-02.37-02.08-00.49-01-01\n'):
digital_multimeter_id = i
if (device_temp.query("*IDN?") == 'KEYSIGHT TECHNOLOGIES,MSO-X 3024T,MY54440298,07.10.2017042905\n'):
oscilloscope_id = i
device_temp.close()
except:
print("Instrument on USB port number [" + str(i) + "] cannot be connected. The instrument might be powered of or you are trying to connect to a mouse or keyboard.\n")
#%%
# Open the USB communication port with the power supply.
# The power supply is connected on USB port number power_supply_id.
# If the power supply is not connected or turned off, the program will exit.
# Otherwise, the power_supply variable is the handle to the power supply
if (power_supply_id == -1):
print("Power supply instrument is not powered on or connected to the PC.")
else:
print("Power supply is connected to the PC.")
power_supply = device_manager.open_resource(devices[power_supply_id])
if (digital_multimeter_id == -1):
print("Digital multimeter instrument is not powered on or connected to the PC.")
else:
print("Digital multimeter is connected to the PC")
digital_multimeter = device_manager.open_resource(devices[digital_multimeter_id])
#%%
# The power supply output voltage will be swept from 0 to 4.3V in steps of 0.086V.
# This voltage will be applied on the 6V output ports.
# For each voltage applied on the 6V power supply, we will measure the actual
# voltage and current supplied by the power supply.
# If your circuit operates correctly, the applied and measured voltage will be the same.
# If the power supply reaches its maximum allowed current,
# then the applied voltage will not be the same as the measured voltage.
def bit2Temp(tmp):
    # keep the top 13 bits of the 16-bit reading (bin() drops leading zeros,
    # so pad the string back out to 13 characters)
    b = bin(tmp)[2:-3]
    if(len(b)<13):
        b = '0'*(13-len(b))+b
    # MSB set -> negative value in two's complement, 1/16 degree per LSB
    if (b[0] == '1'):
        re = (int(b,2)-8192)/16
    else:
        re = int(b,2)/16
    return re
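# Illustrative check of bit2Temp: a raw reading of 400 (0b0000000110010000)
# keeps its top 13 bits -> 50 counts, and 50 / 16 = 3.125 degrees; readings
# with the sign bit set come out negative via the two's-complement branch.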
output_voltage = np.arange(0, 4.3, 0.086)
supply_voltage_mean = np.array([]) # create an empty list to hold our values
supply_current_mean = np.array([])
power_consump_mean = np.array([])
supply_voltage_std = np.array([]) # create an empty list to hold our values
supply_current_std = np.array([])
power_consump_std = np.array([])
temp_arr_tmp = np.array([])
temp_mean = np.array([])
temp_std = np.array([])
measured_current = np.array([]) # create an empty list to hold our values
power_supply.write("*CLS")
print(power_supply.write("OUTPUT ON")) # power supply output is turned on
# loop through the different voltages we will apply to the power supply
# For each voltage applied on the power supply,
# measure the voltage and current supplied by the 6V power supply
while(True):
print('Please activate the sensor')
time.sleep(1)
dev.UpdateWireOuts()
result = dev.GetWireOutValue(0x20)
if result != 0:
break
for v in output_voltage:
supply_voltage_tmp = np.array([]) # size 100
supply_current_tmp = np.array([])
power_consump_tmp = np.array([])
print('Measuring data for voltage {}'.format(v))
    # apply the desired voltage on the 6V power supply and limit the output current to 0.13A
power_supply.write("APPLy P6V, %0.2f, 0.13" % v)
# read the output current on the 6V power supply
measured_current_tmp = digital_multimeter.query("MEAS:CURR:DC?")
print('Current:{}'.format(measured_current_tmp))
measured_current = np.append(measured_current, float(measured_current_tmp))
flag = 0
    # sample the 6V supply voltage and current 100 times and accumulate power
for i in range(100):
time.sleep(0.01)
measured_voltage_sup = power_supply.query("MEASure:VOLTage:DC? P6V")
supply_voltage_tmp = np.append(supply_voltage_tmp, float(measured_voltage_sup))
measured_current_sup = power_supply.query("MEASure:CURR:DC? P6V")
supply_current_tmp = np.append(supply_current_tmp, float(measured_current_sup))
power_consump_tmp = np.append(power_consump_tmp, float(measured_current_tmp)*float(measured_voltage_sup))
        # turn the power supply output off if the consumed power exceeds 0.5 W
if float(measured_current_tmp)*float(measured_voltage_sup) > 0.5:
print(power_supply.write("OUTPUT OFF"))
flag = 1
# collect temperature data
dev.UpdateWireOuts()
result = dev.GetWireOutValue(0x20)
if result != 0:
temp_arr_tmp = np.append(temp_arr_tmp,bit2Temp(result))
if flag == 1:
break
# Calculate the mean and standard dev of voltage and current
supply_voltage_std = np.append(supply_voltage_std, np.std(supply_voltage_tmp))
supply_voltage_mean = np.append(supply_voltage_mean, np.mean(supply_voltage_tmp))
supply_current_std = np.append(supply_current_std, np.std(supply_current_tmp))
supply_current_mean = np.append(supply_current_mean, np.mean(supply_current_tmp))
power_consump_std = np.append(power_consump_std, np.std(power_consump_tmp))
power_consump_mean = np.append(power_consump_mean, np.mean(power_consump_tmp))
temp_std = np.append(temp_std, np.std(temp_arr_tmp))
temp_mean = np.append(temp_mean, np.mean(temp_arr_tmp))
print('Current Temperature:{}\n'.format(temp_mean[-1]))
# close the power supply USB handler.
# Otherwise you cannot connect to it in the future
if flag == 0:
print(power_supply.write("OUTPUT OFF"))
power_supply.close()
f = open('sup_v_std.pkl','wb')
pickle.dump(supply_voltage_std, f)
f.close()
f = open('sup_v_mean.pkl','wb')
pickle.dump(supply_voltage_mean, f)
f.close()
f = open('sup_c_std.pkl','wb')
pickle.dump(supply_current_std, f)
f.close()
f = open('sup_c_mean.pkl','wb')
pickle.dump(supply_current_mean, f)
f.close()
f = open('sup_p_std.pkl','wb')
pickle.dump(power_consump_std, f)
f.close()
f = open('sup_p_mean.pkl','wb')
pickle.dump(power_consump_mean, f)
f.close()
f = open('sup_t_std.pkl','wb')
pickle.dump(temp_std, f)
f.close()
f = open('sup_t_mean.pkl','wb')
pickle.dump(temp_mean, f)
f.close()
#%%
'''
# plot results (applied voltage vs measured supplied current)
plt.figure()
plt.plot(output_voltage,measured_current)
plt.title("Applied Volts vs. Measured Supplied Current for Diode")
plt.xlabel("Applied Volts [V]")
plt.ylabel("Measured Current [A]")
plt.draw()
'''
# plot results (voltage vs current)
plt.figure()
plt.plot(output_voltage,supply_current_mean)
plt.plot(output_voltage, supply_current_std)
plt.title("Voltage vs. Current for resistor")
plt.xlabel("Voltage Across the diode [V]")
plt.ylabel("Current Through the circuit[A]")
plt.draw()
# plot results (voltage vs voltage)
plt.figure()
plt.plot(output_voltage,supply_voltage_std)
plt.plot(output_voltage, supply_voltage_mean)
plt.title("Voltage vs. supply voltage for resistor")
plt.xlabel("Voltage Across the diode [V]")
plt.ylabel("Voltage Through the circuit[A]")
plt.draw()
plt.figure()
plt.plot(output_voltage,power_consump_std)
plt.plot(output_voltage, power_consump_mean)
plt.title("Voltage vs. power consumption for resistor")
plt.xlabel("Voltage Across the diode [V]")
plt.ylabel("Power consumption Through the circuit[A]")
plt.draw()
plt.figure()
plt.plot(output_voltage,temp_mean)
plt.plot(output_voltage, temp_std)
plt.title("Voltage vs. temperature around resistor")
plt.xlabel("Voltage Across the diode [V]")
plt.ylabel("Temperature")
plt.draw()
# show all plots
plt.show()
|
[
"[email protected]"
] | |
8ef50d9f03de4d23383e7f97a3e35f396daa2066
|
26c331317a8ccfc8404af8a4750d2b1e7cf8388b
|
/data_to_pomdp.py
|
b0b3c5b9888aee0fd0a101aa60fb83655e1b484f
|
[] |
no_license
|
cfoster0/tangle_simulator
|
bc9e8f37536483ac4cfd2281095de861241756dd
|
f6d250d2b93d73d7b610118a15d303621fbc0344
|
refs/heads/master
| 2021-08-24T15:28:09.624960 | 2017-12-10T07:30:51 | 2017-12-10T07:30:51 | 113,731,600 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,573 |
py
|
from collections import Counter
import itertools
"""
discount: 1.0
values: [ reward, cost ]
states: list of strings
actions: adopt build
observations: list of strings
start: <name of starting state>
T: adopt : * : <name of starting state> 1.0
// for all the start-end combinations we see in the data:
T: build : <start-state> : <end-state> : probability
// for all the state-observation combinations we see in the data:
O: * : <end-state> : <observation> %f
R: <action> : <start-state> : <end-state> : <observation> %f
R: build : * : * : * -epsilon
// for all the winning states
R: * : * : <win-state> : * 1.0
"""
alphabet = {}
alphabet[0] = 'a'
alphabet[1] = 'b'
alphabet[2] = 'c'
alphabet[3] = 'd'
alphabet[4] = 'e'
alphabet[5] = 'f'
alphabet[6] = 'g'
alphabet[7] = 'h'
alphabet[8] = 'i'
alphabet[9] = 'j'
alphabet[10] = 'k'
def data_to_pomdp(data, fname='tangle.POMDP'):
with open(fname,'w') as pomdp_file:
S = get_S(data)
A = ['back', 'wait', 'build']
T = get_T(S, A[1:], data)
O = get_O(S, A, data)
O_states = S
win_states = get_R(S, A, data)
discount = 0.9
epsilon = -0.0001
start = 'a-a'
pomdp_file.write("# Tangle File \n")
pomdp_file.write("discount: {f} \n".format(f=discount))
pomdp_file.write("values: reward \n")
pomdp_file.write("states: {s} \n".format(s=' '.join(S)))
pomdp_file.write("actions: {a} \n".format(a=' '.join(A)))
pomdp_file.write("observations: {o} \n".format(o=' '.join(O_states)))
pomdp_file.write("\n")
pomdp_file.write("start: {st} \n".format(st=start))
pomdp_file.write("\n")
pomdp_file.write("T: back : * : {st} 1.0 \n".format(st=start))
for win_state in win_states:
pomdp_file.write("T: * : {ws} : {st} 1.0 \n".format(ws=win_state, st=start))
for (action, start_state, end_state, probability) in T:
pomdp_file.write("T: {a} : {ss} : {es} {p} \n".format(a=action, ss=start_state, es=end_state, p=probability))
for (end_state, observation, probability) in O:
pomdp_file.write("O: * : {es} : {o} {p} \n".format(es=end_state, o=observation, p=probability))
pomdp_file.write("R: build : * : * : * {eps} \n".format(eps=epsilon))
for win_state in win_states:
pomdp_file.write("R: * : * : {ws} : * 1.0 \n".format(ws=win_state))
return pomdp_file
def get_S(data):
#S = ['(1,0)', '(1,1)', '(1,2)']
S = []
for i in range(11):
for j in range(11):
S.append("{}-{}".format(alphabet[i], alphabet[j]))
return S
def get_T(S, A, data):
#T = [('wait', '(1,0)', '(1,0)', 0.50), ('wait', '(1,0)', '(1,1)', 0.50), ('build', '(1,0)', '(1,1)', 0.50), ('build', '(1,0)', '(1,2)', 0.50), ('build', '(1,1)', '(1,2)', 1.0)]
T = []
for action in A:
transition_counters = {}
for state in S:
transition_counters[state] = Counter()
for (start_state, end_state) in itertools.product(S, S):
transition_counters[start_state][end_state] += 1
for i, step in enumerate(data['global']):
start_state = step[0]
if step[1] is not action:
continue
if i == len(data['global']) - 1:
break
end_state = data['global'][i+1][0]
transition_counters[start_state][end_state] += 1
probabilities = []
for start_state in transition_counters:
total = 0.0
for end_state in transition_counters[start_state]:
total += transition_counters[start_state][end_state]
for end_state in transition_counters[start_state]:
T_tuple = (action, start_state, end_state, transition_counters[start_state][end_state] / total)
probabilities.append(T_tuple)
for p in probabilities:
T.append(p)
return T
def get_O(S, A, data):
#O = [('(1,0)', '(100, 0)', 1.0), ('(1,1)', '(50, 50)', 1.0), ('(1,2)', '(33,67)', 1.0)]
O = []
transition_counters = {}
for state in S:
transition_counters[state] = Counter()
for (event, observation) in itertools.product(S, S):
transition_counters[event][observation] += 1
for step, observation in zip(data['global'], data['local']):
event = step[0]
transition_counters[event][observation] += 1
for state in transition_counters:
total = 0.0
for observation in transition_counters[state]:
total += transition_counters[state][observation]
for observation in transition_counters[state]:
O_tuple = (state, observation, transition_counters[state][observation] / total)
O.append(O_tuple)
return O
def get_R(S, A, data):
#R = ['(1,2)']
R = []
#for i in range(51, 100):
# for j in range(i+1, 100):
# R.append("({orig},{doub})".format(orig=i, doub=j))
for i in range(11):
for j in range(i+1, 11):
R.append("{orig}-{doub}".format(orig=alphabet[i], doub=alphabet[j]))
return R
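# Hypothetical usage sketch (the expected `data` layout is inferred from the
# accessors above: 'global' holds per-step (state, action) tuples and 'local'
# the aligned observations; the values here are placeholders):
#     data = {'global': [('a-a', 'build'), ('a-b', 'wait')],
#             'local': ['a-a', 'a-b']}
#     data_to_pomdp(data, fname='tangle.POMDP')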
|
[
"[email protected]"
] | |
63d97a4042ea1c94875bb42957b33061db5ac700
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnreggio.py
|
32b325c4e4eef33665e12e96b01b39fc616f374c
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 216 |
py
|
ii = [('ClarGE2.py', 1), ('RoscTTI3.py', 1), ('RoscTTI2.py', 2), ('MedwTAI.py', 1), ('HogaGMM.py', 3), ('MartHRW.py', 1), ('WestJIT.py', 1), ('RoscTTI.py', 1), ('BrewDTO.py', 2), ('ClarGE3.py', 2), ('RogeSIP.py', 1)]
|
[
"[email protected]"
] | |
1434e505ed21fd67e0d5b956ee3d0c00286409d9
|
c6125c2d971011a0b9ff602192bb73dc76f46d0a
|
/music_analysis/neural_networks/cnn.py
|
0d316547b52642584f6aecbf8bde43a53bdc227e
|
[] |
no_license
|
knsushma/CS760-ML-Project
|
e66127c212a53e17df0189e21f47cb09d72dfe2c
|
4aa6071d8a2c8035e674107c0c79a4a5130f2efc
|
refs/heads/master
| 2020-05-01T14:46:53.365573 | 2019-05-06T20:30:51 | 2019-05-06T20:30:51 | 177,529,467 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,186 |
py
|
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout, Dense, Flatten, GlobalAveragePooling2D, concatenate, ELU, ReLU
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
from Loader import Loader
import config
import sys
def build_model(learning_rate, with_compile = True, model_type = 'multitask'):
# Building the CNN
regularizer = l2(1e-5)
# regularizer = l2(0)
input_shape = tuple(config.IMG_DIMS)
inputs_ph = keras.Input(shape=input_shape)
x = inputs_ph
# Layer 1
x = Conv2D(16, (5,5), padding='same', kernel_regularizer=regularizer,
name = 'conv_1')(x)
x = ReLU()(x)
x = MaxPooling2D(pool_size=(2,1), strides=(2,1), name='MP_1')(x)
# Layer 2
x = Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizer,
name = 'conv_2')(x)
x = BatchNormalization(axis=3)(x)
x = ReLU()(x)
x = MaxPooling2D(pool_size=(2,2), strides=(2,2), name='MP_2')(x)
x = Dropout(0.1)(x)
# Layer 3
x = Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizer,
name = 'conv_3')(x)
x = ReLU()(x)
x = MaxPooling2D(pool_size=(2,2), strides=(2,2), name='MP_3')(x)
# Layer 4
x = Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizer,
name = 'conv_4')(x)
x = BatchNormalization(axis=3)(x)
x = ReLU()(x)
x = MaxPooling2D(pool_size=(2,2), strides=(2,2), name='MP_4')(x)
x = Dropout(0.1)(x)
# Layer 5
x = Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizer,
name = 'conv_5')(x)
x = ReLU()(x)
x = MaxPooling2D(pool_size=(2,2), strides=(2,2), name='MP_5')(x)
# Layer 6
x = Conv2D(256, (3,3), padding='same', kernel_regularizer=regularizer,
name = 'conv_6')(x)
x = ReLU()(x)
# layer 7
x = Conv2D(256, (1,1), padding='same', kernel_regularizer=regularizer,
name = 'conv_7')(x)
x = BatchNormalization(axis=3)(x)
x = ReLU()(x)
    # GAP
x = GlobalAveragePooling2D()(x)
x = BatchNormalization()(x)
# Dense
x = Dense(256, kernel_regularizer=regularizer, name = 'dense')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Dropout(0.5)(x)
# output
# Year
if model_type == 'year':
x = Dense(1, kernel_regularizer = regularizer, name = 'output')(x)
model = Model(inputs_ph, x)
# Genre
if model_type == 'genre':
x = Dense(8, kernel_regularizer=regularizer, name='output')(x)
x = Activation('softmax')(x)
model = Model(inputs_ph, x)
# Artist
if model_type == 'artist':
x = Dense(config.NUM_ARTISTS, kernel_regularizer = regularizer, name = 'output')(x)
x = Activation('softmax')(x)
model = Model(inputs_ph, x)
    # Multitask (all three heads)
if model_type == 'multitask':
year = Dense(1, kernel_regularizer = regularizer, name = 'year')(x)
genre = Dense(8, kernel_regularizer=regularizer, name='genre_activation')(x)
genre = Activation('softmax', name = 'genre')(genre)
artist = Dense(config.NUM_ARTISTS, kernel_regularizer = regularizer, name = 'artist_activation')(x)
artist = Activation('softmax', name = 'artist')(artist)
model = Model(inputs_ph, outputs = [year, genre, artist])
if with_compile:
optimizer = Adam(lr = learning_rate)
if model_type == 'year':
model.compile(optimizer=optimizer,
loss = 'mean_squared_error',
metrics = ['mean_absolute_error'])
if model_type == 'genre' or model_type == 'artist':
model.compile(optimizer=optimizer,
loss = 'categorical_crossentropy',
metrics=['accuracy'])
if model_type == 'multitask':
model.compile(optimizer=optimizer,
loss={'genre': 'categorical_crossentropy', 'artist': 'categorical_crossentropy',
'year': 'mean_squared_error'},
metrics={'genre': 'accuracy', 'artist': 'accuracy', 'year': 'mean_absolute_error'})
return model
if __name__ == '__main__':
'''
Args:
argv[1] - Either 'validate' to use a train/validation fold, or 'test' to use train/test set
argv[2] - Learning Rate (float)
argv[3] - either 'year', 'genre', 'artist', or 'multitask' for the task type
argv[4] - The number of epochs to train on
'''
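    # Example invocation (hypothetical values):
    #     python cnn.py test 0.001 multitask 20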
loader_batch_size = 8
if sys.argv[1] == 'validate':
train_folds = [config.FULL_TRAIN_LABELS]
validation_folds = [config.FULL_TEST_LABELS]
else:
train_folds = [config.TRAIN_FOLDS_LABELS[0]]
validation_folds = [config.TEST_FOLDS_LABELS[0]]
learning_rate = float(sys.argv[2])
model_type = sys.argv[3]
num_epochs = int(sys.argv[4])
loader_train = Loader(train_folds, loader_batch_size)
loader_validation = Loader(validation_folds, loader_batch_size)
    model = build_model(learning_rate, model_type = model_type)
# Verbose Out
keras.utils.plot_model(model, to_file = model_type + '.png')
with open(model_type + '.txt', 'w+') as f:
def print_s(s): print(s, file = f)
model.summary(print_fn = print_s)
if num_epochs == 0:
exit()
for epoch in range(num_epochs):
batch = loader_train.next_batch()
if model_type == 'multitask':
losses = [0] * 7
else:
losses = [0, 0]
batch_N = 0
while batch:
batch_N += 1
imgs = batch[0]
years = batch[1]
genre = batch[2]
artist = batch[3]
if model_type == 'genre':
loss = model.train_on_batch(imgs, genre)
if model_type == 'year':
loss = model.train_on_batch(imgs, years)
if model_type == 'artist':
loss = model.train_on_batch(imgs, artist)
if model_type == 'multitask':
loss = model.train_on_batch(imgs, {'year': years, 'genre': genre, 'artist': artist})
if not model_type == 'multitask':
losses[0] += loss[0]
losses[1] += loss[1]
else:
for i in range(7):
losses[i] += loss[i]
batch = loader_train.next_batch()
print('Epoch: ', epoch + 1)
if model_type == 'multitask':
losses = [loss / batch_N for loss in losses]
print('Losses:', *losses)
else:
print('Avg Acc: ', losses[0] / batch_N, losses[1] / batch_N)
# Final Train Set Evaluation
if model_type == 'multitask':
losses = [0] * 7
else:
losses = [0,0]
batch_N = 0
batch = loader_train.next_batch()
while batch:
batch_N += 1
imgs = batch[0]
years = batch[1]
genre = batch[2]
artist = batch[3]
if model_type == 'genre':
loss = model.test_on_batch(imgs, genre)
if model_type == 'year':
loss = model.test_on_batch(imgs, years)
if model_type == 'artist':
loss = model.test_on_batch(imgs, artist)
if model_type == 'multitask':
loss = model.test_on_batch(imgs, {'year': years, 'genre': genre, 'artist': artist})
if not model_type == 'multitask':
losses[0] += loss[0]
losses[1] += loss[1]
else:
for i in range(7):
losses[i] += loss[i]
batch = loader_train.next_batch()
if model_type == 'multitask':
losses = [loss / batch_N for loss in losses]
print('Final Train Accuracy:', *losses)
else:
print('Final Train Accuracy: ', losses[0] / batch_N, losses[1] / batch_N)
# Final Validation Set Accuracy
if model_type == 'multitask':
losses = [0] * 7
else:
losses = [0,0]
batch_N = 0
batch = loader_validation.next_batch()
while batch:
batch_N += 1
imgs = batch[0]
years = batch[1]
genre = batch[2]
artist = batch[3]
if model_type == 'genre':
loss = model.test_on_batch(imgs, genre)
if model_type == 'year':
loss = model.test_on_batch(imgs, years)
if model_type == 'artist':
loss = model.test_on_batch(imgs, artist)
if model_type == 'multitask':
loss = model.test_on_batch(imgs, {'year': years, 'genre': genre, 'artist': artist})
if not model_type == 'multitask':
losses[0] += loss[0]
losses[1] += loss[1]
else:
for i in range(7):
losses[i] += loss[i]
batch = loader_validation.next_batch()
if model_type == 'multitask':
losses = [loss / batch_N for loss in losses]
print('Final Validation Accuracy:', *losses)
else:
print('Final Validation Accuracy: ', losses[0] / batch_N, losses[1] / batch_N)
|
[
"[email protected]"
] | |
7ded1099f3ff3fd1d8e319d35e46977d4a7f120d
|
78e36d40259e207c0fa2d7837188e744871f93dd
|
/Incomeexpenseapi/wsgi.py
|
34b3afabe3f3cedc6f47ba0ddeed205feae93837
|
[] |
no_license
|
easylord/Auth
|
c3f006aeea82ff9807e6805abc3ffa699f83b2d1
|
66da243058d5e294d848c23fbdb7766dbe930333
|
refs/heads/main
| 2023-01-01T13:54:45.589510 | 2020-10-19T05:37:05 | 2020-10-19T05:37:05 | 305,273,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
"""
WSGI config for Incomeexpenseapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Incomeexpenseapi.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
f2f26b0c7d5ec008c4e0789cdad918e0c79ec68d
|
ddaf98c681ec91ed0150b34f46104aac3ff08be9
|
/sum.py
|
33164241ddabdda23b89ff28979ba7f28a5f2c61
|
[
"MIT"
] |
permissive
|
napman/easy_bench
|
50afc13f8e5b8cb2e0fec00f543d38d140d1e610
|
50b948ac67b692845130a6bb8b3a3d54c49d23e9
|
refs/heads/master
| 2020-05-20T12:24:29.668512 | 2015-09-22T02:03:41 | 2015-09-22T02:03:41 | 42,829,736 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 92 |
py
|
import sys
from bench import *
for key, value in sum_bench().items():
    print(key, value)
|
[
"[email protected]"
] | |
351fc1cd117645fd8bb4225b82e2026437414d89
|
5bb5a8c194b7eb9c4e40295b240d8415e6e6b2f2
|
/omega_trading/game/bet.py
|
815d4a212691d2653f8900bc13d3d79bfde2e7c7
|
[] |
no_license
|
crnorthc/omega-trading
|
4e5463999ec819e59ee831cc9ae50bd1e069c189
|
74cdd4904695c8bfc4d34773f64a7e32627d922c
|
refs/heads/main
| 2023-07-15T18:55:22.920810 | 2021-08-24T23:00:13 | 2021-08-24T23:00:13 | 341,263,034 | 1 | 0 | null | 2021-08-24T17:53:54 | 2021-02-22T16:25:26 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,553 |
py
|
from bip_utils import Bip39SeedGenerator, Bip44, Bip44Coins
from .TopSecret import mnemonic
from web3 import Web3
# Binance Chain API - https://github.com/sammchardy/python-binance-chain
def get_address(id, coin):
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
if coin == 'BNB':
coin = Bip44Coins.BINANCE_CHAIN
if coin == 'BTC':
coin = Bip44Coins.BITCOIN
if coin == 'LTC':
coin = Bip44Coins.LITECOIN
if coin == 'ETH':
coin = Bip44Coins.ETHEREUM
root = Bip44.FromSeed(seed_bytes, coin)
account = root.Purpose().Coin().Account(id)
return account.PublicKey().ToAddress()
def get_key(id, coin):
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
if coin == 'BNB':
coin = Bip44Coins.BINANCE_CHAIN
if coin == 'BTC':
coin = Bip44Coins.BITCOIN
if coin == 'LTC':
coin = Bip44Coins.LITECOIN
if coin == 'ETH':
coin = Bip44Coins.ETHEREUM
root = Bip44.FromSeed(seed_bytes, coin)
account = root.Purpose().Coin().Account(id)
return account.PrivateKey().Raw()
def gas_quote():
w3 = Web3(Web3.HTTPProvider('HTTP://127.0.0.1:7545'))
return w3.eth.gas_price
def checkedsummed(address):
return Web3.toChecksumAddress(address)
def get_raw_hex(to, from_key, value, nonce):
w3 = Web3()
signed_txn = w3.eth.account.sign_transaction(dict(
nonce=nonce,
gas=100000,
gasPrice=10000,
to=to,
value=value,
),
from_key,
)
return signed_txn.rawTransaction.hex()
|
[
"[email protected]"
] | |
c2a0e1d31e6f2d5a7b75e235e08017346dae90ec
|
89d203e0c8e4438d8e913a1fff48578a3ac38c78
|
/components/node.py
|
6700aaffd90cea581241962fd7355fd670f5e197
|
[] |
no_license
|
jdvin/spiking-nn
|
a334516448ffe7750b19996eb77875d3af7b5f7f
|
fd750ef3afd983ed093ff02117b1c7a41e1fc51d
|
refs/heads/main
| 2023-07-04T12:37:12.241039 | 2021-08-06T13:34:53 | 2021-08-06T13:34:53 | 375,919,367 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,065 |
py
|
class Node():
def __init__(self, layer, response_function, threshold_function, spiking_method,
base_threshold, abs_refrac, rel_refrac):
self.layer = layer
self.synapses = []
self.threshold = base_threshold
self.potential = 0
self.last_fired = None
self.firing_history = []
self.response_function = response_function
self.threshold_function = threshold_function
self.base_threshold = base_threshold
self.abs_refrac = abs_refrac
self.rel_refrac = rel_refrac
self.spike = spiking_method
def reset(self):
self.last_fired = None
self.firing_history = []
# p is recalculated each update, do we want a continuous value?
def get_potential(self, t):
'''
calculate the firing potential for the node at t
self is the post node
'''
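        # i.e. p = sum_i w_i * f(t_i_last, t): each recently fired pre-node
        # contributes its response-function value scaled by the signed
        # dominant permanence w_i of the connecting synapse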
p = 0
for synapse in self.synapses:
if synapse.pre_node.last_fired != None:
# assign for cleaner use
n = synapse.pre_node
# determine dominate synapse
if synapse.excit_perm > synapse.inhib_perm:
w = synapse.excit_perm
else:
w = -1 * synapse.inhib_perm
# calculate post synaptic potential
psp = w * n.response_function(n.last_fired, t)
# add to permanence
p += psp
return p
def update(self, t):
'''
update the firing behaviour of self at t
'''
        # update the dynamic threshold based on time since the last spike
if self.last_fired != None:
self.threshold = self.threshold_function(self.last_fired, t, self.base_threshold,
self.abs_refrac, self.rel_refrac)
self.potential = self.get_potential(t)
if self.spike(self.potential, self.threshold):
self.last_fired = t
self.firing_history.append(1)
else:
self.firing_history.append(0)
|
[
"[email protected]"
] | |
7c3d2e7c79c1ed82a4df216081443454e1fd6e0d
|
d9f66c812c7287187563a15d1dc72e08cf1eaa91
|
/Wrappers/java/OpenNI.jni/CreateMethods.py
|
ff11b3935d05ec820766775a39aef65358db8f70
|
[
"Apache-2.0",
"IJG",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Delicode/OpenNI2
|
9c722138a9e45a887dda93b8522a523c59f3376d
|
99dd4e7d9f3c08a8e31b9dc9c0fd4d7448bc6ea0
|
refs/heads/master
| 2021-06-06T00:19:11.533537 | 2021-04-28T08:33:33 | 2021-04-28T08:33:33 | 362,108,942 | 0 | 0 |
Apache-2.0
| 2021-04-27T12:44:15 | 2021-04-27T12:44:14 | null |
UTF-8
|
Python
| false | false | 2,398 |
py
|
#/****************************************************************************
#* *
#* OpenNI 1.x Alpha *
#* Copyright (C) 2012 PrimeSense Ltd. *
#* *
#* This file is part of OpenNI. *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#****************************************************************************/
import os
import re
# ----------------------- MAIN -------------------------
java_header = open("org_openni_NativeMethods.h")
cont = java_header.read()
java_header.close()
result = open("methods.inl", "w")
result.write("static JNINativeMethod methods[] = {\n")
while True:
match = re.search("Method:\s*(\w*)", cont)
if match is None:
break
method_name = match.group(1)
match = re.search("Signature:\s*([\w\(\)\[;/]*)", cont)
if match is None:
break
signature = match.group(1)
match = re.search("JNIEXPORT.*JNICALL (\w*)", cont)
if match is None:
break
method = match.group(1)
result.write('\t{ "' + method_name + '", "' + signature + '", (void*)&' + method + ' },\n')
    cont = cont[match.end():]
result.write('};\n')
result.close()
|
[
"[email protected]"
] | |
67eabf0ae2a7a48804060b2815b630e26deab170
|
b0c811e00137129e725499c99ef7d4d0c7282a60
|
/ictivity/wsgi.py
|
409630f0d94274cb4477ac8a068bc7906fcd57cd
|
[] |
no_license
|
mengcz13/Ictivity
|
8a3b01e05bb41353dad70d5572b646e475861441
|
850b8ca477a456ee3edf51f7f2ede4d5841f3ff7
|
refs/heads/master
| 2021-01-21T15:04:46.460606 | 2016-06-24T15:00:40 | 2016-06-24T15:00:40 | 59,628,161 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
WSGI config for ictivity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ictivity.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
a57cd9343ff71474af55efd8ce017714a82041eb
|
fcff90f4f8af055d26715a8f7a997dc4d4f4091d
|
/Phrasism/urls.py
|
d7a8dee64ec28ca682a041714b71bbc77bb6ef24
|
[] |
no_license
|
weichinhsu/phrasism-backend
|
644a75fa276c37c3ca64ac4b04a8642a5d32deb8
|
675dc8fd14e38ae26453358357ebfe8e230e2234
|
refs/heads/main
| 2023-04-15T21:21:48.468205 | 2021-04-21T08:53:30 | 2021-04-21T08:53:30 | 360,094,035 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 949 |
py
|
"""Phrasism URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from .api import index, translate, translate2, google_translate
urlpatterns = [
path('', index),
path('translate/<str:phrase>', translate),
path('translate2/<str:phrase>', translate2),
path('gtranslate/<str:phrase>', google_translate),
]
|
[
"[email protected]"
] | |
a8a973fe9ce85b93f99827d4cd97e42e85c8ddc4
|
455c79b1a536bdf17ab54bbe39d3215f05184d57
|
/app/movies/utils.py
|
f0e01e6e5c52cbb7bdd750fda56ff96552f0765a
|
[] |
no_license
|
shivamsingh14/movie-ticketing
|
b2f8eeffe1868648edb22ea95f897158fd905568
|
89f27dc280517bad171161805450280c10ab19ca
|
refs/heads/master
| 2020-06-04T08:14:18.858185 | 2019-06-14T12:41:14 | 2019-06-14T12:41:14 | 191,940,846 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
from app.movies.models import Auditorium
from app.movies.models import Slot
class FreeSlotUtil():
def get_free_slots(self, audi, start_date, end_date):
        audi_available_slot = range(audi.opening_time, (audi.closing_time-2), 3)  # start hours of the 3-hour slots
audi_booked_slots = Slot.objects.filter(
audi=audi,
date__gte=start_date,
date__lte=end_date
).values_list('slot', flat=True)
audi_free_slots = list(set(audi_available_slot)-set(audi_booked_slots))
return audi_free_slots
|
[
"[email protected]"
] |