seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|
41373317914
|
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
username = models.CharField(max_length=100)
email = models.EmailField(unique=True)
fecha_nacimiento = models.CharField(max_length=10, blank=True, null=True)
nacional = models.BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def profile(self):
        profile = Profile.objects.get(user=self)
        return profile
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
full_name = models.CharField(max_length=1000)
bio = models.CharField(max_length=100)
image = models.ImageField(upload_to="user_images", default="default.jpg")
verified = models.BooleanField(default=False)
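# Signal receivers: create a Profile when a new User is first saved, and re-save the
# related Profile on every subsequent User save.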
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
post_save.connect(create_user_profile, sender=User)
post_save.connect(save_user_profile, sender=User)
class TipoEvento(models.Model):
nombre = models.CharField(max_length=100)
descripcion = models.TextField()
class ActividadTipo(models.Model):
tipoevento = models.ForeignKey(TipoEvento, on_delete=models.CASCADE)
idactividades = models.ManyToManyField('Actividad')
class Actividad(models.Model):
nombre = models.CharField(max_length=100)
longitud = models.DecimalField(max_digits=10, decimal_places=6)
latitud = models.DecimalField(max_digits=10, decimal_places=6)
fecha = models.DateField()
descripcion = models.TextField()
img1 = models.TextField(blank=True, null=True)
img2 = models.TextField(blank=True, null=True)
class UsuarioActividad(models.Model):
idusuario = models.ForeignKey(User, on_delete=models.CASCADE)
idactividad = models.ForeignKey(Actividad, on_delete=models.CASCADE)
fecha_de_interes = models.DateField()
| isabellaaguilar/ProyectoFinal-Turisteo-Cultural | backend_api/api/models.py | models.py | py | 2,026 | python | en | code | 0 | github-code | 6 |
2795680906
|
# PCA => Principal component analysis using HSI
import math
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
class princiapalComponentAnalysis:
def __init__(self):
pass
def __str__(self):
pass
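    # pca_calculate flattens the (bands, rows, cols) cube into a (pixels, bands) matrix,
    # fits scikit-learn's PCA, and returns the leading components reshaped back to
    # (components, rows, cols). 'varianza' selects components by cumulative explained
    # variance; 'componentes' selects a fixed number of components.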
def pca_calculate(self,imagen_in,varianza = None,componentes = None):
dataImagen = imagen_in.copy()
        if varianza is not None:
imageTemp = dataImagen.reshape((dataImagen.shape[0],dataImagen.shape[1]*dataImagen.shape[2])).T
pca = PCA()
pca.fit(imageTemp)
imageTemp = pca.transform(imageTemp)
            # Determine the number of components from the explained-variance data
var = 0
num_componentes = 0
for i in range(pca.explained_variance_ratio_.shape[0]):
var += pca.explained_variance_ratio_[i]
if var > varianza:
break
else:
num_componentes += 1
imageTemp = imageTemp.reshape( (dataImagen.shape[1], dataImagen.shape[2],dataImagen.shape[0]) )
imagePCA = np.zeros( (num_componentes, dataImagen.shape[1], dataImagen.shape[2]) )
for i in range(imagePCA.shape[0]):
imagePCA[i] = imageTemp[:,:,i]
        if componentes is not None:
imageTemp = dataImagen.reshape((dataImagen.shape[0],dataImagen.shape[1]*dataImagen.shape[2])).T
c_pca = PCA(n_components=componentes)
c_pca.fit(imageTemp)
imageTemp = c_pca.transform(imageTemp)
imageTemp = imageTemp.reshape( (dataImagen.shape[1], dataImagen.shape[2],imageTemp.shape[1]) )
imagePCA = np.zeros( (componentes, dataImagen.shape[1], dataImagen.shape[2]) )
for i in range(imagePCA.shape[0]):
imagePCA[i] = imageTemp[:,:,i]
return imagePCA
def kpca_calculate(self, imagenInput, componentes = None):
imagen_in = imagenInput.copy()
        # Take the portion of the image of size W
        i = 0  # x index into the image
        j = 0  # y index into the image
        W = 50  # subset size: 50 indices per tile
        fx_pc = 10  # fixed number of components
        n_componentes = 0  # initial number of principal components
for i in range(imagen_in.shape[1]): #Recorrer x
i_l = i*W
i_h = (i+1)*W
if i_l >= imagen_in.shape[1]:
break
if i_h > imagen_in.shape[1]:
i_h = imagen_in.shape[1]
for j in range(imagen_in.shape[2]): #Recorrer y
j_l = j*W
j_h = (j+1)*W
if j_l >= imagen_in.shape[2]:
break
if j_h > imagen_in.shape[2]:
j_h = imagen_in.shape[2]
dataImagen = imagen_in[:, i_l:i_h, j_l:j_h]
imageTemp = dataImagen.reshape((dataImagen.shape[0],dataImagen.shape[1]*dataImagen.shape[2])).T #Reorganiza para aplicar KPCA
                # Apply KPCA over all spectral dimensions
kpca = KernelPCA( kernel='rbf' ) # n_components=None, gamma=0.01
X_transformed = kpca.fit_transform(imageTemp)
                # Compute each component's variance percentage and the number of components to use
                if componentes is not None:
if n_componentes == 0:
n_componentes = componentes
ImagenOut = np.zeros( (n_componentes, imagen_in.shape[1], imagen_in.shape[2]) )
else:
if n_componentes == 0:
sum_varianza = 0
varianza = kpca.lambdas_/np.sum(kpca.lambdas_)
for v in range(varianza.shape[0]):
sum_varianza = sum_varianza+varianza[v]
if sum_varianza > 0.95:
break
else:
n_componentes += 1
if n_componentes < fx_pc:
print('pc find:'+str(n_componentes))
n_componentes = fx_pc
print('msn 1: fix number of PC used')
if n_componentes > imagen_in.shape[0]/2:
print('pc find:'+str(n_componentes))
n_componentes = fx_pc
print('msn 2: fix number of PC used')
ImagenOut = np.zeros( (n_componentes, imagen_in.shape[1], imagen_in.shape[2]) )
                # Recover the required number of components
imageTemp = X_transformed[:,0:n_componentes].reshape( (dataImagen.shape[1], dataImagen.shape[2],n_componentes) )
imageKPCA = np.zeros( (n_componentes, dataImagen.shape[1], dataImagen.shape[2]) )
                # Rebuild the output in the shape of the input image
for i in range(imageKPCA.shape[0]):
imageKPCA[i] = imageTemp[:,:,i]
ImagenOut[:, i_l:i_h, j_l:j_h] = imageKPCA
return ImagenOut
def kpca2_calculate(self, imagen_in, componentes):
dataImagen = imagen_in.copy()
imageTemp = dataImagen.reshape((dataImagen.shape[0],dataImagen.shape[1]*dataImagen.shape[2])).T
print(imageTemp.shape)
kpca = KernelPCA(n_components=componentes, kernel='rbf', gamma=0.3)
X_transformed = kpca.fit_transform(imageTemp)
print(X_transformed.shape)
imageTemp = X_transformed.reshape( (dataImagen.shape[1], dataImagen.shape[2],X_transformed.shape[1]) )
imageKPCA = np.zeros( (componentes, dataImagen.shape[1], dataImagen.shape[2]) )
for i in range(imageKPCA.shape[0]):
imageKPCA[i] = imageTemp[:,:,i]
return imageKPCA
def graficarPCA(self,imagePCA, channel):
plt.figure(1)
plt.imshow(imagePCA[channel])
plt.colorbar()
plt.show()
| davidruizhidalgo/unsupervisedRemoteSensing | package/PCA.py | PCA.py | py | 6,139 | python | es | code | 13 | github-code | 6 |
8665123714
|
# -*- coding: utf-8 -*-
import os
import boto3
import settings
from jsonschema import validate, ValidationError
from cognito_trigger_base import CognitoTriggerBase
from user_util import UserUtil
from private_chain_util import PrivateChainUtil
class CustomMessage(CognitoTriggerBase):
def get_schema(self):
return {
'type': 'object',
'properties': {
'phone_number': settings.parameters['phone_number']
}
}
def validate_params(self):
params = self.event['request']['userAttributes']
if UserUtil.check_try_to_register_as_line_user(self.event['userName']) or \
UserUtil.check_try_to_register_as_twitter_user(self.event['userName']) or \
UserUtil.check_try_to_register_as_yahoo_user(self.event['userName']) or \
UserUtil.check_try_to_register_as_facebook_user(self.event['userName']):
raise ValidationError("external provider's user can not execute")
if params.get('phone_number', '') != '' and \
params.get('phone_number_verified', '') != 'true' and \
self.event['triggerSource'] != 'CustomMessage_ForgotPassword':
validate(params, self.get_schema())
client = boto3.client('cognito-idp')
response = client.list_users(
UserPoolId=self.event['userPoolId'],
Filter='phone_number = "%s"' % params['phone_number'],
)
for user in response['Users']:
for attribute in user['Attributes']:
if attribute['Name'] == 'phone_number_verified' and attribute['Value'] == 'true':
                        raise ValidationError('This phone_number already exists')
        # For security reasons, do not allow the phone number to be changed here.
        # This way, even if an XSS attack occurs, operations that require phone number verification can still be protected.
if self.event['triggerSource'] == 'CustomMessage_VerifyUserAttribute':
            # If phone_number_verified is true, the phone number has not been changed, so this check is unnecessary.
if params.get('phone_number_verified', '') != 'true':
self.__validate_has_not_token(params)
        # Do not allow password changes for users registered via a third-party provider.
if self.event['triggerSource'] == 'CustomMessage_ForgotPassword':
            # Check whether the user was registered via a third-party provider.
if UserUtil.is_external_provider_user(self.dynamodb, self.event['userName']):
raise ValidationError("external provider's user can not execute")
def exec_main_proc(self):
if self.event['triggerSource'] == 'CustomMessage_ForgotPassword':
self.event['response']['smsMessage'] = '{user}さんのパスワード再設定コードは {code} です。'.format(
user=self.event['userName'], code=self.event['request']['codeParameter'])
self.event['response']['emailSubject'] = '【ALIS】パスワードの変更:再設定コードの送付'
self.event['response']['emailMessage'] = "{user}さんのパスワード再設定コードは {code} です".format(
code=self.event['request']['codeParameter'],
user=self.event['userName'])
else:
self.event['response']['smsMessage'] = 'ALISです。\n{user}さんの認証コードは {code} です。'.format(
user=self.event['userName'], code=self.event['request']['codeParameter'])
self.event['response']['emailSubject'] = '【ALIS】登録のご案内:メールアドレスの確認'
self.event['response']['emailMessage'] = """\
{user}様
ALISをご利用いただきありがとうございます。
仮登録が完了しました。
下記URLにアクセスし、ログインをして登録手続きを完了してください。
https://{domain}/confirm?code={code}&user={user}
※注意事項
・24時間以内に手続きを完了しない場合、上記URLは無効になります。最初から手続きをやり直してください。
・上記URLをクリックしてもページが開かない場合は、URLをコピーし、ブラウザのアドレス欄に貼り付けてください。
・このメールにお心当たりの無い場合は、恐れ入りますが、下記までお問合せください。
お問合せ(https://{domain}/help)
・このメールアドレスは配信専用となっております。本メールに返信していただきましても、お問合せにはお答えできませんのでご了承ください。
ALIS:https://alismedia.jp
""".format(
domain=os.environ['DOMAIN'],
code=self.event['request']['codeParameter'],
user=self.event['userName']
).replace("\n", "<br />")
return self.event
    # Raise an exception if the user holds tokens.
def __validate_has_not_token(self, params):
address = params.get('custom:private_eth_address')
if address is not None:
url = 'https://' + os.environ['PRIVATE_CHAIN_EXECUTE_API_HOST'] + '/production/wallet/balance'
payload = {'private_eth_address': address[2:]}
token = PrivateChainUtil.send_transaction(request_url=url, payload_dict=payload)
if token is not None and token != '0x0000000000000000000000000000000000000000000000000000000000000000':
raise ValidationError("Do not allow phone number updates")
| AlisProject/serverless-application | src/handlers/cognito_trigger/custommessage/custom_message.py | custom_message.py | py | 5,666 | python | ja | code | 54 | github-code | 6 |
3940897296
|
import numpy as np
import torch
from torchvision import models
import torch.nn as nn
# from resnet import resnet34
# import resnet
from torch.nn import functional as F
class ConvBnRelu(nn.Module):
def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
has_relu=True, inplace=True, has_bias=False):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
stride=stride, padding=pad,
dilation=dilation, groups=groups, bias=has_bias)
self.has_bn = has_bn
if self.has_bn:
self.bn = nn.BatchNorm2d(out_planes)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch, reduction=16):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU()
)
self.channel_conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_ch)
)
def forward(self, x):
residual = x
x = self.conv(x)
# x = self.se(x)
if residual.shape[1] != x.shape[1]:
residual = self.channel_conv(residual)
x += residual
return x
class up_edge(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up_edge, self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
self.sigmoid = nn.Sigmoid()
self.change_ch = nn.Conv2d(int(in_ch), int(in_ch/2), kernel_size=1)
def forward(self, x1, x2,edge):
#x1:Decoder x2:Encoder,a_map edge
# print("x1", x1.size())
# print("x2", x2.size())
# print("a_map", a_map.size())
# print("a_map1", a_map.size())
x1 = self.up(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
x = torch.cat([edge,x2, x1], dim=1)
x = self.conv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
self.sigmoid = nn.Sigmoid()
self.change_ch = nn.Conv2d(int(in_ch), int(in_ch/2), kernel_size=1)
def forward(self, x1, x2):
# print("x1", x1.size())
# print("x2", x2.size())
# print("a_map", a_map.size())
x1 = self.up(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
if x2.shape[1]!=x1.shape[1]:
x1=self.change_ch(x1)
# print("x2", x2.shape)
# print("x1", x1.shape)
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch, dropout=False, rate=0.1):
super(outconv, self).__init__()
self.dropout = dropout
if dropout:
print('dropout', rate)
self.dp = nn.Dropout2d(rate)
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
if self.dropout:
x = self.dp(x)
x = self.conv(x)
return x
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
return p
class dual_down(nn.Module):
def __init__(self, in_ch,out_ch):
super(dual_down, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_ch, in_ch, 3,2,autopad(3, 1),groups=1),nn.ReLU(),nn.Dropout2d())
self.conv2 = nn.Sequential(nn.Conv2d(2*in_ch, out_ch, 1), nn.ReLU(), nn.Dropout2d())
def forward(self, x1, x2):
x1=self.conv1(x1)
# print("x1",x1.shape,"x2",x2.shape)
x=torch.cat([x1,x2],dim=1)
x=self.conv2(x)
return x
class atten_down(nn.Module):
def __init__(self, in_ch):
super(atten_down, self).__init__()
self.edge_atten = nn.Sequential(nn.Conv2d(in_ch,in_ch,kernel_size=3, padding=1),
nn.Sigmoid())
self.conv = nn.Conv2d(in_ch, in_ch, kernel_size=3, bias=False)
self.bn = nn.BatchNorm2d(in_ch, eps=0.001, momentum=0.03)
self.act = nn.LeakyReLU(0.1, inplace=True)
def forward(self, mask, edge):
e_atten=self.edge_atten(edge)
mask=self.act(self.bn(self.edge_atten(mask)))
mask=mask*e_atten
return mask
| Winterspringkle/EIANet | models/master.py | master.py | py | 5,598 | python | en | code | 0 | github-code | 6 |
30011949474
|
from flask import Blueprint, request, abort
from epmanage.lib.auth import AuthController, AuthException
auth_component = Blueprint('auth_component', __name__)
@auth_component.route('/', methods=['POST'])
def auth_do():
"""Perform authentication"""
try:
return AuthController.get_token_agent(request.json)
except AuthException:
abort(503)
except:
abort(503)
@auth_component.route('/enroll', methods=['POST'])
def enroll_do():
"""Perform enrollment"""
try:
return AuthController.enroll_agent(request.json)
except AuthException:
abort(503)
except:
abort(503)
| PokeSec/EPManage | epmanage/auth/auth.py | auth.py | py | 642 | python | en | code | 1 | github-code | 6 |
73730902266
|
import tensorflow as tf
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import pandas as pd
from NS_model_tf import Sampler, Navier_Stokes2D
if __name__ == '__main__':
def U_gamma_1(x):
num = x.shape[0]
return np.tile(np.array([1.0, 0.0]), (num, 1))
def U_gamma_2(x):
num = x.shape[0]
return np.zeros((num, 2))
def f(x):
num = x.shape[0]
return np.zeros((num, 2))
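    # Residuals of the steady incompressible Navier-Stokes momentum equations in
    # streamfunction form: u = dpsi/dy, v = -dpsi/dx, so continuity is satisfied
    # automatically; sigma_x / sigma_y rescale the derivatives.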
def operator(psi, p, x, y, Re, sigma_x=1.0, sigma_y=1.0):
u = tf.gradients(psi, y)[0] / sigma_y
v = - tf.gradients(psi, x)[0] / sigma_x
u_x = tf.gradients(u, x)[0] / sigma_x
u_y = tf.gradients(u, y)[0] / sigma_y
v_x = tf.gradients(v, x)[0] / sigma_x
v_y = tf.gradients(v, y)[0] / sigma_y
p_x = tf.gradients(p, x)[0] / sigma_x
p_y = tf.gradients(p, y)[0] / sigma_y
u_xx = tf.gradients(u_x, x)[0] / sigma_x
u_yy = tf.gradients(u_y, y)[0] / sigma_y
v_xx = tf.gradients(v_x, x)[0] / sigma_x
v_yy = tf.gradients(v_y, y)[0] / sigma_y
Ru_momentum = u * u_x + v * u_y + p_x - (u_xx + u_yy) / Re
Rv_momentum = u * v_x + v * v_y + p_y - (v_xx + v_yy) / Re
return Ru_momentum, Rv_momentum
# Parameters of equations
Re = 100.0
# Domain boundaries
bc1_coords = np.array([[0.0, 1.0],
[1.0, 1.0]])
bc2_coords = np.array([[0.0, 0.0],
[0.0, 1.0]])
bc3_coords = np.array([[1.0, 0.0],
[1.0, 1.0]])
bc4_coords = np.array([[0.0, 0.0],
[1.0, 0.0]])
dom_coords = np.array([[0.0, 0.0],
[1.0, 1.0]])
# Create boundary conditions samplers
bc1 = Sampler(2, bc1_coords, lambda x: U_gamma_1(x), name='Dirichlet BC1')
bc2 = Sampler(2, bc2_coords, lambda x: U_gamma_2(x), name='Dirichlet BC2')
bc3 = Sampler(2, bc3_coords, lambda x: U_gamma_2(x), name='Dirichlet BC3')
bc4 = Sampler(2, bc4_coords, lambda x: U_gamma_2(x), name='Dirichlet BC4')
bcs_sampler = [bc1, bc2, bc3, bc4]
# Create residual sampler
res_sampler = Sampler(2, dom_coords, lambda x: f(x), name='Forcing')
# Define model
mode = 'M1'
layers = [2, 50, 50, 50, 2]
model = Navier_Stokes2D(layers, operator, bcs_sampler, res_sampler, Re, mode)
# Train model
model.train(nIter=40001, batch_size=128)
# Test Data
nx = 100
ny = 100 # change to 100
x = np.linspace(0.0, 1.0, nx)
y = np.linspace(0.0, 1.0, ny)
X, Y = np.meshgrid(x, y)
X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None]))
# Predictions
psi_pred, p_pred = model.predict_psi_p(X_star)
u_pred, v_pred = model.predict_uv(X_star)
psi_star = griddata(X_star, psi_pred.flatten(), (X, Y), method='cubic')
p_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic')
u_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic')
v_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic')
velocity = np.sqrt(u_pred**2 + v_pred**2)
velocity_star = griddata(X_star, velocity.flatten(), (X, Y), method='cubic')
# Reference
u_ref= np.genfromtxt("reference_u.csv", delimiter=',')
v_ref= np.genfromtxt("reference_v.csv", delimiter=',')
velocity_ref = np.sqrt(u_ref**2 + v_ref**2)
# Relative error
error = np.linalg.norm(velocity_star - velocity_ref.T, 2) / np.linalg.norm(velocity_ref, 2)
print('l2 error: {:.2e}'.format(error))
### Plot ###
###########
# Reference solution & Prediceted solution
fig_1 = plt.figure(1, figsize=(18, 5))
fig_1.add_subplot(1, 3, 1)
plt.pcolor(X.T, Y.T, velocity_ref, cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Reference Velocity')
fig_1.add_subplot(1, 3, 2)
    plt.pcolor(X, Y, velocity_star, cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Predicted Velocity')
plt.tight_layout()
fig_1.add_subplot(1, 3, 3)
plt.pcolor(X, Y, np.abs(velocity_star - velocity_ref.T), cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Absolute Error')
plt.show()
## Loss ##
loss_res = model.loss_res_log
loss_bcs = model.loss_bcs_log
fig_2 = plt.figure(2)
ax = fig_2.add_subplot(1, 1, 1)
ax.plot(loss_res, label='$\mathcal{L}_{r}$')
ax.plot(loss_bcs, label='$\mathcal{L}_{u_b}$')
ax.set_yscale('log')
ax.set_xlabel('iterations')
ax.set_ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.show()
## Adaptive Constant
adaptive_constant = model.adpative_constant_bcs_log
fig_3 = plt.figure(3)
ax = fig_3.add_subplot(1, 1, 1)
ax.plot(adaptive_constant, label='$\lambda_{u_b}$')
ax.set_xlabel('iterations')
plt.legend()
plt.tight_layout()
plt.show()
## Gradients #
data_gradients_res = model.dict_gradients_res_layers
data_gradients_bcs = model.dict_gradients_bcs_layers
num_hidden_layers = len(layers) -1
cnt = 1
fig_4 = plt.figure(4, figsize=(13, 4))
for j in range(num_hidden_layers):
ax = plt.subplot(1, 4, cnt)
ax.set_title('Layer {}'.format(j + 1))
ax.set_yscale('symlog')
gradients_res = data_gradients_res['layer_' + str(j + 1)][-1]
gradients_bcs = data_gradients_bcs['layer_' + str(j + 1)][-1]
sns.distplot(gradients_res, hist=False,
kde_kws={"shade": False},
norm_hist=True, label=r'$\nabla_\theta \mathcal{L}_r$')
sns.distplot(gradients_bcs, hist=False,
kde_kws={"shade": False},
norm_hist=True, label=r'$\nabla_\theta \mathcal{L}_{u_b}$')
ax.get_legend().remove()
ax.set_xlim([-1.0, 1.0])
ax.set_ylim([0, 100])
cnt += 1
handles, labels = ax.get_legend_handles_labels()
fig_4.legend(handles, labels, loc="upper left", bbox_to_anchor=(0.35, -0.01),
borderaxespad=0, bbox_transform=fig_4.transFigure, ncol=2)
plt.tight_layout()
plt.show()
| PredictiveIntelligenceLab/GradientPathologiesPINNs | Lid-driven Cavity/NS.py | NS.py | py | 6,568 | python | en | code | 134 | github-code | 6 |
35694932356
|
# pylint: disable=E1111
from faker import Faker
from src.infra.entities import Pet as PetModel
from src.infra.config.db_config import DBConnectionHandler
from src.infra.entities.pet import AnimalTypes
from .pet_repository import PetRepository
faker = Faker()
pet_repository = PetRepository()
db_connection_handle = DBConnectionHandler()
def test_insert_pet():
"""Should Insert pet"""
name = faker.name()
species = "dog"
age = faker.random_number(digits=2)
user_id = faker.random_number()
engine = db_connection_handle.get_engine()
# SQL Commands
new_pet = pet_repository.insert_pet(name, species, age, user_id)
query_pet = engine.execute(f"SELECT * FROM pets WHERE id='{new_pet.id}'").fetchone()
engine.execute(f"DELETE FROM pets WHERE id='{new_pet.id}'")
assert new_pet.id == query_pet.id
assert new_pet.name == query_pet.name
assert new_pet.species == query_pet.species
assert new_pet.age == query_pet.age
assert new_pet.user_id == query_pet.user_id
def test_select_pet():
"""Should Select a pet in pets table and comapare it"""
pet_id = faker.random_number(digits=5)
name = faker.name()
species = "fish"
age = faker.random_number(digits=1)
user_id = faker.random_number()
species_mock = AnimalTypes("fish")
data = PetModel(
id=pet_id, name=name, species=species_mock, age=age, user_id=user_id
)
engine = db_connection_handle.get_engine()
engine.execute(
"INSERT INTO pets (id, name, species, age, user_id) "
+ f"VALUES ('{pet_id}', '{name}', '{species}', '{age}', '{user_id}')"
)
query_pet1 = pet_repository.select_pet(pet_id=pet_id)
query_pet2 = pet_repository.select_pet(user_id=user_id)
query_pet3 = pet_repository.select_pet(pet_id=pet_id, user_id=user_id)
assert data in query_pet1
assert data in query_pet2
assert data in query_pet3
engine.execute(f"DELETE FROM pets WHERE id='{pet_id}'")
| YuryTinos/backend-python | src/infra/repo/pet_repository_test.py | pet_repository_test.py | py | 1,976 | python | en | code | 0 | github-code | 6 |
11878624496
|
"""
Given two integers r and c, indicating the number of rows and columns, print a two-dimensional matrix such that the elements of the matrix are in an increasing sequence from 1 to rXc, in a row-major order.
Input Format:
First line of the input contains two space separated integers indicating the rows and columns
Output Format:
Display r lines indicating the elements of the Matrix
Example:
Input:
3 3
Output:
1 2 3
4 5 6
7 8 9
"""
a,b=input().split()
a,b=int(a),int(b)
c=1
for i in range(1,a+1):
for j in range(1,b+1):
if j!=b:
print(c,"",end="")
else:
print(c,end="")
c+=1
if i!=a:
print("")
else:
print("",end="")
| HrideshSingh/PythonPrograms | Matrix.py | Matrix.py | py | 712 | python | en | code | 0 | github-code | 6 |
11485171714
|
#epidemics.py
import networkx as nx
import random
class Model_ep:
def __init__(self, dyngraph, infected):
self.G = dyngraph
self.I = infected
self.S = []
self.R = []
self.E = []
self.beta = 0.5
self.gamma = 0.5
# self.model = model
self.states = {}
self.nodes = []
self.nodestate = zip(self.nodes, self.states)
self.I_period = 100
self.E_period = 50
self.summary = []
def model_config(self):
for g in self.G:
for node in g.nodes():
self.nodes.append(node)
self.nodes = list(self.nodes)
# print(self.nodes)
for node in self.nodes:
if node in self.I.keys():
# self.states.append(1)
pass
else:
# self.states.append(0)
self.S.append(node)
# print(self.S)
# self.nodestate = zip(nodes, states)
def random_toss(self):
random.seed()
r = random.random()
if r < self.beta:
return True
else:
return False
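    # simulate_SIR: for every graph snapshot, each infected node infects susceptible
    # neighbours with probability beta; nodes whose infection counter reaches I_period
    # are moved to the recovered list.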
def simulate_SIR(self):
self.model_config()
self.summary.append((len(self.S), len(self.I), len(self.R)))
counter = 0
for g in self.G:
for node in g.nodes():
if node in self.I.keys():
for n in g.neighbors(node):
if n not in self.I.keys():
# print(n)
# print(self.I.keys())
# print(self.S)
r = self.random_toss()
if r:
self.I.update({n: 0})
self.S.remove(n)
for node in self.I.keys():
if self.I[node] == self.I_period:
self.I.update({node: 0})
self.R.append(node)
elif self.I[node] != 0:
self.I.update({node: self.I[node] + 1})
return self.summary
| farzana0/graph-epidemics | epidemics.py | epidemics.py | py | 1,553 | python | en | code | 0 | github-code | 6 |
11932429947
|
from module.program import program
from module.convert import convert
from module.openFileJson import openFileJson
def main():
condition = True
while condition :
question = int(input('Pilih menu berikut :\n1. Convert File\n2. Automated Post-Test\nPilih Salah satu :\n'))
if question == 1 :
try:
convert()
print('==========================\nBerhasil Convert File')
except Exception as err:
print(err)
elif question == 2 :
try:
program(openFileJson())
except Exception as err:
print(err)
else :
condition = False
print('Pilihan anda salah! Keluar Program')
main()
| bangef/pz | python/post-test/main.py | main.py | py | 756 | python | en | code | 0 | github-code | 6 |
73019294587
|
#!/bin/python3
import sys
import csv
from pysam import VariantFile
import subprocess
vcf_in = VariantFile(sys.argv[1])
multiVcf = VariantFile(sys.argv[2])
new_header = vcf_in.header
# new_header.generic.add("Multi allelic variants added from Pisces.")
vcf_out = VariantFile(sys.argv[3], 'w', header=new_header)
for record in vcf_in.fetch():
vcf_out.write(record)
for mRecord in multiVcf.fetch():
if record.contig == mRecord.contig and record.pos == mRecord.pos:
# import pdb; pdb.set_trace()
if record.alts[0] != mRecord.alts[0]:
vcf_out.write(mRecord)
| clinical-genomics-uppsala/pomfrey | src/variantCalling/multiallelicAdd.py | multiallelicAdd.py | py | 612 | python | en | code | 0 | github-code | 6 |
39249804904
|
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
ans = []
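# Preorder traversal (root, left, right); visited values accumulate in the module-level list `ans`.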
def preorder(root):
if root is not None:
ans.append(root.val)
if root.left:
preorder(root.left)
if root.right:
preorder(root.right)
| midnightbot/snapalgo | snapalgo/template_generator/preorder.py | preorder.py | py | 375 | python | en | code | 2 | github-code | 6 |
19795243462
|
import csv
from collections import defaultdict, OrderedDict
import itertools
import json
class clicknode:
node_count = itertools.count()
group_count = itertools.count()
group_map = {}
def __init__(self, **nodedict):
group = nodedict['REGION_VIEW_ID']
if group not in clicknode.group_map:
clicknode.group_map[group] = next(clicknode.group_count)
# use dictionary to populate object's fields
self.__dict__.update(nodedict)
self.id = next(clicknode.node_count)
# that each node is a single entity (used in merging group of nodes)
self.count = 1
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def __str__(self):
return str(self.id)+" "+self.REGION_VIEW_ID+" "+self.CLIENT_ID
class clicklink:
def __init__(self, nodea, nodeb, edge):
self.source = nodea
self.dest = nodeb
self.linkwt = edge
def __str__(self):
return ";".join(map(lambda x: str(x), [self.source, self.dest, self.linkwt]))
class linkwt:
name = None
def __init__(self, src, dest):
self.count = 1
self.val = getattr(dest, self.name)
self.length = 80
class countwt(linkwt):
name = "count"
def merge(self, linkwt):
self.val+=linkwt.val
self.count+=linkwt.count
class responsetimewt(linkwt):
name = "RESPONSE_TIME"
def merge(self, linkwt):
self.val = (self.count*self.val + linkwt.val*linkwt.count)/(self.count+linkwt.count)
self.count += linkwt.count
# make nodes from a click logs of one user
def make_nodes(click_session):
click_session.sort(key=lambda x:[x[('DATE')], x[('STARTTIME')]])
last = None
nodes = []
links = []
link_map = {}
for i in click_session:
if i['REGION_VIEW_ID'] == '/AtkNotificationFlowTF/AtkNotificationPage':
continue
node = clicknode(**i)
nodes.append(node)
return {"nodes":nodes}
# make links from the sequence of clicks based on the node field and link type given. such as response time links between all component types or count(frequency) links between all client ids
# returns list of nodes (id, group), list of edges (src, dest, linkwt)
def make_links(nodes, field, link_type):
#Ordered so that index of a key is constant with updates to the dict
node_map = OrderedDict()
if len(nodes) <= 1: return None
last = nodes[0]
link_map = {}
# get the field of an object dynamically
node_map[(getattr(last,field), last.REGION_VIEW_ID)] = nodes[0]
links = []
for node in nodes[1:]:
# None nodes are breaks representing change of sessions
if node is None:
continue
if node not in node_map:
# node not in node_map
node_map[(getattr(node,field), node.REGION_VIEW_ID)] = node
dest = node_map.keys().index((getattr(node,field), node.REGION_VIEW_ID))
src = node_map.keys().index((getattr(last,field), last.REGION_VIEW_ID))
edge = link_type(last, node)
if (src,dest) not in link_map:
link = clicklink(src, dest, edge)
link_map[(src, dest)] = link
else:
link = link_map[(src, dest)]
(link.linkwt).merge(edge)
last = node
return (node_map,link_map)
# to put all elements of the same RVID together, create extra links of 0 weight between all nodes of the same RVID
def converge_rvid_nodes(response):
nodes = response["nodes"]
# better algorithm: get_pairs(nodes) --> sorts the nodes based on group field so that each group ends at a known index
# and then for each group, return all pairs of indices
for i in range(len(nodes)):
for j in range(i+1, len(nodes)):
if nodes[i]["group"] == nodes[j]["group"]:
data = {}
data["source"] = i
data["target"] = j
data["value"] = 0
data["len"] = 40
response["links"].append(data)
# input from make_links()
# outputs json string format to be send as response
def jsonify_data(node_data, link_data):
response = {"nodes":[], "links":[]}
for field, group in node_data:
data = {}
data["name"] = field
data["group"] = group
data["prop"] = node_data[(field, group)].__dict__
response["nodes"].append(data)
for link in link_data:
l = link_data[link]
data = {}
data["source"] = l.source
data["target"] = l.dest
data["value"] = 1
data["len"] = l.linkwt.length
response["links"].append(data)
return response
def parse(lines):
users = defaultdict(list)
rvidDict = defaultdict(list)
ctypeDict = defaultdict(list)
for line in lines:
users[line['DSID']].append(line)
rvidDict[line['REGION_VIEW_ID']].append(line)
ctypeDict[line['COMPONENT_TYPE']].append(line)
return users
# pick the session numbered "num" from click history data
def session_fetch(user_data, num, envfilters=[]):
users = filter(lambda x: user_data[x][0]['ENVIRONMENT'] not in envfilters, user_data)
num = num%len(users)
session = user_data[users[num]]
return make_nodes(session)
def longest_session(users):
b = map(lambda x: [len(users[x]), x], users)
click_one = max(b)[1]
#users[click_one]
click_session = users[click_one]
return make_nodes(click_session)
def all_sessions(users):
nodes = []
for i in users:
s = users[i]
nodes.extend(make_nodes(s)["nodes"])
nodes.append(None)
return {"nodes":nodes}
| arbazkhan002/Clix | clickparser.py | clickparser.py | py | 5,102 | python | en | code | 0 | github-code | 6 |
37516858475
|
import os
import tarfile
import time
import shutil
from scipy.io import loadmat
import csv
DEVKIT_FILE_NAME = "ILSVRC2012_devkit_t12.tar.gz"
TRAIN_FILE_NAME = "ILSVRC2012_img_train.tar"
VAL_FILE_NAME = "ILSVRC2012_img_val.tar"
TEST_FILE_NAME = "ILSVRC2012_img_test_v10102019.tar"
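# untar extracts a .tar or .tar.gz archive into target_dir (created if missing),
# optionally printing per-file progress.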
def untar(file, target_dir="", is_show_detail=False):
file_name = file.split('.')[0]
file_ext = file.split('.')[-1]
mode = 'r'
if file_ext == 'gz':
mode = 'r:gz'
if is_show_detail:
print("read the file" + file)
tar_file = tarfile.open(file, mode)
if is_show_detail:
print("check or create directory")
if target_dir == "":
target_dir = file_name
if not os.path.exists(target_dir):
os.mkdir(target_dir)
files = tar_file.getnames()
if is_show_detail:
total_files = len(files)
current_file_index = 1
print("start to extract files")
for f in files:
if is_show_detail:
print("[" + str(current_file_index) + "/" + str(total_files) + "] extracting: " + f)
tar_file.extract(f, target_dir)
if is_show_detail:
print("[" + str(current_file_index) + "/" + str(total_files) + "] successfully extracted: " + f)
current_file_index += 1
tar_file.close()
def clear_folder(folder):
if os.path.exists(folder):
for root, dirs, files in os.walk(folder):
for file in files:
os.remove(os.path.join(root, file))
print("remove " + os.path.join(root, file))
for directory in dirs:
clear_folder(os.path.join(root, directory))
os.rmdir(folder)
if __name__ == '__main__':
#unzip dev kit
print("{1/4} extract development kit ")
DEVKIT_NAME = DEVKIT_FILE_NAME.split('.')[0]
untar(DEVKIT_FILE_NAME, "devkit")
print("{1/4} parse the validation ground truth")
val_index_label_pairs = {}
path_devkit_data = os.path.join("devkit",DEVKIT_NAME)
path_devkit_data = os.path.join(path_devkit_data,"data")
path_val_ground_truth = os.path.join(path_devkit_data,"ILSVRC2012_validation_ground_truth.txt")
file_val_ground_truth = open(path_val_ground_truth, "r")
lines = file_val_ground_truth.readlines()
line_index = 1
for line in lines:
val_index_label_pairs[line_index]=line.strip('\n')
line_index += 1
print("{1/4} validation ground truth cached")
print("{1/4} create the wnid-label-category-explanation form")
headers = ['wnid', 'label', 'category', 'explanation']
rows = []
path_train_labels = os.path.join(path_devkit_data,"meta.mat")
train_labels = loadmat(path_train_labels)
train_labels = train_labels['synsets']
for i in range(len(train_labels)):
row = {'wnid': train_labels[i][0][1][0], 'label': train_labels[i][0][0][0][0], 'category':train_labels[i][0][2][0], 'explanation': train_labels[i][0][3][0]}
rows.append(row)
with open('train_labels.csv', 'w') as f:
f_csv = csv.DictWriter(f, headers)
f_csv.writeheader()
f_csv.writerows(rows)
print("{1/4} wnid-label-category-explanation form created")
print("{1/4} development kit successfully extracted")
#unzip the training data
print("{2/4} extract training data")
print("{2/4} clean the train folder")
clear_folder("train")
print("{2/4} unzip the training dataset, may take a longer time")
untar(TRAIN_FILE_NAME, "train", is_show_detail=True)
print("{2/4} unzip the subfolders of training dataset, may take a longer time")
train_tar_files = os.listdir("train")
total_train_tar_files = len(train_tar_files)
train_tar_file_counter = 0
for train_tar_file in train_tar_files:
untar("train/"+train_tar_file, is_show_detail=False)
os.remove("train/"+train_tar_file)
train_tar_file_counter += 1
print("[" + str(train_tar_file_counter) + "/" + str(total_train_tar_files) + "] extracted: " + train_tar_file)
print("{2/4} trainning data successfully extracted")
#unzip the validation data
print("{3/4} extract validation data")
print("{3/4} clean the validation folder")
clear_folder("val")
print("{3/4} unzip the validation dataset, may take a longer time")
untar(VAL_FILE_NAME, "val", is_show_detail=True)
val_images = os.listdir('val')
num_val_images = len(val_images)
val_image_counter = 0
for image in val_images:
image_path = os.path.join("val", image)
image_index = int(image.split('.')[0].split('_')[-1])
image_target_dir = os.path.join("val", val_index_label_pairs[image_index])
if not os.path.exists(image_target_dir):
os.mkdir(image_target_dir)
shutil.move(image_path, image_target_dir)
val_image_counter += 1
print("[" + str(val_image_counter) + "/" + str(num_val_images) + "] moved: " + image)
print("{3/4} validation data successfully extracted")
#unzip the test data
print("{4/4} extract testing data")
print("{4/4} clean the test folder")
clear_folder("test")
print("{4/4} unzip the test dataset, may take a longer time")
untar(TEST_FILE_NAME, "test", is_show_detail=True)
print("{4/4} testing data successfully extracted")
print("Finished!")
| lizhouyu/ImageNet-Parser | imagenet.py | imagenet.py | py | 5,306 | python | en | code | 0 | github-code | 6 |
43967535036
|
def fasta_from_SAR_dict(sar_dict,fa_file):
""" makes a multi fasta with candidates from SAR dictionary """
with fa_file as f:
for data in sar_dict.values():
f.writelines(">{}\n".format(data["description"]))
f.writelines("{}\n".format(data["sequence"]))
def gff3_from_SAR_dict(sar_dict,gff3_file):
""" make a multi gff3 with candidates from SAR dictionary """
gff3_cols = ["Seqid","Source","Type","Start","End","Score","Strand","Phase","Attributes"]
with gff3_file as f:
f.writelines(f"{gff3_cols[0]}\t{gff3_cols[1]}\t{gff3_cols[2]}\t{gff3_cols[3]}\t{gff3_cols[4]}\t{gff3_cols[5]}\t{gff3_cols[6]}\t{gff3_cols[7]}\t{gff3_cols[8]}\n")
if sar_dict:
#print(sar_dict)
for name, data in sar_dict.items():
min_idx = 0
f.writelines("##gff-version 3\n")
f.writelines(f"##sequence-region {name}\n")
n_start, n_end = split_seq_string(data["TMD_"+str(data["biggest_sar"])][min_idx][4])
sar_start, sar_end = split_seq_string(data["TMD_"+str(data["biggest_sar"])][min_idx][5])
c_start, c_end = split_seq_string(data["TMD_"+str(data["biggest_sar"])][min_idx][6])
f.writelines(f'{name}\tSAR_finder\tTopological domain\t{n_start}\t{n_end}\t.\t.\t.\tNote=N-terminal net charge is {data["TMD_"+str(data["biggest_sar"])][min_idx][2]}\n')
f.writelines(f'{name}\tSAR_finder\tSAR domain\t{sar_start}\t{sar_end}\t.\t.\t.\tNote=residue % in SAR {[perc for perc in data["TMD_"+str(data["biggest_sar"])][min_idx][3]]},Total % is {round(sum(j for i,j in data["TMD_"+str(data["biggest_sar"])][min_idx][3]),2)}\n')
f.writelines(f'{name}\tSAR_finder\tTopological domain\t{c_start}\t{c_end}\t.\t.\t.\tNote=C-terminus\n')
else:
f.writelines("##gff-version 3\n")
f.writelines(f"##sequence-region\n")
def tab_from_SAR_dict(sar_dict,stat_file,hydrophillic_res, sar_min, sar_max):
""" convert SAR dict to a dataframe """
columns = ["Name","Protein Sequence","Protein Length","SAR Length","SAR Start","Putative SAR Sequence","SAR End",[f"{res}%" for res in hydrophillic_res],"% Total","N-term Sequence","N-term net Charge"] # using different residues for percent calc: [f"{res}%" for res in hydrophillic_res]
with stat_file as f:
f.writelines(f"{columns[0]}\t{columns[1]}\t{columns[2]}\t{columns[3]}\t{columns[4]}\t{columns[5]}\t{columns[6]}\t{columns[7]}\t{columns[8]}\t{columns[9]}\t{columns[10]}\n")
if sar_dict:
#print(sar_dict)
for name, data in sar_dict.items():
for tmd_size in range(sar_max, sar_min-1, -1):
if "TMD_"+str(tmd_size) in data:
for each_match in data["TMD_"+str(tmd_size)]:
if each_match != [""]:
#print(f"{name} - {data}")
#print(each_match)
#for perc in each_match[3]:
# print(perc)
try:
f.writelines(f'{name}\t{data["sequence"]}\t{data["size"]}\t{tmd_size}\t{int(each_match[7])+1}\t{each_match[0]}\t{int(each_match[8])+1}\t{[perc for perc in each_match[3]]}\t{round(sum(j for i,j in each_match[3]),2)}\t{each_match[1]}\t{each_match[2]}\n')
except IndexError:
f.writelines(f'ERROR\tERROR\tERROR\tERROR\tERROR\tERROR\tERROR\tERROR\tERROR\tERROR\tERROR\n')
else:
continue
def stat_file_from_SAR_dict(sar_dict, stat_file, sar_min, sar_max):
""" summary statistics from SAR finder function """
with stat_file as f:
f.writelines("..........:::::: Candidate SAR Proteins ::::::..........\n\n")
if sar_dict:
for data in sar_dict.values():
f.writelines("Protein Description and Name: {}\n".format(data["description"]))
f.writelines("Protein Sequence: {}\n".format(data["sequence"]))
f.writelines("Protein Length: {}\n".format(data["size"]))
f.writelines("SAR Criteria matching region(s)\n")
for tmd_size in range(sar_max, sar_min-1, -1):
if "TMD_"+str(tmd_size) in data:
f.writelines("\nSAR length of {}:\n".format(tmd_size))
for each_match in data["TMD_"+str(tmd_size)]:
if each_match != ['']:
f.writelines("\nPotential SAR domain sequence: {}\n".format(each_match[0]))
f.writelines("N-term sequence: {}\n".format(each_match[1]))
f.writelines("N-term net charge: {}\n".format(each_match[2]))
for each_perc_calc in each_match[3]:
f.writelines("Percent {} content: {}%\n".format(each_perc_calc[0],each_perc_calc[1]))
f.writelines("N-term coords: {}\n".format(each_match[4]))
f.writelines("SAR coords: {}\n".format(each_match[5]))
f.writelines("C-term coords: {}\n".format(each_match[6]))
f.writelines("SAR start: {}\n".format(each_match[7]))
else:
continue
f.writelines("========================================================\n\n")
else:
f.writelines("No candidate SAR Proteins found")
def split_seq_string(input_range, python_indexing=True):
""" splits a #..# sequence into the two respective starts and ends, if python indexing, adds 1, otherwise keeps """
if python_indexing:
values = input_range.split("..")
start =int(values[0]) + 1
end = int(values[1]) + 1
else:
values = input_range.split("..")
start = values[0]
end = values[1]
return start, end
if __name__ == "__main__":
pass
| TAMU-CPT/galaxy-tools | tools/SAR/file_operations.py | file_operations.py | py | 6,173 | python | en | code | 5 | github-code | 6 |
41244789670
|
from datetime import date
ano_atual = date.today().year
nascimento = int(input('Digite seu ano de nascimento: '))
idade = ano_atual - nascimento
if idade == 18:
print('Se alistar')
elif idade < 18:
saldo = 18 - idade
print('ainda faltam {} anos(s) para se alistar'.format(saldo))
ano = ano_atual + saldo
print('Seu alistamento será em {}'.format(ano))
elif idade > 18:
saldo = idade - 18
print('Já devia ter se alistado a {} ano'.format(saldo))
ano = ano_atual - saldo
print('Seu alistamento deveria ter sido em {}'.format(ano))
| andrematos90/Python | CursoEmVideo/Módulo 2/Desafio 039B.py | Desafio 039B.py | py | 570 | python | pt | code | 0 | github-code | 6 |
71971270909
|
import tempfile
import os
import posixpath
import stat
import logging
import collections
from kubeflow.fairing import utils as fairing_utils
from kubeflow.fairing.preprocessors.base import BasePreProcessor
from kubeflow.fairing.builders.append.append import AppendBuilder
from kubeflow.fairing.deployers.job.job import Job
from kubeflow.fairing.deployers.tfjob.tfjob import TfJob
from kubeflow.fairing.constants import constants
from kubeflow.fairing.kubernetes import utils as k8s_utils
from kubeflow.fairing.cloud import storage
from kubeflow.fairing.cloud import gcp
from kubeflow.fairing.frameworks import lightgbm_dist_training_init
from kubeflow.fairing.frameworks import utils
logger = logging.getLogger(__name__)
TRAIN_DATA_FIELDS = ["data", "train", "train_data",
"train_data_file", "data_filename"]
TEST_DATA_FIELDS = ["valid", "test", "valid_data", "valid_data_file", "test_data",
"test_data_file", "valid_filenames"]
NUM_MACHINES_FILEDS = ["num_machines", "num_machine"]
PORT_FIELDS = ["local_listen_port", "local_port"]
MLIST_FIELDS = ["machine_list_filename",
"machine_list_file", "machine_list", "mlist"]
OUTPUT_MODEL_FIELDS = ["output_model", "model_output", "model_out"]
INPUT_MODEL_FIELDS = ["input_model", "model_input", "model_in"]
OUTPUT_RESULT_FIELDS = ["output_result", "predict_result", "prediction_result",
"predict_name", "prediction_name", "pred_name", "name_pred"]
MACHINE_FIELDS = ["machines", "workers", "nodes"]
TREE_LEARNER_FIELDS = ["tree_learner",
"tree", "tree_type", "tree_learner_type"]
ENTRYPOINT = posixpath.join(constants.DEFAULT_DEST_PREFIX, "entrypoint.sh")
LIGHTGBM_EXECUTABLE = "lightgbm"
CONFIG_FILE_NAME = "config.conf"
MLIST_FILE_NAME = "mlist.txt"
BLACKLISTED_FIELDS = PORT_FIELDS + MLIST_FIELDS + MACHINE_FIELDS
WEIGHT_FILE_EXT = ".weight"
DATA_PARALLEL_MODES = ["data", "voting"]
def _modify_paths_in_config(config, field_names, dst_base_dir):
"""modify lightgbm config fields
:param config: config entries
:param field_names: list of fields
:param dst_base_dir: path to destination directory
"""
field_name, field_value = utils.get_config_value(config, field_names)
if field_value is None:
return [], []
src_paths = field_value.split(",")
dst_paths = []
for src_path in src_paths:
file_name = os.path.split(src_path)[-1]
dst_paths.append(posixpath.join(dst_base_dir, file_name))
config[field_name] = ",".join(dst_paths)
return src_paths, dst_paths
def _update_maps(output_map, copy_files, src_paths, dst_paths):
"""update maps
:param output_map: output map entries
:param copy_files: files to be copied
:param src_paths: source paths
:param dst_paths: destination paths
"""
for src_path, dst_path in zip(src_paths, dst_paths):
if os.path.exists(src_path):
output_map[src_path] = dst_path
else:
copy_files[src_path] = dst_path
def _get_commands_for_file_ransfer(files_map):
"""get commands for file transfer
:param files_map: files to be mapped
"""
cmds = []
for k, v in files_map.items():
storage_obj = storage.get_storage_class(k)()
if storage_obj.exists(k):
cmds.append(storage_obj.copy_cmd(k, v))
else:
raise RuntimeError("Remote file {} does't exist".format(k))
return cmds
def _generate_entrypoint(copy_files_before, copy_files_after, config_file,
init_cmds=None, copy_patitioned_files=None):
""" generate entry point
:param copy_files_before: previous copied files
:param copy_files_after: files to be copied
:param config_file: path to config file
:param init_cmds: commands(Default value = None)
:param copy_patitioned_files: (Default value = None)
"""
buf = ["#!/bin/sh",
"set -e"]
if init_cmds:
buf.extend(init_cmds)
    # In data parallel mode, copy files based on the RANK of the worker in the cluster.
# The data is partitioned (#partitions=#workers) and each worker gets one partition of the data.
if copy_patitioned_files and len(copy_patitioned_files) > 0: #pylint:disable=len-as-condition
buf.append("case $RANK in")
for rank, files in copy_patitioned_files.items():
buf.append("\t{})".format(rank))
buf.extend(
["\t\t" + cmd for cmd in _get_commands_for_file_ransfer(files)])
buf.append("\t\t;;")
buf.append("esac")
# copying files that are common to all workers
buf.extend(_get_commands_for_file_ransfer(copy_files_before))
buf.append("echo 'All files are copied!'")
buf.append("{} config={}".format(LIGHTGBM_EXECUTABLE, config_file))
for k, v in copy_files_after.items():
storage_obj = storage.get_storage_class(k)()
buf.append(storage_obj.copy_cmd(v, k))
_, file_name = tempfile.mkstemp()
with open(file_name, 'w') as fh:
content = "\n".join(buf)
fh.write(content)
fh.write("\n")
st = os.stat(file_name)
os.chmod(file_name, st.st_mode | stat.S_IEXEC)
return file_name
def _add_train_weight_file(config, dst_base_dir):
"""add train weight file
:param config: config entries
:param dst_base_dir: destination directory
"""
_, field_value = utils.get_config_value(config, TRAIN_DATA_FIELDS)
if field_value is None:
return [], []
else:
src_paths = field_value.split(",")
weight_paths = [x+WEIGHT_FILE_EXT for x in src_paths]
weight_paths_found = []
weight_paths_dst = []
for path in weight_paths:
found = os.path.exists(path)
if not found:
# in case the path is local and doesn't exist
storage_class = storage.lookup_storage_class(path)
if storage_class:
found = storage_class().exists(path)
if found:
weight_paths_found.append(path)
file_name = os.path.split(path)[-1]
weight_paths_dst.append(
posixpath.join(dst_base_dir, file_name))
return weight_paths_found, weight_paths_dst
def generate_context_files(config, config_file_name, num_machines):
"""generate context files
:param config: config entries
:param config_file_name: config file name
:param num_machines: number of machines
"""
# Using ordered dict to have consistent behaviour around order in which
# files are copied in the worker nodes.
output_map = collections.OrderedDict()
copy_files_before = collections.OrderedDict()
copy_files_after = collections.OrderedDict()
copy_patitioned_files = collections.OrderedDict()
# config will be modified inplace in this function so taking a copy
config = config.copy() # shallow copy is good enough
_, tree_learner = utils.get_config_value(config, TREE_LEARNER_FIELDS)
parition_data = tree_learner and tree_learner.lower() in DATA_PARALLEL_MODES
remote_files = [(copy_files_before,
[TEST_DATA_FIELDS, INPUT_MODEL_FIELDS]),
(copy_files_after,
[OUTPUT_MODEL_FIELDS, OUTPUT_RESULT_FIELDS])]
if parition_data:
train_data_field, train_data_value = utils.get_config_value(
config, TRAIN_DATA_FIELDS)
train_files = train_data_value.split(",")
if len(train_files) != num_machines:
raise RuntimeError("#Training files listed in the {}={} field in the config should be "
"equal to the num_machines={} config value."\
.format(train_data_field, train_data_value, num_machines))
weight_src_paths, weight_dst_paths = _add_train_weight_file(config,
constants.DEFAULT_DEST_PREFIX)
dst = posixpath.join(constants.DEFAULT_DEST_PREFIX, "train_data")
config[train_data_field] = dst
for i, f in enumerate(train_files):
copy_patitioned_files[i] = collections.OrderedDict()
copy_patitioned_files[i][f] = dst
if f+WEIGHT_FILE_EXT in weight_src_paths:
copy_patitioned_files[i][f +
WEIGHT_FILE_EXT] = dst+WEIGHT_FILE_EXT
else:
train_data_field, train_data_value = utils.get_config_value(
config, TRAIN_DATA_FIELDS)
if len(train_data_value.split(",")) > 1:
raise RuntimeError("{} has more than one file specified but tree-learner is set to {} "
"which can't handle multiple files. For distributing data across "
"multiple workers, please use one of {} as a tree-learner method. "
"For more information please refer the LightGBM parallel guide"
" https://github.com/microsoft/LightGBM/blob/master/docs/"
"Parallel-Learning-Guide.rst".format(
train_data_field, tree_learner, DATA_PARALLEL_MODES))
remote_files[0][1].insert(0, TRAIN_DATA_FIELDS)
weight_src_paths, weight_dst_paths = _add_train_weight_file(config,
constants.DEFAULT_DEST_PREFIX)
_update_maps(output_map, copy_files_before, weight_src_paths, weight_dst_paths)
for copy_files, field_names_list in remote_files:
for field_names in field_names_list:
src_paths, dst_paths = _modify_paths_in_config(
config, field_names, constants.DEFAULT_DEST_PREFIX)
_update_maps(output_map, copy_files, src_paths, dst_paths)
if len(output_map) + len(copy_files_before) + len(copy_patitioned_files) == 0:
raise RuntimeError("Both train and test data is missing in the config")
modified_config_file_name = utils.save_properties_config_file(config)
config_in_docker = posixpath.join(
constants.DEFAULT_DEST_PREFIX, CONFIG_FILE_NAME)
output_map[modified_config_file_name] = config_in_docker
output_map[config_file_name] = config_in_docker + ".original"
init_cmds = None
if num_machines > 1:
init_file = lightgbm_dist_training_init.__file__
init_file_name = os.path.split(init_file)[1]
output_map[init_file] = os.path.join(
constants.DEFAULT_DEST_PREFIX, init_file_name)
init_cmds = ["RANK=`python {} {} {}`".format(init_file_name,
CONFIG_FILE_NAME,
MLIST_FILE_NAME)]
entrypoint_file_name = _generate_entrypoint(
copy_files_before, copy_files_after, config_in_docker, init_cmds, copy_patitioned_files)
output_map[entrypoint_file_name] = ENTRYPOINT
output_map[utils.__file__] = os.path.join(
constants.DEFAULT_DEST_PREFIX, "utils.py")
return output_map
def execute(config,
docker_registry,
base_image="gcr.io/kubeflow-fairing/lightgbm:latest",
namespace=None,
stream_log=True,
cores_per_worker=None,
memory_per_worker=None,
pod_spec_mutators=None):
"""Runs the LightGBM CLI in a single pod in user's Kubeflow cluster.
Users can configure it to be a train, predict, and other supported tasks
by using the right config.
    Please refer to https://github.com/microsoft/LightGBM/blob/master/docs/Parameters.rst
for more information on config options.
:param config: config entries
:param docker_registry: docker registry name
:param base_image: base image (Default value = "gcr.io/kubeflow-fairing/lightgbm:latest")
:param namespace: k8s namespace (Default value = None)
    :param stream_log: whether to stream the job logs (Default value = True)
:param cores_per_worker: number of cores per worker (Default value = None)
:param memory_per_worker: memory value per worker (Default value = None)
:param pod_spec_mutators: pod spec mutators (Default value = None)
"""
if not namespace and not fairing_utils.is_running_in_k8s():
namespace = "kubeflow"
namespace = namespace or fairing_utils.get_default_target_namespace()
config_file_name = None
if isinstance(config, str):
config_file_name = config
config = utils.load_properties_config_file(config)
elif isinstance(config, dict):
config_file_name = utils.save_properties_config_file(config)
else:
raise RuntimeError("config should be of type dict or string(filepath) "
"but got {}".format(type(dict)))
utils.scrub_fields(config, BLACKLISTED_FIELDS)
_, num_machines = utils.get_config_value(config, NUM_MACHINES_FILEDS)
num_machines = num_machines or 1
if num_machines:
try:
num_machines = int(num_machines)
except ValueError:
raise ValueError("num_machines value in config should be an int >= 1 "
"but got {}".format(config.get('num_machines')))
if num_machines < 1:
raise ValueError(
"num_machines value in config should >= 1 but got {}".format(num_machines))
if num_machines > 1:
config['machine_list_file'] = "mlist.txt"
output_map = generate_context_files(
config, config_file_name, num_machines)
preprocessor = BasePreProcessor(
command=[ENTRYPOINT], output_map=output_map)
builder = AppendBuilder(registry=docker_registry,
base_image=base_image, preprocessor=preprocessor)
builder.build()
pod_spec = builder.generate_pod_spec()
pod_spec_mutators = pod_spec_mutators or []
pod_spec_mutators.append(gcp.add_gcp_credentials_if_exists)
pod_spec_mutators.append(k8s_utils.get_resource_mutator(
cores_per_worker, memory_per_worker))
if num_machines == 1:
# non-distributed mode
deployer = Job(namespace=namespace,
pod_spec_mutators=pod_spec_mutators,
stream_log=stream_log)
else:
# distributed mode
deployer = TfJob(namespace=namespace,
pod_spec_mutators=pod_spec_mutators,
chief_count=1,
worker_count=num_machines-1,
stream_log=stream_log)
deployer.deploy(pod_spec)
return deployer
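# Illustrative usage sketch only (the bucket paths and registry below are placeholders,
# not values taken from this repository):
#
#   config = {
#       "task": "train",
#       "objective": "binary",
#       "data": "gs://my-bucket/train-part0.csv,gs://my-bucket/train-part1.csv",
#       "num_machines": 2,
#       "tree_learner": "data",
#       "output_model": "gs://my-bucket/model.txt",
#   }
#   execute(config, docker_registry="gcr.io/my-project/fairing")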
| kubeflow/fairing | kubeflow/fairing/frameworks/lightgbm.py | lightgbm.py | py | 14,637 | python | en | code | 336 | github-code | 6 |
7748783174
|
import cv2
from cvzone.HandTrackingModule import HandDetector
import numpy as np
import pyfirmata
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
if not cap.isOpened():
print("Camera couldn't access")
exit()
detector = HandDetector(detectionCon=0.7)
port = "COM7"
board = pyfirmata.Arduino(port)
servo_pinX = board.get_pin('d:5:s') #pin 5 Arduino
servo_pinY = board.get_pin('d:6:s') #pin 6 Arduino
x, y = 150, 230
w, h = 200, 200
col = (255, 0, 255)
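# Main loop: detect the hand, map the draggable rectangle's centre from camera
# coordinates (1280x720) to servo angles (0-180 deg) with np.interp, and write the
# angles to the Arduino servo pins on every frame.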
while cap.isOpened():
success, img = cap.read()
img = detector.findHands(img)
lmList, bboxInfo = detector.findPosition(img)
servoX = np.interp(x, [0, 1280], [0, 180])
servoY = np.interp(y, [0, 720], [0, 180])
if lmList:
dist,_,_ = detector.findDistance(8, 12, img, draw = False)
#print(dist)
fingers = detector.fingersUp()
if fingers[1] == 1 and fingers[2] == 1:
cursor = lmList[8]
if dist < 50:
if x-w // 2 < cursor[0] < x+w-120 // 2 and y-h // 2 < cursor[1] < y+h-120 // 2:
col = (255, 255, 0)
x, y = cursor
cv2.circle(img, cursor, 50, (255, 255, 0), cv2.FILLED)
cv2.putText(img, "HOLD", (cursor[0]-40, cursor[1]), cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255), 2)
else:
col = (255, 0, 255)
cv2.rectangle(img, (x-w // 2, y-h // 2), (x+w // 2, y+h // 2), col, cv2.FILLED)
cv2.putText(img, f'({str(x)}, {str(y)})', (x-90, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.rectangle(img, (40,20), (350,110), (0,255,255), cv2.FILLED)
cv2.putText(img, f'Servo X: {int(servoX)} deg', (50, 50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
cv2.putText(img, f'Servo Y: {int(servoY)} deg', (50, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
servo_pinX.write(servoX)
servo_pinY.write(servoY)
cv2.imshow("Image", img)
cv2.waitKey(1)
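# Illustrative aside (not part of the script): np.interp maps pixel coordinates to
# servo angles linearly, so the screen centre lands on the servo mid position:
#   np.interp(640, [0, 1280], [0, 180])  -> 90.0
#   np.interp(360, [0, 720],  [0, 180])  -> 90.0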
|
rizkydermawan1992/virtualdragdrop
|
drag and drop.py
|
drag and drop.py
|
py
| 1,936 |
python
|
en
|
code
| 5 |
github-code
|
6
|
37617555944
|
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import HttpResponse
from .models import Product
from math import ceil
# Create your views here.
def index(request):
#products = Product.objects.all()
#n = len(products)
allProds = []
catprods = Product.objects.values('cat' , 'Product_id')
cates = {item['cat'] for item in catprods}
for cats in cates:
prod = Product.objects.filter(cat=cats)
n = len(prod)
nSlides = n//4 + ceil((n/4) - (n//4))
#parms = {'no_of_slide':nSlide,'range':range(1,nSlide),'product':products}
#allProds=[[products, range(1, len(products)), nSlides],[products, range(1, len(products)), nSlides]]
allProds.append([prod,range(1,nSlides),nSlides])
parms={'allProds':allProds }
#print(catprods)
#print(cates)
#print(cats)
#print(prod)
return render(request, 'shop/template/index.html',parms)
#{% for products, range(1, len(products)), nSlides in allProds %}
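# Illustrative aside (not part of the views): nSlides above is simply ceil(n / 4),
# e.g. n = 10  ->  10 // 4 + ceil(2.5 - 2) = 2 + 1 = 3 == ceil(10 / 4).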
def productview(request,myid):
product = Product.objects.filter(Product_id=myid)
print(product)
return render(request,'shop/template/prodview.html',{'product':product[0]})
|
a22616/Django-project-2
|
shopcart/shop/views.py
|
views.py
|
py
| 1,244 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8384182801
|
from __future__ import absolute_import
import sys
from optparse import OptionParser
import sumolib # noqa
from functools import reduce
def parse_args():
USAGE = "Usage: " + sys.argv[0] + " <netfile> [options]"
optParser = OptionParser()
optParser.add_option("-o", "--outfile", help="name of output file")
optParser.add_option("-r", "--radius", type=float, default=10., help="maximum air distance around the edge")
optParser.add_option("-t", "--travel-distance", type=float, help="maximum travel distance in the graph")
optParser.add_option("--symmetrical", action="store_true",
default=False, help="extend the bidi-relationship to be symmetrical")
options, args = optParser.parse_args()
try:
options.net, = args
except Exception:
sys.exit(USAGE)
if options.outfile is None:
options.outfile = options.net + ".taz.xml"
return options
def getCandidates(edge, net, radius):
candidates = []
r = min(radius, sumolib.geomhelper.polyLength(edge.getShape()) / 2)
for x, y in edge.getShape():
nearby = set()
for edge2, dist in net.getNeighboringEdges(x, y, r):
nearby.add(edge2)
candidates.append(nearby)
return candidates
ASYM_BIDI_CACHE = {} # edge : opposites
def computeBidiTazAsymByRadius(edge, net, radius):
if edge not in ASYM_BIDI_CACHE:
candidates = getCandidates(edge, net, radius)
opposites = reduce(lambda a, b: a.intersection(b), candidates)
opposites.update(set(edge.getToNode().getOutgoing()).intersection(
set(edge.getFromNode().getIncoming())))
ASYM_BIDI_CACHE[edge] = opposites
return ASYM_BIDI_CACHE[edge]
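# Illustrative aside (not part of the tool): reduce with set.intersection keeps only
# the edges that are near *every* shape point of the edge, e.g.
#   reduce(lambda a, b: a.intersection(b), [{1, 2, 3}, {2, 3}, {3, 4}])  ->  {3}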
def computeAllBidiTaz(net, radius, travelDist, symmetrical):
for edge in net.getEdges():
travelOpposites = set()
if travelDist is not None:
queue = [(edge, -1.)]
while not len(queue) == 0:
edge2, dist = queue.pop()
if edge2 not in travelOpposites and dist < travelDist:
travelOpposites.add(edge2)
if dist == -1.:
dist = 0.
else:
dist += edge2.getLength()
toN = edge2.getToNode()
fromN = edge2.getFromNode()
for e in toN.getOutgoing() + toN.getIncoming() + fromN.getOutgoing() + fromN.getIncoming():
queue.append((e, dist))
if radius is not None and radius > 0.:
opposites = computeBidiTazAsymByRadius(edge, net, radius)
if symmetrical:
candidates = reduce(
lambda a, b: a.union(b), getCandidates(edge, net, radius))
for cand in candidates:
if edge in computeBidiTazAsymByRadius(cand, net, radius):
opposites.add(cand)
travelOpposites.update(opposites)
yield edge, travelOpposites
def main(netFile, outFile, radius, travelDist, symmetrical):
net = sumolib.net.readNet(netFile, withConnections=False, withFoes=False)
with open(outFile, 'w') as outf:
sumolib.writeXMLHeader(
outf, "$Id$") # noqa
outf.write('<tazs>\n')
for taz, edges in computeAllBidiTaz(net, radius, travelDist, symmetrical):
outf.write(' <taz id="%s" edges="%s"/>\n' % (
taz.getID(), ' '.join(sorted([e.getID() for e in edges]))))
outf.write('</tazs>\n')
return net
if __name__ == "__main__":
options = parse_args()
main(options.net, options.outfile, options.radius,
options.travel_distance, options.symmetrical)
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/generateBidiDistricts.py
|
generateBidiDistricts.py
|
py
| 3,730 |
python
|
en
|
code
| 17 |
github-code
|
6
|
36618145736
|
#!/usr/bin/env python3
import copy
import json
import logging
import os
import psutil
import shutil
import sys
import tempfile
from datetime import datetime
# import pysqlite3
from joblib import Parallel, delayed, parallel_backend
from tabulate import tabulate
from . import utils
from .config import Config
class PipelineWise(object):
"""..."""
def __init_logger(self, logger_name, log_file=None, level=logging.INFO):
self.logger = logging.getLogger(logger_name)
# Default log level is less verbose
level = logging.INFO
# Increase log level if debug mode needed
if self.args.debug:
level = logging.DEBUG
# Set the log level
self.logger.setLevel(level)
# Set log formatter and add file and line number in case of DEBUG level
if level == logging.DEBUG:
str_format = (
"%(asctime)s %(processName)s %(levelname)s %(filename)s (%(lineno)s): %(message)s"
)
else:
str_format = "%(asctime)s %(levelname)s: %(message)s"
formatter = logging.Formatter(str_format, "%Y-%m-%d %H:%M:%S")
# Create console handler
fh = logging.StreamHandler(sys.stdout)
fh.setLevel(level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# Create log file handler if required
if log_file and log_file != "*":
fh = logging.FileHandler(log_file)
fh.setLevel(level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def __init__(self, args, config_dir, venv_dir):
self.args = args
self.__init_logger("Pipelinewise CLI", log_file=args.log)
self.config_dir = config_dir
self.venv_dir = venv_dir
self.pipelinewise_bin = os.path.join(self.venv_dir, "cli", "bin", "pipelinewise")
self.config_path = os.path.join(self.config_dir, "config.json")
self.load_config()
if args.tap != "*":
self.tap = self.get_tap(args.target, args.tap)
self.tap_bin = self.get_connector_bin(self.tap["type"])
if args.target != "*":
self.target = self.get_target(args.target)
self.target_bin = self.get_connector_bin(self.target["type"])
self.tranform_field_bin = self.get_connector_bin("transform-field")
def create_consumable_target_config(self, target_config, tap_inheritable_config):
try:
dictA = utils.load_json(target_config)
dictB = utils.load_json(tap_inheritable_config)
# Copy everything from dictB into dictA - Not a real merge
dictA.update(dictB)
# Save the new dict as JSON into a temp file
tempfile_path = tempfile.mkstemp()[1]
utils.save_json(dictA, tempfile_path)
return tempfile_path
except Exception as exc:
raise Exception("Cannot merge JSON files {} {} - {}".format(dictA, dictB, exc))
def create_filtered_tap_properties(
self, target_type, tap_type, tap_properties, tap_state, filters, create_fallback=False
):
"""
Create a filtered version of tap properties file based on specific filter conditions.
Return values:
1) A temporary JSON file where only those tables are selected to
sync which meet the filter criterias
2) List of tap_stream_ids where filter criterias matched
3) OPTIONAL when create_fallback is True:
Temporary JSON file with table that don't meet the
filter criterias
4) OPTIONAL when create_fallback is True:
List of tap_stream_ids where filter criteries don't match
"""
        # Get filter conditions with default values from the input dictionary
# Nothing selected by default
f_selected = filters.get("selected", None)
f_target_type = filters.get("target_type", None)
f_tap_type = filters.get("tap_type", None)
f_replication_method = filters.get("replication_method", None)
f_initial_sync_required = filters.get("initial_sync_required", None)
# Lists of tables that meet and don't meet the filter criterias
filtered_tap_stream_ids = []
fallback_filtered_tap_stream_ids = []
self.logger.debug("Filtering properties JSON by conditions: {}".format(filters))
try:
# Load JSON files
properties = utils.load_json(tap_properties)
state = utils.load_json(tap_state)
            # Create a dictionary for tables that don't meet the filter criteria
fallback_properties = copy.deepcopy(properties) if create_fallback else None
            # For each stream (table) in the original properties
self.logger.info(tap_properties)
for stream_idx, stream in enumerate(properties.get("streams", tap_properties)):
selected = False
replication_method = None
initial_sync_required = False
# Collect required properties from the properties file
tap_stream_id = stream.get("tap_stream_id")
table_name = stream.get("table_name")
metadata = stream.get("metadata", [])
# Collect further properties from the properties file under the metadata key
table_meta = {}
for meta_idx, meta in enumerate(metadata):
if type(meta) == dict and len(meta.get("breadcrumb", [])) == 0:
table_meta = meta.get("metadata")
break
# table_meta = next((i for i in metadata if type(i) == dict and len(i.get("breadcrumb", [])) == 0), {}).get("metadata")
selected = table_meta.get("selected")
replication_method = table_meta.get("replication-method")
# Detect if initial sync is required. Look into the state file, get the bookmark
# for the current stream (table) and if valid bookmark doesn't exist then
# initial sync is required
bookmarks = state.get("bookmarks", {}) if type(state) == dict else {}
stream_bookmark = bookmarks.get(tap_stream_id, {})
if (
# Initial sync is required for INCREMENTAL and LOG_BASED tables
# where the state file has no valid bookmark.
#
# Valid bookmark keys:
# 'replication_key_value' key created for INCREMENTAL tables
# 'log_pos' key created by MySQL LOG_BASED tables
# 'lsn' key created by PostgreSQL LOG_BASED tables
#
# FULL_TABLE replication method is taken as initial sync required
replication_method == "FULL_TABLE"
or (
(replication_method in ["INCREMENTAL", "LOG_BASED"])
and (
not (
"replication_key_value" in stream_bookmark
or "log_pos" in stream_bookmark
or "lsn" in stream_bookmark
)
)
)
):
initial_sync_required = True
# Compare actual values to the filter conditions.
# Set the "selected" key to True if actual values meet the filter criterias
# Set the "selected" key to False if the actual values don't meet the filter criterias
if (
(f_selected == None or selected == f_selected)
and (f_target_type == None or target_type in f_target_type)
and (f_tap_type == None or tap_type in f_tap_type)
and (f_replication_method == None or replication_method in f_replication_method)
and (
f_initial_sync_required == None
or initial_sync_required == f_initial_sync_required
)
):
self.logger.debug(
"""Filter condition(s) matched:
Table : {}
Tap Stream ID : {}
Selected : {}
Replication Method : {}
Init Sync Required : {}
""".format(
table_name,
tap_stream_id,
selected,
replication_method,
initial_sync_required,
)
)
# Filter condition matched: mark table as selected to sync
properties["streams"][stream_idx]["metadata"][meta_idx]["metadata"][
"selected"
] = True
filtered_tap_stream_ids.append(tap_stream_id)
                    # Filter condition matched: mark table as not selected to sync in the fallback properties
if create_fallback:
fallback_properties["streams"][stream_idx]["metadata"][meta_idx][
"metadata"
]["selected"] = False
else:
# Filter condition didn't match: mark table as not selected to sync
properties["streams"][stream_idx]["metadata"][meta_idx]["metadata"][
"selected"
] = False
# Filter condition didn't match: mark table as selected to sync in the fallback properties
# Fallback only if the table is selected in the original properties
if create_fallback and selected == True:
fallback_properties["streams"][stream_idx]["metadata"][meta_idx][
"metadata"
]["selected"] = True
fallback_filtered_tap_stream_ids.append(tap_stream_id)
# Save the generated properties file(s) and return
# Fallback required: Save filtered and fallback properties JSON
if create_fallback:
# Save to files: filtered and fallback properties
temp_properties_path = tempfile.mkstemp()[1]
utils.save_json(properties, temp_properties_path)
temp_fallback_properties_path = tempfile.mkstemp()[1]
utils.save_json(fallback_properties, temp_fallback_properties_path)
return (
temp_properties_path,
filtered_tap_stream_ids,
temp_fallback_properties_path,
fallback_filtered_tap_stream_ids,
)
# Fallback not required: Save only the filtered properties JSON
else:
                # Save only the filtered properties to a temp file
temp_properties_path = tempfile.mkstemp()[1]
utils.save_json(properties, temp_properties_path)
return temp_properties_path, filtered_tap_stream_ids
except Exception as exc:
raise Exception("Cannot create JSON file - {}".format(exc))
def load_config(self):
self.logger.debug("Loading config at {}".format(self.config_path))
config = utils.load_json(self.config_path)
if config:
self.config = config
else:
self.config = {}
def get_tap_dir(self, target_id, tap_id):
return os.path.join(self.config_dir, target_id, tap_id)
def get_tap_log_dir(self, target_id, tap_id):
return os.path.join(self.get_tap_dir(target_id, tap_id), "log")
def get_target_dir(self, target_id):
return os.path.join(self.config_dir, target_id)
def get_connector_bin(self, connector_type):
return os.path.join(self.venv_dir, connector_type, "bin", connector_type)
def get_connector_files(self, connector_dir):
return {
"config": os.path.join(connector_dir, "config.json"),
"inheritable_config": os.path.join(connector_dir, "inheritable_config.json"),
"properties": os.path.join(connector_dir, "properties.json"),
"state": os.path.join(connector_dir, "state.json"),
"transformation": os.path.join(connector_dir, "transformation.json"),
"selection": os.path.join(connector_dir, "selection.json"),
}
def get_targets(self):
self.logger.debug("Getting targets from {}".format(self.config_path))
self.load_config()
try:
targets = self.config.get("targets", [])
except Exception as exc:
raise Exception("Targets not defined")
return targets
def get_target(self, target_id):
self.logger.debug("Getting {} target".format(target_id))
targets = self.get_targets()
target = False
target = next((item for item in targets if item["id"] == target_id), False)
if target == False:
raise Exception("Cannot find {} target".format(target_id))
target_dir = self.get_target_dir(target_id)
if os.path.isdir(target_dir):
target["files"] = self.get_connector_files(target_dir)
else:
raise Exception("Cannot find target at {}".format(target_dir))
return target
def get_taps(self, target_id):
self.logger.debug("Getting taps from {} target".format(target_id))
target = self.get_target(target_id)
try:
taps = target["taps"]
# Add tap status
for tap_idx, tap in enumerate(taps):
taps[tap_idx]["status"] = self.detect_tap_status(target_id, tap["id"])
except Exception as exc:
raise Exception("No taps defined for {} target".format(target_id))
return taps
def get_tap(self, target_id, tap_id):
self.logger.debug("Getting {} tap from target {}".format(tap_id, target_id))
taps = self.get_taps(target_id)
tap = False
tap = next((item for item in taps if item["id"] == tap_id), False)
if tap == False:
raise Exception("Cannot find {} tap in {} target".format(tap_id, target_id))
tap_dir = self.get_tap_dir(target_id, tap_id)
if os.path.isdir(tap_dir):
tap["files"] = self.get_connector_files(tap_dir)
else:
raise Exception("Cannot find tap at {}".format(tap_dir))
# Add target and status details
tap["target"] = self.get_target(target_id)
tap["status"] = self.detect_tap_status(target_id, tap_id)
return tap
def merge_schemas(self, old_schema, new_schema):
schema_with_diff = new_schema
if not old_schema:
schema_with_diff = new_schema
else:
new_streams = new_schema["streams"]
old_streams = old_schema["streams"]
for new_stream_idx, new_stream in enumerate(new_streams):
new_tap_stream_id = new_stream["tap_stream_id"]
old_stream = False
old_stream = next(
(item for item in old_streams if item["tap_stream_id"] == new_tap_stream_id),
False,
)
# Is this a new stream?
if not old_stream:
new_schema["streams"][new_stream_idx]["is-new"] = True
# Copy stream selection from the old properties
else:
# Find table specific metadata entries in the old and new streams
new_stream_table_mdata_idx = 0
old_stream_table_mdata_idx = 0
try:
new_stream_table_mdata_idx = [
i
for i, md in enumerate(new_stream["metadata"])
if md["breadcrumb"] == []
][0]
old_stream_table_mdata_idx = [
i
for i, md in enumerate(old_stream["metadata"])
if md["breadcrumb"] == []
][0]
except Exception:
False
# Copy is-new flag from the old stream
try:
new_schema["streams"][new_stream_idx]["is-new"] = old_stream["is-new"]
except Exception:
False
# Copy selected from the old stream
try:
new_schema["streams"][new_stream_idx]["metadata"][
new_stream_table_mdata_idx
]["metadata"]["selected"] = old_stream["metadata"][
old_stream_table_mdata_idx
][
"metadata"
][
"selected"
]
except Exception:
False
# Copy replication method from the old stream
try:
new_schema["streams"][new_stream_idx]["metadata"][
new_stream_table_mdata_idx
]["metadata"]["replication-method"] = old_stream["metadata"][
old_stream_table_mdata_idx
][
"metadata"
][
"replication-method"
]
except Exception:
False
# Copy replication key from the old stream
try:
new_schema["streams"][new_stream_idx]["metadata"][
new_stream_table_mdata_idx
]["metadata"]["replication-key"] = old_stream["metadata"][
old_stream_table_mdata_idx
][
"metadata"
][
"replication-key"
]
except Exception:
False
# Is this new or modified field?
new_fields = new_schema["streams"][new_stream_idx]["schema"]["properties"]
old_fields = old_stream["schema"]["properties"]
for new_field_key in new_fields:
new_field = new_fields[new_field_key]
new_field_mdata_idx = -1
# Find new field metadata index
for i, mdata in enumerate(
new_schema["streams"][new_stream_idx]["metadata"]
):
if (
len(mdata["breadcrumb"]) == 2
and mdata["breadcrumb"][0] == "properties"
and mdata["breadcrumb"][1] == new_field_key
):
new_field_mdata_idx = i
# Field exists
if new_field_key in old_fields.keys():
old_field = old_fields[new_field_key]
old_field_mdata_idx = -1
# Find old field metadata index
for i, mdata in enumerate(old_stream["metadata"]):
if (
len(mdata["breadcrumb"]) == 2
and mdata["breadcrumb"][0] == "properties"
and mdata["breadcrumb"][1] == new_field_key
):
old_field_mdata_idx = i
new_mdata = new_schema["streams"][new_stream_idx]["metadata"][
new_field_mdata_idx
]["metadata"]
old_mdata = old_stream["metadata"][old_field_mdata_idx]["metadata"]
# Copy is-new flag from the old properties
try:
new_mdata["is-new"] = old_mdata["is-new"]
except Exception:
False
# Copy is-modified flag from the old properties
try:
new_mdata["is-modified"] = old_mdata["is-modified"]
except Exception:
False
# Copy field selection from the old properties
try:
new_mdata["selected"] = old_mdata["selected"]
except Exception:
False
# Field exists and type is the same - Do nothing more in the schema
if new_field == old_field:
self.logger.debug(
"Field exists in {} stream with the same type: {} : {}".format(
new_tap_stream_id, new_field_key, new_field
)
)
# Field exists but types are different - Mark the field as modified in the metadata
else:
self.logger.debug(
"Field exists in {} stream but types are different: {} : {}".format(
new_tap_stream_id, new_field_key, new_field
)
)
try:
new_schema["streams"][new_stream_idx]["metadata"][
new_field_mdata_idx
]["metadata"]["is-modified"] = True
new_schema["streams"][new_stream_idx]["metadata"][
new_field_mdata_idx
]["metadata"]["is-new"] = False
except Exception:
False
# New field - Mark the field as new in the metadata
else:
self.logger.debug(
"New field in stream {}: {} : {}".format(
new_tap_stream_id, new_field_key, new_field
)
)
try:
new_schema["streams"][new_stream_idx]["metadata"][
new_field_mdata_idx
]["metadata"]["is-new"] = True
except Exception:
False
schema_with_diff = new_schema
return schema_with_diff
def make_default_selection(self, schema, selection_file):
if os.path.isfile(selection_file):
self.logger.info("Loading pre defined selection from {}".format(selection_file))
tap_selection = utils.load_json(selection_file)
selection = tap_selection["selection"]
not_selected = []
streams = schema["streams"]
for stream_idx, stream in enumerate(streams):
tap_stream_id = stream.get("tap_stream_id")
tap_stream_sel = False
for sel in selection:
if "tap_stream_id" in sel and tap_stream_id == sel["tap_stream_id"]:
tap_stream_sel = sel
# Find table specific metadata entries in the old and new streams
try:
stream_table_mdata_idx = [
i for i, md in enumerate(stream["metadata"]) if md["breadcrumb"] == []
][0]
except Exception:
False
if tap_stream_sel:
self.logger.info(
"Mark {} tap_stream_id as selected with properties {}".format(
tap_stream_id, tap_stream_sel
)
)
schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx]["metadata"][
"selected"
] = True
if "replication_method" in tap_stream_sel:
schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx][
"metadata"
]["replication-method"] = tap_stream_sel["replication_method"]
if "replication_key" in tap_stream_sel:
schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx][
"metadata"
]["replication-key"] = tap_stream_sel["replication_key"]
else:
# self.logger.info("Mark {} tap_stream_id as not selected".format(tap_stream_id))
not_selected.append(tap_stream_id)
schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx]["metadata"][
"selected"
] = False
if not_selected:
self.logger.info("The following were not selected: {}".format(", ".join(not_selected)))
return schema
def init(self):
self.logger.info("Initialising new project {}...".format(self.args.name))
project_dir = os.path.join(os.getcwd(), self.args.name)
# Create project dir if not exists
if os.path.exists(project_dir):
self.logger.error(
"Directory exists and cannot create new project: {}".format(self.args.name)
)
sys.exit(1)
else:
os.mkdir(project_dir)
for yaml in sorted(utils.get_sample_file_paths()):
yaml_basename = os.path.basename(yaml)
dst = os.path.join(project_dir, yaml_basename)
self.logger.info(" - Creating {}...".format(yaml_basename))
shutil.copyfile(yaml, dst)
def test_tap_connection(self):
tap_id = self.tap["id"]
tap_type = self.tap["type"]
target_id = self.target["id"]
target_type = self.target["type"]
self.logger.info(
"Testing {} ({}) tap connection in {} ({}) target".format(
tap_id, tap_type, target_id, target_type
)
)
# Generate and run the command to run the tap directly
# We will use the discover option to test connection
tap_config = self.tap["files"]["config"]
command = "{} --config {} --discover".format(self.tap_bin, tap_config)
result = utils.run_command(command)
# Get output and errors from tap
rc, new_schema, tap_output = result
if rc != 0:
self.logger.error("Testing tap connection ({} - {}) FAILED".format(target_id, tap_id))
sys.exit(1)
        # If the connection succeeds then the response needs to be a valid JSON string
if not utils.is_json(new_schema):
self.logger.error(
"Schema discovered by {} ({}) is not a valid JSON.".format(tap_id, tap_type)
)
sys.exit(1)
else:
self.logger.info("Testing tap connection ({} - {}) PASSED".format(target_id, tap_id))
def discover_tap(self, tap=None, target=None):
# Define tap props
if tap is None:
tap_id = self.tap.get("id")
tap_type = self.tap.get("type")
tap_config_file = self.tap.get("files", {}).get("config")
tap_properties_file = self.tap.get("files", {}).get("properties")
tap_selection_file = self.tap.get("files", {}).get("selection")
tap_bin = self.tap_bin
else:
tap_id = tap.get("id")
tap_type = tap.get("type")
tap_config_file = tap.get("files", {}).get("config")
tap_properties_file = tap.get("files", {}).get("properties")
tap_selection_file = tap.get("files", {}).get("selection")
tap_bin = self.get_connector_bin(tap_type)
# Define target props
if target is None:
target_id = self.target.get("id")
target_type = self.target.get("type")
else:
target_id = target.get("id")
target_type = target.get("type")
self.logger.info(
"Discovering {} ({}) tap in {} ({}) target...".format(
tap_id, tap_type, target_id, target_type
)
)
# Generate and run the command to run the tap directly
command = "{} --config {} --discover".format(tap_bin, tap_config_file)
result = utils.run_command(command)
# Get output and errors from tap
rc, new_schema, output = result
if rc != 0:
return "{} - {}".format(target_id, tap_id)
# Convert JSON string to object
try:
new_schema = json.loads(new_schema)
except Exception as exc:
return "Schema discovered by {} ({}) is not a valid JSON.".format(tap_id, tap_type)
# Merge the old and new schemas and diff changes
old_schema = utils.load_json(tap_properties_file)
if old_schema:
schema_with_diff = self.merge_schemas(old_schema, new_schema)
else:
schema_with_diff = new_schema
        # Make selection from selection.json if it exists
try:
schema_with_diff = self.make_default_selection(schema_with_diff, tap_selection_file)
schema_with_diff = utils.delete_keys_from_dict(
self.make_default_selection(schema_with_diff, tap_selection_file),
# Removing multipleOf json schema validations from properties.json,
# that's causing run time issues
["multipleOf"],
)
except Exception as exc:
return "Cannot load selection JSON at {}. {}".format(tap_selection_file, str(exc))
# Save the new catalog into the tap
try:
self.logger.info(
"Writing new properties file with changes into {}".format(tap_properties_file)
)
utils.save_json(schema_with_diff, tap_properties_file)
except Exception as exc:
return "Cannot save file. {}".format(str(exc))
def detect_tap_status(self, target_id, tap_id, set_pid=False):
self.logger.debug("Detecting {} tap status in {} target".format(tap_id, target_id))
tap_dir = self.get_tap_dir(target_id, tap_id)
log_dir = self.get_tap_log_dir(target_id, tap_id)
connector_files = self.get_connector_files(tap_dir)
current_pid = os.getpid()
pid_path = os.path.join(tap_dir, "pid")
status = {
"currentStatus": "unknown",
"lastStatus": "unknown",
"lastTimestamp": None,
"pid": current_pid,
}
if os.path.exists(pid_path):
try:
executed_pid = int(open(pid_path, "r").readlines()[0])
if executed_pid in psutil.pids():
status["currentStatus"] = "running"
return status
except:
pass
if set_pid:
if os.path.exists(pid_path):
os.remove(pid_path)
open(pid_path, "w").write(str(current_pid))
# Tap exists but configuration not completed
if not os.path.isfile(connector_files["config"]):
status["currentStatus"] = "not-configured"
# Configured and not running
else:
status["currentStatus"] = "ready"
# Get last run instance
if os.path.isdir(log_dir):
log_files = utils.search_files(
log_dir, patterns=["*.log.success", "*.log.failed"], sort=True
)
if len(log_files) > 0:
last_log_file = log_files[0]
log_attr = utils.extract_log_attributes(last_log_file)
status["lastStatus"] = log_attr["status"]
status["lastTimestamp"] = log_attr["timestamp"]
return status
def status(self):
targets = self.get_targets()
tab_headers = [
"Tap ID",
"Tap Type",
"Target ID",
"Target Type",
"Enabled",
"Status",
"Last Sync",
"Last Sync Result",
]
successful_taps = []
unsuccessful_taps = []
unknown_taps = []
for target in targets:
taps = self.get_taps(target["id"])
for tap in taps:
current_status = tap.get("status", {}).get("lastStatus", "<Unknown>")
tap_status = [
tap.get("id", "<Unknown>"),
tap.get("type", "<Unknown>"),
target.get("id", "<Unknown>"),
target.get("type", "<Unknown>"),
tap.get("enabled", "<Unknown>"),
tap.get("status", {}).get("currentStatus", "<Unknown>"),
tap.get("status", {}).get("lastTimestamp", "<Unknown>"),
tap.get("status", {}).get("lastStatus", "<Unknown>"),
]
if current_status == "success":
successful_taps.append(tap_status)
elif current_status == "failed":
unsuccessful_taps.append(tap_status)
else:
unknown_taps.append(tap_status)
if successful_taps:
print(f"{len(successful_taps)} currently succeeding\n")
print(
tabulate(
sorted(successful_taps, key=lambda x: x[0]),
headers=tab_headers,
tablefmt="simple",
)
)
print("\n")
if unsuccessful_taps:
print(f"{len(unsuccessful_taps)} currently failing\n")
print(
tabulate(
sorted(unsuccessful_taps, key=lambda x: x[0]),
headers=tab_headers,
tablefmt="simple",
)
)
print("\n")
if unknown_taps:
print(f"{len(unknown_taps)} currently in an unknown state\n")
print(
tabulate(
sorted(unknown_taps, key=lambda x: x[0]), headers=tab_headers, tablefmt="simple"
)
)
def reset_tap(self):
tap_id = self.tap["id"]
tap_type = self.tap["type"]
target_id = self.target["id"]
target_type = self.target["type"]
log_dir = self.get_tap_log_dir(target_id, tap_id)
self.logger.info("Resetting {} tap in {} target".format(tap_id, target_id))
# Run only if tap enabled
if not self.tap.get("enabled", False):
self.logger.info(
"Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
)
sys.exit(0)
# Run only if not running
tap_status = self.detect_tap_status(target_id, tap_id)
if tap_status["currentStatus"] != "running":
self.logger.info("Tap is not currently running, nothing to reset")
sys.exit(0)
os.remove(utils.search_files(log_dir, patterns=["*.log.running"])[0])
self.logger.info("Tap log successfully removed")
def clean_logs(self, to_keep=2):
"""
Removes all but the most recent logs, cleaning space but preserving last run success/failure
"""
targets = self.get_targets()
for target in targets:
taps = self.get_taps(target["id"])
for tap in taps:
self.logger.info("Cleaning {}".format(tap["id"]))
log_dir = self.get_tap_log_dir(target["id"], tap["id"])
log_files = utils.search_files(
log_dir, patterns=["*.log.success", "*.log.failed"], sort=True
)
if len(log_files) < to_keep:
self.logger.info("No logs to clean")
for file in log_files[to_keep:]:
os.remove(os.path.join(log_dir, file))
self.logger.info("{} files removed".format(len(log_files[1:])))
def run_tap_singer(
self,
tap_type,
tap_config,
tap_properties,
tap_state,
tap_transformation,
target_config,
log_file,
):
"""
Generating and running piped shell command to sync tables using singer taps and targets
"""
new_tap_state = tempfile.mkstemp()[1]
        # Following the singer spec the catalog JSON file needs to be passed by the --catalog argument
        # However some taps (i.e. tap-mysql and tap-postgres) require it as --properties
        # This is probably for historical reasons and needs clarifying on the Singer slack channels
tap_catalog_argument = utils.get_tap_property_by_tap_type(tap_type, "tap_catalog_argument")
        # Add state argument if it exists to extract data incrementally
if not os.path.isfile(tap_state):
open(tap_state, "w").write("{}")
tap_state_arg = "--state {}".format(tap_state)
# Remove the state and rewrite the config if necessary
if self.args.start_date:
self.original_start = None
config = json.load(open(tap_config))
if "start_date" in config.keys():
self.original_start = config["start_date"]
config["start_date"] = datetime.strptime(self.args.start_date, "%Y-%m-%d").strftime(
"%Y-%m-%dT00:00:00Z"
)
open(tap_config, "w").write(json.dumps(config))
os.remove(tap_state)
open(tap_state, "w").write("{}")
else:
self.logger.warning(
"Tried to start from {} but this tap doesn't use start date".format(
self.args.start_date
)
)
# Detect if transformation is needed
has_transformation = False
if os.path.isfile(tap_transformation):
tr = utils.load_json(tap_transformation)
if "transformations" in tr and len(tr["transformations"]) > 0:
has_transformation = True
# Run without transformation in the middle
if not has_transformation:
command = " ".join(
(
" {} --config {} {} {} {}".format(
self.tap_bin,
tap_config,
tap_catalog_argument,
tap_properties,
tap_state_arg,
),
"| {} --config {}".format(self.target_bin, target_config),
"> {}".format(new_tap_state),
)
)
self.logger.info(command)
# Run with transformation in the middle
else:
command = " ".join(
(
" {} --config {} {} {} {}".format(
self.tap_bin,
tap_config,
tap_catalog_argument,
tap_properties,
tap_state_arg,
),
"| {} --config {}".format(self.tranform_field_bin, tap_transformation),
"| {} --config {}".format(self.target_bin, target_config),
"> {}".format(new_tap_state),
)
)
# Do not run if another instance is already running
log_dir = os.path.dirname(log_file)
# Run command
result = utils.run_command(command, log_file)
# Save the new state file if created correctly
if utils.is_json_file(new_tap_state):
self.logger.info("Writing new state file")
self.logger.info(open(new_tap_state, "r").readlines())
shutil.copyfile(new_tap_state, tap_state)
os.remove(new_tap_state)
else:
self.logger.warning("Not a valid state record")
# Reset the config back
if self.args.start_date:
if self.original_start:
config["start_date"] = self.original_start
os.remove(tap_config)
open(tap_config, "w").write(json.dumps(config))
def run_tap_fastsync(
self,
tap_type,
target_type,
tap_config,
tap_properties,
tap_state,
tap_transformation,
target_config,
log_file,
):
"""
Generating and running shell command to sync tables using the native fastsync components
"""
fastsync_bin = utils.get_fastsync_bin(self.venv_dir, tap_type, target_type)
        # Add state argument if it exists to extract data incrementally
tap_transform_arg = ""
if os.path.isfile(tap_transformation):
tap_transform_arg = "--transform {}".format(tap_transformation)
command = " ".join(
(
" {} ".format(fastsync_bin),
"--tap {}".format(tap_config),
"--properties {}".format(tap_properties),
"--state {}".format(tap_state),
"--target {}".format(target_config),
"{}".format(tap_transform_arg),
"{}".format("--tables {}".format(self.args.tables) if self.args.tables else ""),
)
)
# Do not run if another instance is already running
log_dir = os.path.dirname(log_file)
# Run command
result = utils.run_command(command, log_file)
def run_tap(self):
"""
Generating command(s) to run tap to sync data from source to target
The generated commands can use one or multiple commands of:
1. Fastsync:
Native and optimised component to sync table from a
specific type of tap into a specific type of target.
This command will be used automatically when FULL_TABLE
replication method selected or when initial sync is required.
2. Singer Taps and Targets:
Dynamic components following the singer specification to
sync tables from multiple sources to multiple targets.
This command will be used automatically when INCREMENTAL
and LOG_BASED replication method selected. FULL_TABLE
replication are not using the singer components because
they are too slow to sync large tables.
"""
tap_id = self.tap["id"]
tap_type = self.tap["type"]
target_id = self.target["id"]
target_type = self.target["type"]
self.logger.info("Running {} tap in {} target".format(tap_id, target_id))
# Run only if tap enabled
if not self.tap.get("enabled", False):
self.logger.info(
"Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
)
sys.exit(0)
# Run only if not running
tap_status = self.detect_tap_status(target_id, tap_id, set_pid=True)
self.logger.info(tap_status)
if tap_status["currentStatus"] == "running":
self.logger.info(
"Tap {} is currently running. Do nothing and exit normally.".format(
self.tap["name"]
)
)
sys.exit(0)
# Generate and run the command to run the tap directly
tap_config = self.tap["files"]["config"]
tap_inheritable_config = self.tap["files"]["inheritable_config"]
tap_properties = self.tap["files"]["properties"]
tap_state = self.tap["files"]["state"]
tap_transformation = self.tap["files"]["transformation"]
target_config = self.target["files"]["config"]
        # Some target attributes can be passed and overridden by the tap (aka. inheritable config)
# We merge the two configs and use that with the target
cons_target_config = self.create_consumable_target_config(
target_config, tap_inheritable_config
)
# Output will be redirected into target and tap specific log directory
log_dir = self.get_tap_log_dir(target_id, tap_id)
current_time = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        # Create fastsync and singer specific filtered tap properties that contain only
        # the tables that need to be synced by the specific command
(
tap_properties_fastsync,
fastsync_stream_ids,
tap_properties_singer,
singer_stream_ids,
) = self.create_filtered_tap_properties(
target_type,
tap_type,
tap_properties,
tap_state,
{
"selected": True,
"target_type": ["target-snowflake", "target-redshift"],
"tap_type": ["tap-mysql", "tap-postgres"],
"initial_sync_required": True,
},
create_fallback=True,
)
log_file_fastsync = os.path.join(
log_dir, "{}-{}-{}.fastsync.log".format(target_id, tap_id, current_time)
)
log_file_singer = os.path.join(
log_dir, "{}-{}-{}.singer.log".format(target_id, tap_id, current_time)
)
try:
# Run fastsync for FULL_TABLE replication method
if len(fastsync_stream_ids) > 0:
self.logger.info(
"Table(s) selected to sync by fastsync: {}".format(fastsync_stream_ids)
)
self.run_tap_fastsync(
tap_type,
target_type,
tap_config,
tap_properties_fastsync,
tap_state,
tap_transformation,
cons_target_config,
log_file_fastsync,
)
else:
self.logger.info("No table available that needs to be sync by fastsync")
# Run singer tap for INCREMENTAL and LOG_BASED replication methods
if len(singer_stream_ids) > 0:
self.logger.info(
"Table(s) selected to sync by singer: {}".format(singer_stream_ids)
)
self.run_tap_singer(
tap_type,
tap_config,
tap_properties_singer,
tap_state,
tap_transformation,
cons_target_config,
log_file_singer,
)
else:
self.logger.info("No table available that needs to be sync by singer")
# Delete temp files if there is any
except utils.RunCommandException as exc:
self.logger.error(exc)
utils.silentremove(cons_target_config)
utils.silentremove(tap_properties_fastsync)
utils.silentremove(tap_properties_singer)
sys.exit(1)
except Exception as exc:
utils.silentremove(cons_target_config)
utils.silentremove(tap_properties_fastsync)
utils.silentremove(tap_properties_singer)
raise exc
utils.silentremove(cons_target_config)
utils.silentremove(tap_properties_fastsync)
utils.silentremove(tap_properties_singer)
def sync_tables(self):
"""
Sync every or a list of selected tables from a specific tap.
The function is using the fastsync components hence it's only
available for taps and targets where the native and optimised
fastsync component is implemented.
"""
tap_id = self.tap["id"]
tap_type = self.tap["type"]
target_id = self.target["id"]
target_type = self.target["type"]
fastsync_bin = utils.get_fastsync_bin(self.venv_dir, tap_type, target_type)
self.logger.info(
"Syncing tables from {} ({}) to {} ({})...".format(
tap_id, tap_type, target_id, target_type
)
)
# Run only if tap enabled
if not self.tap.get("enabled", False):
self.logger.info(
"Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
)
sys.exit(0)
# Run only if tap not running
tap_status = self.detect_tap_status(target_id, tap_id)
if tap_status["currentStatus"] == "running":
self.logger.info(
"Tap {} is currently running and cannot sync. Stop the tap and try again.".format(
self.tap["name"]
)
)
sys.exit(1)
# Tap exists but configuration not completed
if not os.path.isfile(fastsync_bin):
self.logger.error(
"Table sync function is not implemented from {} datasources to {} type of targets".format(
tap_type, target_type
)
)
sys.exit(1)
# Generate and run the command to run the tap directly
tap_config = self.tap["files"]["config"]
tap_inheritable_config = self.tap["files"]["inheritable_config"]
tap_properties = self.tap["files"]["properties"]
tap_state = self.tap["files"]["state"]
tap_transformation = self.tap["files"]["transformation"]
target_config = self.target["files"]["config"]
        # Some target attributes can be passed and overridden by the tap (aka. inheritable config)
# We merge the two configs and use that with the target
cons_target_config = self.create_consumable_target_config(
target_config, tap_inheritable_config
)
# Output will be redirected into target and tap specific log directory
log_dir = self.get_tap_log_dir(target_id, tap_id)
current_time = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
log_file = os.path.join(
log_dir, "{}-{}-{}.fastsync.log".format(target_id, tap_id, current_time)
)
# sync_tables command always using fastsync
try:
self.run_tap_fastsync(
tap_type,
target_type,
tap_config,
tap_properties,
tap_state,
tap_transformation,
cons_target_config,
log_file,
)
# Delete temp file if there is any
except utils.RunCommandException as exc:
self.logger.error(exc)
utils.silentremove(cons_target_config)
sys.exit(1)
except Exception as exc:
utils.silentremove(cons_target_config)
raise exc
utils.silentremove(cons_target_config)
def import_project(self):
"""
Take a list of YAML files from a directory and use it as the source to build
singer compatible json files and organise them into pipeline directory structure
"""
# Read the YAML config files and transform/save into singer compatible
# JSON files in a common directory structure
config = Config.from_yamls(self.config_dir, self.args.dir, self.args.secret)
config.save()
# Activating tap stream selections
#
# Run every tap in discovery mode to generate the singer specific
        # properties.json files for the taps. The properties file is then
        # updated to replicate only the tables that are defined in the YAML
        # files and to use the required replication methods
        #
        # The tap Discovery mode needs to connect to each source database and
        # doing that sequentially is slow. For better performance we do it
# in parallel.
self.logger.info("ACTIVATING TAP STREAM SELECTIONS...")
total_targets = 0
total_taps = 0
discover_excs = []
# Import every tap from every target
start_time = datetime.now()
for tk in config.targets.keys():
target = config.targets.get(tk)
total_targets += 1
total_taps += len(target.get("taps"))
with parallel_backend("threading", n_jobs=-1):
# Discover taps in parallel and return the list
# of exception of the failed ones
discover_excs.extend(
list(
filter(
None,
Parallel(verbose=100)(
delayed(self.discover_tap)(tap=tap, target=target)
for (tap) in target.get("taps")
),
)
)
)
# Log summary
end_time = datetime.now()
self.logger.info(
"""
-------------------------------------------------------
IMPORTING YAML CONFIGS FINISHED
-------------------------------------------------------
Total targets to import : {}
Total taps to import : {}
Taps imported successfully : {}
Taps failed to import : {}
Runtime : {}
-------------------------------------------------------
""".format(
total_targets,
total_taps,
total_taps - len(discover_excs),
str(discover_excs),
end_time - start_time,
)
)
if len(discover_excs) > 0:
sys.exit(1)
def encrypt_string(self):
"""
Encrypt the supplied string using the provided vault secret
"""
b_ciphertext = utils.vault_encrypt(self.args.string, self.args.secret)
yaml_text = utils.vault_format_ciphertext_yaml(b_ciphertext)
print(yaml_text)
print("Encryption successful")
|
beherap/pipelinewise
|
pipelinewise/cli/pipelinewise.py
|
pipelinewise.py
|
py
| 55,124 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41983296819
|
"""
This is the name of the park to be used as an app-wide constant
"""
PARK_NAME = "Copington Adventure Theme Park"
TICKET_PRICES = {
"child": 12,
"adult": 20,
"senior": 11,
}
WRISTBAND_PRICE = 20
MAXIMUM_PARK_CAPACITY = 500
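# Hypothetical usage sketch (the order dict below is illustrative, not part of this module):
#   from utils.constants import TICKET_PRICES, WRISTBAND_PRICE
#   order = {"adult": 2, "child": 1}
#   total = sum(TICKET_PRICES[k] * n for k, n in order.items()) + WRISTBAND_PRICE  # 72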
|
alii/copington-ticket-theme-park
|
utils/constants.py
|
constants.py
|
py
| 241 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21393275553
|
s = {'x', 'y', 'b', 'c', 'a'}
for item in s:
print(item)
# the order of elements is unknown.
class Squares:
def __init__(self, length):
self.length = length
self.i = 0
def __iter__(self):
print("calling __iter__")
self.i = 0
return self
def __next__(self):
print("calling __next__")
if self.i >= self.length:
raise StopIteration
else:
result = self.i ** 2
self.i += 1
return result
def __len__(self):
return self.length
sq = Squares(5)
for i in sq:
print(i)
for i in sq:
print(i)
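# The second loop above works because __iter__ resets self.i to 0 on each call.
# Driving the protocol by hand (uses the Squares class defined above):
it = iter(Squares(3))
print(next(it), next(it), next(it))  # 0 1 4 (interleaved with the __iter__/__next__ traces)
# one more next(it) here would raise StopIteration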
|
Hopw06/Python
|
Python_Deep_Dive/Part 2/4.IterablesAndIterators/1.IteratingCollections.py
|
1.IteratingCollections.py
|
py
| 680 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29465188143
|
# Take an array and remove every second element from the array.
# Always keep the first element and start removing with the next element.
# Example:
# ["Keep", "Remove", "Keep", "Remove", "Keep", ...] --> ["Keep", "Keep", "Keep", ...]
# None of the arrays will be empty, so you don't have to worry about that!
def remove_every_other(my_list):
# Your code here!
# create a list to hold the elements that meet the criteria
new_list = []
# loop through the list
for i in range(len(my_list)):
# if the index is even, add the element to the new list
if i % 2 == 0:
new_list.append(my_list[i])
return new_list
# # one line solution
# return my_list[::2]
# test.assert_equals(remove_every_other(['Hello', 'Goodbye', 'Hello Again']),
# ['Hello', 'Hello Again'])
# test.assert_equals(remove_every_other([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
# [1, 3, 5, 7, 9])
# test.assert_equals(remove_every_other([[1, 2]]), [[1, 2]])
# test.assert_equals(remove_every_other([['Goodbye'], {'Great': 'Job'}]),
# [['Goodbye']])
|
tuyojr/code_wars-hacker_rank-leetcode
|
code_wars/remove_every_other.py
|
remove_every_other.py
|
py
| 1,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12970955601
|
import socket
import time
def SendRec(mode,namefile):
client = socket.socket()
client.connect(('127.0.0.1',1222))
print("Connect to server!")
    client.send(mode.encode())  # sockets send bytes, so encode the string
print("sent mode to server!")
time.sleep(3)
    client.send(namefile.encode())
print("sent name of file to server!")
time.sleep(3)
if mode == "1":
with open(namefile, "rb") as file:
# send file
print("Sending file ...")
# read the whole file at once
dataUp = file.read()
# Convert the file into smaller segments and send them
client.sendall(dataUp)
print("upload completed!")
#file.close()
elif mode == "2":
file = open(namefile, "wb")
while True:
data = client.recv(4096)
print(data)
if not data:
file.close()
break
file.write(data)
client.close()
return
which = input('1-Upload 2-Download :\n')
fileName = input('Enter name of file (ex: "test.pdf"):\n')
SendRec(str(which),str(fileName))
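# A minimal sketch of the server loop this client assumes (hypothetical; the real
# server lives elsewhere in the repo and may differ):
#   server = socket.socket()
#   server.bind(('127.0.0.1', 1222))
#   server.listen(1)
#   conn, _ = server.accept()
#   mode = conn.recv(16).decode()        # "1" = client uploads, "2" = client downloads
#   name = conn.recv(256).decode()
#   if mode == "1":
#       with open(name, "wb") as f:
#           while True:
#               chunk = conn.recv(4096)
#               if not chunk:
#                   break
#               f.write(chunk)
#   elif mode == "2":
#       with open(name, "rb") as f:
#           conn.sendall(f.read())
#   conn.close()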
|
MDoroudgarian/fileserverpy
|
client/client.py
|
client.py
|
py
| 1,096 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32188154157
|
# 6-5.py Quick sort source code that takes advantage of Python's strengths
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array):
    # Return the list if its length is 1 or less
if len(array) <= 1:
return array
    pivot = array[0]  # pivot <- the first element
    tail = array[1:]  # the list after the pivot
    left_side = [x for x in tail if x <= pivot]  # the left partition (elements <= pivot)
    right_side = [x for x in tail if x > pivot]  # the right partition (elements > pivot)
    # After partitioning, recursively sort the left and right parts and join them around the pivot
return quick_sort(left_side) + [pivot] + quick_sort(right_side)
print(quick_sort(array))
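# Worked first partition for the array above (illustrative):
#   pivot = 5, tail = [7, 9, 0, 3, 1, 6, 2, 4, 8]
#   left_side  = [0, 3, 1, 2, 4]   (x <= 5)
#   right_side = [7, 9, 6, 8]      (x > 5)
#   result = quick_sort(left_side) + [5] + quick_sort(right_side)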
|
kcw0331/python-for-coding-test
|
thisiscodingtest/정렬(파이썬의장점을살린퀵정렬).py
|
정렬(파이썬의장점을살린퀵정렬).py
|
py
| 636 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
15917640785
|
from django.urls import path
from . import views
app_name = 'main'
urlpatterns = [
path('category_list/', views.category_list, name='category_list'),
path('delete_category/<int:category_id>/', views.delete_category, name='delete_category'),
path('update_category/<int:category_id>/', views.update_category, name='update_category'),
path('product_list/', views.product_list, name='product_list'),
path('delete_product/<int:code>/', views.delete_product, name='delete_product'),
path('update_products/<int:pk>/', views.update_products, name='update_products'),
path('export_pdf/', views.export_pdf, name='export_pdf'),
path('export_excel/', views.export_excel, name='export_excel'),
path('import_excel/', views.import_excel, name='import_excel'),
path('export_import/', views.export_import, name='export_import'),
#path('add_product/', views.add_product, name='add_product'),
#path('update_product/<int:product_id>/', views.update_product, name='update_product'),
#path('delete_product/<int:product_id>/', views.delete_product, name='delete_product'),
#path('index/', views.index, name='index'),
]
|
elumes446/Store-Management-System
|
Store Managment System/main/urls.py
|
urls.py
|
py
| 1,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28130211082
|
## imports
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QPushButton,QStackedWidget,QScrollArea, QProgressBar, QHBoxLayout, QLineEdit
from PyQt5.QtCore import QObject, QThread, pyqtSignal,Qt
# from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
from os.path import exists,join
from os import mkdir, remove
import spotipy
from datetime import datetime
from spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials, CacheFileHandler
from shutil import rmtree
# import matplotlib.pyplot as plt, matplotlib.dates as mdates
import csv
## gui
class graphWindow(QWidget):
def __init__(self):
super().__init__()
self.layout = QVBoxLayout()
self.stats = QLabel(data.totalSongs())
self.layout.addWidget(self.stats)
self.graph= pg.PlotWidget()
axis = pg.DateAxisItem()
self.graph.setAxisItems({'bottom':axis})
self.loadGraph()
self.layout.addWidget(self.graph)
self.move(0,0)
self.setLayout(self.layout)
def loadGraph(self):
self.setWindowTitle(username)
self.graph.clear()
# graph.plot((lambda date : [datetime.datetime.strptime(i,'%Y-%m-%d').timestamp() for i in date])(date), numSongs)
date_num= {}
lastDate= ''
for i in songData:
date= songData[i][0]
if date != lastDate:
lastDate= date
dateTime= datetime.strptime(date,'%Y-%m-%d').timestamp()
date_num[dateTime]=0
date_num[dateTime]+=1
y= sorted(date_num)
x= [date_num[i] for i in y]
length= len(date_num)
cumulative_x= [len(songData)]
        for i in range(length-1, 0, -1):  # working backwards from the total, subtracting songs added per day
elem= cumulative_x[0]- x[i]
cumulative_x.insert(0,elem)
perDay= ''
if len(y) > 1:
perDay= ' Songs per day: %s' % round((cumulative_x[-1]- cumulative_x[0])/(datetime.fromtimestamp(y[-1])- datetime.fromtimestamp(y[0])).days, 2)
self.stats.setText(data.totalSongs()+ perDay)
self.graph.plot(y,cumulative_x)
print('Graph Loaded')
class MainWindow(QWidget):
def __init__(self):
super(MainWindow, self).__init__()
QApplication.font()
        self.graph= graphWindow()  # separate graph window instance; call its methods via self.graph.<method>()
self.resize(150,150)
self.loadedUser= my_id
# create pages(stacks)
self.home = QWidget()
self.changeUser = QWidget()
self.main= QWidget()
self.missingPage = QWidget()
self.duplicatePage = QWidget()
self.followArtists = QWidget()
self.searchPage= QWidget()
self.log= QWidget()
self.addUser= QWidget()
# create stack and add all pages
self.Stack = QStackedWidget (self)
self.Stack.addWidget (self.home)
self.Stack.addWidget (self.changeUser)
self.Stack.addWidget (self.main)
self.Stack.addWidget (self.missingPage)
self.Stack.addWidget (self.duplicatePage)
self.Stack.addWidget (self.followArtists)
self.Stack.addWidget (self.searchPage)
self.Stack.addWidget (self.log)
self.Stack.addWidget (self.addUser)
# developing the pages
self.create_home()
self.create_changeUser()
self.create_main()
self.create_missingPage()
self.create_duplicatePage()
self.create_followArtists()
self.create_searchPage()
self.create_logPage()
self.create_addUserPage()
#placing stack in window (class)
layout= QVBoxLayout()
layout.addWidget(self.Stack)
self.setLayout(layout)
self.setWindowTitle("Home")
self.show()
# Home page
def create_home(self):
layout= QVBoxLayout()
layout.setAlignment(Qt.AlignCenter)
hLayout1= QHBoxLayout()
hLayout2= QHBoxLayout()
hLayout3= QHBoxLayout()
self.currentUserLabel= QLabel("Current User: %s" % username)
self.currentUserLabel.setAlignment(Qt.AlignCenter)
layout.addWidget(self.currentUserLabel)
button1= QPushButton("Change User")
button1.clicked.connect(self.showChangeUser)
layout.addWidget(button1)
button2= QPushButton("Run")
button2.clicked.connect(self.run)
hLayout1.addWidget(button2)
button3= QPushButton("Graph")
button3.clicked.connect(self.showGraph)
hLayout1.addWidget(button3)
layout.addLayout(hLayout1)
button4= QPushButton("Missing")
button4.clicked.connect(self.showMissingPage)
hLayout2.addWidget(button4)
button5= QPushButton("Duplicate")
button5.clicked.connect(self.showDuplicatePage)
hLayout2.addWidget(button5)
layout.addLayout(hLayout2)
button6= QPushButton("Follow artists")
button6.clicked.connect(self.showFollowArtists)
hLayout3.addWidget(button6)
button7= QPushButton("Search")
button7.clicked.connect(self.showSearchPage)
hLayout3.addWidget(button7)
layout.addLayout(hLayout3)
button8= QPushButton('Log')
button8.clicked.connect(self.showLogPage)
layout.addWidget(button8)
self.home.setLayout(layout)
#Change user page
def create_changeUser(self):
layout= QVBoxLayout()
scroll, scrollContent, self.userScrollLayout= self.scrollBox()
scroll.setWidget(scrollContent)
layout.addWidget(scroll)
hLayout= QHBoxLayout()
checkUser= QPushButton('Add')
checkUser.clicked.connect(lambda event : self.showAddUser())
hLayout.addWidget(checkUser)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.changeUser.setLayout(layout)
def updateChangeUser(self):
data.get_id_user()
self.deleteLayoutItems(self.userScrollLayout)
for i in id_user:
button= QPushButton(id_user[i])
            button.clicked.connect(lambda event, x=i: data.changeActiveUser(x))  # clicked emits a checked bool (captured as event); x=i binds the current value because i is late-bound in closures
button.clicked.connect(lambda event : self.showHome()) # go(0)
button.clicked.connect(lambda event : self.graph.loadGraph())
self.userScrollLayout.addWidget(button)
print('Updated Change User')
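    # Illustrative aside on the x=i default above (not app code): default arguments
    # bind at definition time, while bare closures are late-bound over the loop variable.
    #   [f() for f in [lambda x=i: x for i in range(3)]]  ->  [0, 1, 2]
    #   [f() for f in [lambda: i for i in range(3)]]      ->  [2, 2, 2]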
# missing page
    def create_missingPage(self):  # this won't update after run
layout= QVBoxLayout()
hLayout= QHBoxLayout()
self.missingChange= QPushButton()
self.missingChange.clicked.connect(self.showAllMissing)
self.missingScroll, self.missingScrollContent, self.missingScrollLayout= self.scrollBox()
layout.addWidget(self.missingScroll)
self.missingScroll.setWidget(self.missingScrollContent)
hLayout.addWidget(self.missingChange)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.missingPage.setLayout(layout)
def showAllMissing(self):
self.setWindowTitle("Missing - All")
self.changeScrollContent(data.missing(), func= 0, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Deleted')
self.changeConnection(self.missingChange.clicked, self.showDeleted)
def showDeleted(self):
self.setWindowTitle("Missing - Deleted")
self.changeScrollContent(data.deleted(data.missing()), func= 1, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Missing')
self.changeConnection(self.missingChange.clicked, self.showMissing)
def showMissing(self):
self.setWindowTitle("Missing - Missing")
self.changeScrollContent(data.remDel(data.missing()), func= 2, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Unconf')
self.changeConnection(self.missingChange.clicked, self.showUnConfMissing)
def showUnConfMissing(self):
self.setWindowTitle("Missing - Unconfirmed")
self.changeScrollContent(data.remConf(data.missing()), func= 3, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show All')
self.changeConnection(self.missingChange.clicked, self.showAllMissing)
# duplicate page
def create_duplicatePage(self):
layout= QVBoxLayout()
hLayout= QHBoxLayout()
self.duplicateChange= QPushButton()
self.duplicateChange.clicked.connect(self.showAllDuplicate)
self.duplicateScroll, self.duplicateScrollContent, self.duplicateScrollLayout= self.scrollBox()
layout.addWidget(self.duplicateScroll)
self.duplicateScroll.setWidget(self.duplicateScrollContent)
hLayout.addWidget(self.duplicateChange)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.duplicatePage.setLayout(layout)
def showAllDuplicate(self):
self.setWindowTitle("Duplicates - All")
self.changeScrollContent(data.duplicates(), func= 0, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show Allowed')
self.changeConnection(self.duplicateChange.clicked, self.showAllowedDuplicate)
def showIllegalDuplicate(self):
self.setWindowTitle("Duplicates - Illegal")
self.changeScrollContent(data.remAllowedDuplicates(data.duplicates()), func= 1, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show All')
self.changeConnection(self.duplicateChange.clicked, self.showAllDuplicate)
def showAllowedDuplicate(self):
self.setWindowTitle("Duplicates - Allowed")
self.changeScrollContent(list(allowedDup.keys()), func= 2, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show illegal')
self.changeConnection(self.duplicateChange.clicked, self.showIllegalDuplicate)
# main(run) page
def create_main(self):
layout= QVBoxLayout()
self.mainLabel= QLabel("change with window.mainLabel.setText(str)")
layout.addWidget(self.mainLabel)
self.progress = QProgressBar()
layout.addWidget(self.progress)
self.main.setLayout(layout)
# follow artists page
def create_followArtists(self):
layout= QVBoxLayout()
scroll, scrollContent, self.followScrollLayout= self.scrollBox()
scroll.setWidget(scrollContent)
layout.addWidget(scroll)
self.followLabel= QLabel()
layout.addWidget(self.followLabel)
self.followProgress= QProgressBar()
self.followProgress.setAlignment(Qt.AlignCenter)
layout.addWidget(self.followProgress)
layout.addWidget(self.homeButton())
self.followArtists.setLayout(layout)
def updateFollowArtists(self):
self.deleteLayoutItems(self.followScrollLayout)
for playlistId in ids_playlists:
button= QPushButton(ids_playlists[playlistId])
button.clicked.connect(lambda event , playlistId= playlistId: self.create_followWorker(playlistId))
self.followScrollLayout.addWidget(button)
print('Updated follow playlists')
def create_followWorker(self, playlistId): # creates worker to follow artists which updates follow artists page
self.followWorker = Worker(caller= 'follow', playlistId= playlistId)
self.followThread = QThread()
self.followWorker.moveToThread(self.followThread)
self.followThread.started.connect(self.followWorker.run)
self.followWorker.finished.connect(self.followThread.quit)
self.followWorker.progress.connect(self.update_followProgress)
self.followWorker.mainLab.connect(self.update_followLabel)
self.followWorker.finished.connect(self.followWorker.deleteLater)
self.followThread.finished.connect(self.followThread.deleteLater)
self.followThread.start()
# search page
def create_searchPage(self):
layout= QVBoxLayout()
self.searchThread= QThread()
scroll, scrollContent, self.searchScrollLayout= self.scrollBox()
self.searchBar= QLineEdit()
self.searchBar.textChanged.connect(lambda event : self.search())
layout.addWidget(self.searchBar)
# search bar enter connect or button
layout.addWidget(scroll)
scroll.setWidget(scrollContent)
layout.addWidget(self.homeButton())
self.searchPage.setLayout(layout)
def search(self):
# stop previous search if ongoing(close thread opended in show search)
self.searchThread.quit()
toSearch= self.searchBar.text()
self.searchWorker= Worker(caller= 'search')
self.searchWorker.moveToThread(self.searchThread)
self.searchThread.started.connect(self.searchWorker.run)
self.searchWorker.finished.connect(self.searchThread.quit)
self.searchWorker.finished.connect(self.searchWorker.deleteLater)
self.searchWorker.searchResults.connect(self.addResults)
if toSearch != '':
self.searchThread.start()
else:
self.setWindowTitle('Search')
self.deleteLayoutItems(self.searchScrollLayout)
def clearSearch(self):
print('Cleared search')
self.searchBar.setText('')
# self.deleteLayoutItems(self.searchScrollLayout)
def addResults(self,trackIds):
resultLayout= QVBoxLayout()
resultLayout.setAlignment(Qt.AlignTop)
self.setWindowTitle('Search - %s' % len(trackIds))
        for trackId in trackIds[:100]: # UI lags if too many results are added
hLayout= QHBoxLayout()
self.addSong(trackId,hLayout)
resultLayout.addLayout(hLayout)
self.deleteLayoutItems(self.searchScrollLayout) # using another layout and moving delete layout here removes flicker
self.searchScrollLayout.addLayout(resultLayout)
# log page
def create_logPage(self):
layout= QVBoxLayout()
scroll, scrollContent, self.logScrollLayout= self.scrollBox()
layout.addWidget(scroll)
scroll.setWidget(scrollContent)
hLayout= QHBoxLayout()
clear= QPushButton('Clear')
clear.clicked.connect(lambda event : self.clearLog())
hLayout.addWidget(clear)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.log.setLayout(layout)
def updateLog(self): #refreshes scroll area with string from log file
label= QLabel(data.get_log())
self.deleteLayoutItems(self.logScrollLayout)
self.logScrollLayout.addWidget(label)
def clearLog(self): # clears log then refreshes log scroll area
data.clear_log()
self.updateLog()
# create user page
def create_addUserPage(self):
layout= QVBoxLayout()
self.createThread= QThread()
self.addUserLayout= QVBoxLayout()
layout.addLayout(self.addUserLayout)
hLayout= QHBoxLayout()
self.createButton= QPushButton('Next')
        self.createButton.clicked.connect(lambda event, string= 'Id has not been input' : self.updateWarning(string))
hLayout.addWidget(self.createButton)
self.addUserBack= QPushButton('Back')
self.addUserBack.clicked.connect(lambda event : self.showChangeUser())
hLayout.addWidget(self.addUserBack)
layout.addLayout(hLayout)
self.addUser.setLayout(layout)
def create_addUserLayout(self):
label= QLabel()
label.setText('Spotify Account Url:')
self.addUserLayout.addWidget(label)
self.Url= QLineEdit()
self.Url.textChanged.connect(lambda event : self.checkUser())
self.addUserLayout.addWidget(self.Url)
label1= QLabel()
label1.setText('Username:')
self.addUserLayout.addWidget(label1)
self.Username= QLabel()
self.addUserLayout.addWidget(self.Username)
self.warning= QLabel()
self.warning.setStyleSheet('color: red')
self.addUserLayout.addWidget(self.warning)
    def checkUser(self): # creates a worker to check if the id is viable; needs changing so it still works when there is no last user
self.Url.text()
        # seems like workers aren't being deleted
self.create= Worker(caller= 'check')
self.create.moveToThread(self.createThread)
self.createThread.started.connect(self.create.run)
self.create.finished.connect(self.createThread.quit)
self.create.finished.connect(self.create.deleteLater)
self.create.warning.connect(self.updateWarning)
self.create.searchResults.connect(self.updateUsername) # username has been found
# self.create.progress.connect(self.changeCreateConnection) # when progress is changed(auth conf) mainlab then changes username
self.createThread.start()
def updateWarning(self,string): # changes the warning label on the change user page if warning emitted means bad username
self.warning.setText(string)
self.Username.setText('Your Username will appear here')
self.changeConnection(self.createButton.clicked, lambda event : self.checkUser())
def updateUsername(self,newUserInfo): # updates username variable; when this func is called it means username is found so it changes state of button to allow progress
self.newUsername= newUserInfo[1]
self.warning.setText('')
self.newId= newUserInfo[0]
self.Username.setText(self.newUsername)
        self.changeConnection(self.createButton.clicked, lambda event : self.getVerification()) # button changes to allow progress
def getVerification(self): # uses self.newId as user can still change the text box
self.setAnweredState()
self.deleteLayoutItems(self.addUserLayout)
label= QLabel()
print('align these pleaseeeeee')
label.setText('Redirect Url:')
self.addUserLayout.addWidget(label)
self.redirect= QLineEdit()
self.addUserLayout.addWidget(self.redirect)
self.getAuthor= QThread()
self.getFirstSp= checkAuth()
self.getFirstSp.moveToThread(self.getAuthor)
self.getAuthor.started.connect(self.getFirstSp.run)
self.getFirstSp.finished.connect(self.getAuthor.quit)
self.getFirstSp.finished.connect(self.getAuthor.deleteLater)
self.getFirstSp.finished.connect(self.getFirstSp.deleteLater)
self.getFirstSp.sp.connect(lambda sp : self.confAuth(sp)) # sp is given if None it has failed so need to retry
self.getAuthor.start()
self.changeConnection(self.createButton.clicked, lambda event, state= True : self.setAnweredState(state)) # button changes to allow progress
# if auth worked
# self.addConfUser()
def setAnweredState(self, state= False):
self.answered= state
def confAuth(self, sp): # if auth worked/ didnt
if sp == None: self.updateAddUser() # go back
else: # set upd saved ids playlists
self.deleteLayoutItems(self.addUserLayout)
scroll, scrollContent, scrollLayout= self.scrollBox()
scroll.setWidget(scrollContent)
self.addUserLayout.addWidget(scroll)
self.playlistsToAdd= []
for playlistInfo in spotify.find_userPlaylists(sp, self.newId): #returns [ [id,name] ,..]
background= QWidget()
hLayout= QHBoxLayout()
print('if buttons align wrong change here')
hLayout.setAlignment(Qt.AlignLeft)
button1= QPushButton('Y')
button1.clicked.connect(lambda event, state= True, playlistInfo= playlistInfo, background= background : self.setPlaylistState(state, playlistInfo, background))
hLayout.addWidget(button1)
button2= QPushButton('N')
button2.clicked.connect(lambda event, state= False, playlistInfo= playlistInfo, background= background : self.setPlaylistState(state, playlistInfo, background))
hLayout.addWidget(button2)
label= QLabel()
label.setText(playlistInfo[1])
hLayout.addWidget(label)
background.setLayout(hLayout)
scrollLayout.addWidget(background)
self.changeConnection(self.createButton.clicked, self.addConfUser) # creates user saved playlist ids then goes home if only user sets user to made one
def setPlaylistState(self, state, playlistInfo, background):
if state:
if playlistInfo not in self.playlistsToAdd:
self.playlistsToAdd.append(playlistInfo)
background.setStyleSheet('color: green')
else:
if playlistInfo in self.playlistsToAdd:
self.playlistsToAdd.remove(playlistInfo)
background.setStyleSheet('color: red')
    def addConfUser(self): # if Create on the add user page is pressed, a user is created with the gathered id and username
self.create= Worker(caller= 'create')
self.create.moveToThread(self.createThread)
self.createThread.started.connect(self.create.run)
self.create.finished.connect(self.createThread.quit)
self.create.finished.connect(self.create.deleteLater)
self.create.finished.connect(self.createThread.deleteLater)
self.create.finished.connect(self.showHome)
self.createThread.start()
def updateAddUser(self): # resets add user page to before user id has been checked or just sets it up
self.deleteLayoutItems(self.addUserLayout)
self.create_addUserLayout()
# self.Url.setText('')
# useful code
def homeButton(self): # creates home button widget
button1= QPushButton("Home")
button1.clicked.connect(self.showHome)
return button1
def changeConnection(self, signal, newConnection): # changes connection of signal event eg button.clicked
signal.disconnect()
signal.connect(newConnection)
def scrollBox(self): # creates scroll widget
scroll= QScrollArea()
scroll.setWidgetResizable(True)
scrollContent = QWidget(scroll)
scrollLayout = QVBoxLayout(scrollContent)
scrollLayout.setAlignment(Qt.AlignTop)
return scroll, scrollContent, scrollLayout
def addSong(self, trackId, layout): # adds hlayout (song name , artist, playlists) to layout
song= songData[trackId]
songName= QLabel(song[1])
songName.setFixedWidth(70)
layout.addWidget(songName)
songArtists= QLabel(', '.join(song[2]))
songArtists.setFixedWidth(70)
layout.addWidget(songArtists)
songPlaylists= QLabel(', '.join([ids_playlists[playlist[0]] for playlist in song[3]]))
layout.addWidget(songPlaylists)
def changeScrollContent(self, trackIds, func, scrollLayout, connectionFunction): # refreshes provided scrollLayout and adds all songs in provided list must give function(object) with 2 states(bool) for yes/no buttons
self.deleteLayoutItems(scrollLayout)
for trackId in trackIds:
hScrollLayout= QHBoxLayout()
hButtonsLayout= QHBoxLayout()
hButtonsLayout.setSpacing(0)
hButtonsLayout.setContentsMargins(0,0,0,0) # trying to get the buttons closer together
button1= QPushButton('Y')
button2= QPushButton('N')
button1.clicked.connect(lambda event, Id= trackId, state= True, func= func, layout= hScrollLayout : connectionFunction(Id,state,func,layout))
button2.clicked.connect(lambda event, Id= trackId, state= False, func= func, layout= hScrollLayout : connectionFunction(Id,state,func,layout))
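            # note: the extra default arguments (Id=trackId, state=..., func=..., layout=...) in the
            # lambdas above are the usual Python idiom for capturing the current loop values by value;
            # without them every button would see the last trackId of the loop when clicked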
button1.setFixedWidth(30)
button1.setContentsMargins(0,0,0,0)
hButtonsLayout.addWidget(button1)
button2.setFixedWidth(30)
button2.setContentsMargins(0,0,0,0)
hButtonsLayout.addWidget(button2)
hScrollLayout.addLayout(hButtonsLayout)
self.addSong(trackId,hScrollLayout)
scrollLayout.addLayout(hScrollLayout)
def deleteLayoutItems(self, layout): # deletes items in layout but it might only forget them lol
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.setParent(None)
else:
self.deleteLayoutItems(item.layout())
def missingUserConf(self, trackId, state, func, layout): # on button press it hides song from missing scroll(if needed) and changes deleted state
hide= False
if func != 0:
if func == 1 and not state: hide= True
elif func == 2 and state: hide= True
elif func == 3: hide= True
if hide:
self.deleteLayoutItems(layout) ## remove from view if not showing all what about showing
layout.deleteLater()
data.setDeletedState(trackId,state)
def duplicateUserConf(self,trackId,state,func,layout): # on button press it hides song from duplicate scroll(if needed) and adds/removes from allowed duplicates
hide= False
if func != 0:
if func == 1 and state: hide= True
elif func == 2 and not state: hide= True
if hide: ## this could be turned into a func
self.deleteLayoutItems(layout) ## remove from view if not showing all what about showing
layout.deleteLater()
if state: ## add to allowed duplicates
data.add_allowedDup(trackId, [playlistData[0] for playlistData in songData[trackId][3]])
else: ## remove from allowed duplicates
data.rem_fromAllowedDup(trackId)
## button commands
def showGraph(self):
if self.graph.isVisible(): self.graph.hide()
else: self.graph.show()
def waitHome(self):
from time import sleep
sleep(1)
self.showHome()
def showHome(self): # the go funcs could be changed into func with passed variable for index and list of names with same index
self.currentUserLabel.setText("Current User: %s" % username)
self.setWindowTitle("Home")
self.Stack.setCurrentIndex(0)
self.resize(150, 150)
def showChangeUser(self):
self.updateChangeUser()
self.setWindowTitle("Change User")
self.Stack.setCurrentIndex(1)
def update_mainLabel(self,elem): # changes label on main page
self.mainLabel.setText(elem)
def run(self):
self.setWindowTitle("Sponitor")
self.Stack.setCurrentIndex(2)
self.update_mainLabel('Starting')
self.update_progress(0)
self.thread = QThread()
self.worker = Worker(caller= 'main')
self.worker.moveToThread(self.thread)
self.thread.started.connect(self.worker.run)
self.worker.progress.connect(self.update_progress)
self.worker.mainLab.connect(self.update_mainLabel)
self.worker.finished.connect(self.thread.quit)
self.worker.finished.connect(self.worker.deleteLater)
self.thread.finished.connect(self.thread.deleteLater)
self.thread.finished.connect(self.waitHome)
self.thread.finished.connect(lambda event=None : self.graph.loadGraph())
self.thread.finished.connect(lambda event=None : self.updateMD())
self.thread.start()
def showMissingPage(self):
self.updateMD()
self.showUnConfMissing()
self.Stack.setCurrentIndex(3)
self.resize(430,300)
def showDuplicatePage(self):
self.updateMD()
self.showIllegalDuplicate()
self.Stack.setCurrentIndex(4)
self.resize(430,300)
def showFollowArtists(self):
self.updateFollowArtists()
self.setWindowTitle("Follow Artists")
self.Stack.setCurrentIndex(5)
def showSearchPage(self):
self.clearSearch()
self.setWindowTitle("Search")
self.Stack.setCurrentIndex(6)
def showLogPage(self):
self.updateLog()
self.setWindowTitle("Log")
self.Stack.setCurrentIndex(7)
def showAddUser(self):
self.updateAddUser()
self.setWindowTitle("Create User")
self.Stack.setCurrentIndex(8)
def update_progress(self, progress): # updates progress bar on main page
self.progress.setValue(progress)
def updateMD(self): # refreshes missing and duplicate scrollareas
self.showUnConfMissing()
self.showIllegalDuplicate()
print('Updated Missing, Duplicates')
    def update_followLabel(self, text): ## could shorten this and update_followProgress with a lambda func that you give the self var to
self.followLabel.setText(text) # changes label on follow artists page
    def update_followProgress(self, pos): # changes progress bar on follow artists page
self.followProgress.setValue(pos)
class checkAuth(QObject):
finished = pyqtSignal()
sp = pyqtSignal(object)
def __init__(self):
super(checkAuth, self).__init__()
def run(self):
print('check Auth')
sp= spotify.getSp(window.newId,window)
if sp == False:
self.sp.emit(None)
else:
self.sp.emit(sp)
self.finished.emit()
class Worker(QObject):
finished = pyqtSignal()
progress = pyqtSignal(int)
mainLab= pyqtSignal(str)
warning= pyqtSignal(str)
searchResults= pyqtSignal(list)
def __init__(self, caller= '', playlistId= ''):
super(Worker, self).__init__()
self.caller= caller
self.playlistId= playlistId
def run(self):
# Here we pass the update_progress (uncalled!)
# function to the long_running_function:
if self.caller == 'main':
spotify.updateSongs(self.update_label, self.update_progress)
elif self.caller == 'follow':
spotify.followArtistsInPlaylists(self.update_label, self.update_progress, self.playlistId)
elif self.caller == 'search':
self.searchResults.emit(spotify.search(window.searchBar.text()))
elif self.caller == 'check':
data.check_new_user(window.Url.text(), self.update_warning, self.update_label,self.update_results)
elif self.caller == 'create':
data.create_new_user(window.newId, window.newUsername, window.playlistsToAdd)
self.finished.emit()
def update_results(self,results):
self.searchResults.emit(results)
def update_warning(self, string):
self.warning.emit(string)
def update_progress(self, percent):
self.progress.emit(percent)
def update_label(self, string):
self.mainLab.emit(string)
## spotify monitor
class data():
# def create_saved_ids_playlists(saved_ids_playlists):# creates/ updates saved ids_playlists(playlists that get saved)
# with open(join(my_id, 'saved_ids_playlists.txt'),'w+',encoding='utf-8') as file: # replace with if loc not exists create_file
# first= True
# for i in list(saved_ids_playlists.keys()):
# to_write= i+'##'+ saved_ids_playlists[i]
# if not first:
# to_write= '\n'+to_write
# file.write(to_write)
# first=False
def checkFile(loc):
if not exists(loc):
data.createFile(loc)
def create_saved_ids_playlists(Id,playlistInfo):
toAdd= []
for playlist in playlistInfo:
toAdd.append("##".join(playlist))
toAdd= '\n'.join(toAdd)
loc= join(Id, 'saved_ids_playlists.txt')
with open(loc, 'w+', encoding= 'UTF-8') as file:
file.write(toAdd)
def get_saved_ids_playlists(): # returns dict of id:playlists that need to be saved
global ids_playlists
ids_playlists={}
loc= join(my_id, 'saved_ids_playlists.txt')
if not exists(loc):
data.add_log(loc+' does not exist for '+ username)
with open(loc,'r',encoding='utf-8') as file:
for i in file.readlines():
i= i.replace('\n','')
i= i.split('##')
ids_playlists[i[0]]= i[1]
if len(ids_playlists) == 0:
print('create_ids_playlists(code meee)')
def createFile(file_loc, string= ''):
with open(file_loc,'w+',encoding='utf-8') as last:
if string != '':
last.write(string)
print("Created %s." % file_loc)
def get_id_user():# returns dict{id:user}(str)
global id_user
id_user={}
idUser_loc= 'id_user.txt'
data.checkFile(idUser_loc)
with open(idUser_loc,'r',encoding='utf-8') as ids:
for line in ids.readlines():
temp= line.split('##')
id_user[temp[0]]= temp[1].replace('\n','')
def get_log():
loc= join(my_id, username+ '_log.txt')
data.checkFile(loc)
with open(loc, 'r', encoding= 'UTF-8') as file:
log= file.read()
if log == '':
log= 'No log entries'
return log
def add_log(string):
loc= join(my_id, username+ '_log.txt')
data.checkFile(loc)
with open(loc, 'a', encoding= 'UTF-8') as file:
file.write('\n'+ string)
def clear_log():
loc= join(my_id, username+ '_log.txt')
with open(loc, 'w+', encoding= 'UTF-8') as file:
print('Cleared log')
def check_new_user(Id, update_warning, update_label, update_results): # adds id and username to file returns user id
if 'user/' in Id: Id= Id.split('user/')[1][:25]
tempUsername= spotify.verifyUsername(Id)
if tempUsername == False:
spotify.update_ui(text= 'Cannot fetch username', update_label= update_warning)
return
else:
spotify.update_ui(text= tempUsername, update_label= update_label)
update_results([Id,tempUsername])
def create_new_user(Id,temp_username, playlistInfo):
data.get_id_user()
length= len(id_user)
mkdir(Id)
with open('id_user.txt','a+',encoding='utf-8') as ids:
to_write= Id+ '##'+ temp_username
if length > 0: to_write= '\n'+ to_write
ids.write(to_write)
data.get_id_user()
data.create_saved_ids_playlists(Id,playlistInfo)
data.add_log('Created user %s - %s' % (temp_username, Id))
## update with gui
def remove_user(): # removes user from id_user and deletes their files
print('Remove user')
user_id= data.select_user()
if user_id == my_id:
print('this would result in no current user')
            # if the last/current user is the one being removed, a new active user should be selected first
            # what if removing all users? return to home (only option is to create a new user)
# homepage()
        username_to_delete= id_user[user_id]
password= input('Input password to confirm deletion of %s\n' % username_to_delete)
if password == 'delete':
            if exists(user_id): # user data is stored in a folder named by the user's id
                rmtree(user_id) # rmtree removes the folder even if it still has contents
else:
print("Folder already deleted?")
with open('id_user.txt','r',encoding='utf-8') as file:
temp= file.read()
                temp= temp.replace(user_id+'##'+username_to_delete+'\n','') # either or
                temp= temp.replace('\n'+user_id+'##'+username_to_delete,'')
remove('id_user.txt')
with open('id_user.txt','w+',encoding='utf-8') as file:
file.write(temp)
# remove from id_user
else:print('Incorrect password')
## update with gui
def select_user(): # returns selected id but does not change last user
data.get_id_user()
for i,item in enumerate(list(id_user.keys())):
print(str(i+1)+') '+ id_user[item] )
while True:
temp= input('Select user(num): ')
try:
temp= int(temp)
break
except:print('Invalid input')
selected_id= list(id_user.keys())[temp-1]
print('User selected:', id_user[selected_id])
return selected_id
def update_last_user_id(my_id): # updates user id in file
with open('last_id.txt','w+',encoding='utf-8') as last:
last.write(my_id)
def get_last_user_id():# returns last user to load in with
last_idLoc= 'last_id.txt'
data.checkFile(last_idLoc)
with open(last_idLoc,'r',encoding='utf-8') as last:
return last.read()
def changeActiveUser(Id):
print(Id)
global ids_playlists,my_id,username
my_id= Id
data.update_last_user_id(my_id)
username= id_user[my_id]
data.get_saved_ids_playlists()
data.load_songData()
print('Active user changed to', username)
def save_songData():
columns= ['Track Id','Date First Added','Name','Artists','Current Playlists/Date Addded','Missing','Deleted']
with open(join(my_id,username+'_songData.csv'), 'w', newline='', encoding= 'UTF-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
writer.writeheader()
for trackId in songData:
song= songData[trackId]
artists=seperator.join(song[2])
playlists_dates=[]
for playlist_date in song[3]:
playlists_dates.append(seperator.join(playlist_date))
playlists_dates= seperator2.join(playlists_dates)
row= dict(zip(columns, [trackId,song[0],song[1], artists, playlists_dates,song[4],song[5]]))
writer.writerow(row)
print('Saved songData')
def load_songData():
global songData
songData= {}
loc= join(my_id,username+'_songData.csv')
data.checkFile(loc)
with open(loc, 'r', newline='', encoding= 'UTF-8') as csvfile:
morp= csv.reader(csvfile)
for pos,row in enumerate(morp):
if pos != 0:
artists= row[3].split(seperator)
playlists_dates= []
for elem in row[4].split(seperator2):
playlists_dates.append(elem.split(seperator))
songData[row[0]]= [row[1], row[2], artists, playlists_dates, row[-2], row[-1]]
print('Loaded songData')
#new
def r_nestedElem(pos,nestedList): # returns indexed val of nested list
temp=[]
for eggList in nestedList:
temp.append(eggList[pos])
return temp
def get_allowedDup():
global allowedDup
path= join(my_id,username+'_allowedDuplicates.txt')
data.checkFile(path)
with open(path, 'r', encoding= 'UTF-8') as file:
temp= file.readlines()
allowedDup= {}
if temp != ['']:
for i in temp:
i= i.replace('\n','')
i= i.split(seperator)
allowedDup[i[0]]= i[1:]
print('Loaded allowed duplicates')
def save_allowedDup():
path= join(my_id,username+'_allowedDuplicates.txt')
temp= '\n'.join([i+ seperator+ seperator.join(allowedDup[i]) for i in allowedDup])
with open(path, 'w+', encoding= 'UTF-8') as file:
file.write(temp)
def add_allowedDup(trackId, playlists):
if type(playlists) != list: playlists= [playlists]
if trackId in allowedDup:
allowedDup[trackId].extend(playlists) ## adds new playlists to end of allowed playlist list
else:
allowedDup[trackId]= playlists
data.save_allowedDup()
# return allowedDup
def rem_fromAllowedDup(trackId): ## removes track from allowed duplicates file
allowedDup.pop(trackId)
data.save_allowedDup()
# return allowedDup
def remAllowedDuplicates(trackIds= {}): #removes duplicates that are not allowed returns list of ids
for trackId in allowedDup:
if trackId in trackIds: # if it is an allowed duplicate
allowedPlaylistIds= allowedDup[trackId]
rem= True
for playlistId in trackIds[trackId]:
if playlistId not in allowedPlaylistIds:
rem= False
if rem: # if allowed to be duplicate remove
del trackIds[trackId]
else:# it has been added to another playlist so user has to re authenticate it as allowed
data.rem_fromAllowedDup(trackId)
return list(trackIds.keys())
def duplicates():# returns all duplicates except songs that have been user deleted
duplicates= {}
for trackId in songData:
song= songData[trackId]
if len(song[3]) > 1 and not song[5] == 'true': ## if duplicate(in multiple playlists) and not deleted
duplicates[trackId]= [playlistData[0] for playlistData in song[3]]
return duplicates
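        # NOTE: everything below this return is unreachable legacy console code kept from an older version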
# also ignore missing and deleted duplicates?
for trackId in songData:
song= songData[trackId]
if len(song[3]) > 1 and not song[4] == 'true' or song[5] == 'true':# if song duplicated and not missing or deleted then count it
if trackId in allowedDup:
allowed+= len(allowedDup[trackId])-1 #allowed to be duplicated - 'original'
for i in song[3]:
if i[0] not in allowedDup[trackId]:
if input('Allowed duplicate %s is also in %s allow?:' % (song[1], ids_playlists[i[0]])) == 'y':
allowedDup= data.add_allowedDup(trackId,i[0], allowedDup)
allowed+=1
else:
print(song[1],','.join(song[2]),','.join([ids_playlists[playlist[0]] for playlist in song[3]]))
if input('add to allowed duplicate list? ') == 'y':
playlists= [playlist[0] for playlist in song[3]]
allowedDup= data.add_allowedDup(trackId,playlists, allowedDup)
allowed+= len(song[3])
total+=len(song[3])-1
return total,allowed
# 'track_id':['date first added', 'name', ['artist'], (current playlist/s)[['current playlist','date added'], ...], 'missing', 'deleted]
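    # Illustrative example of a single songData record (the ids and names below are made up):
    # songData['spotify:track:abc123'] = [
    #     '2021-08-16',                                # date first added
    #     'Song Name',                                 # name
    #     ['Artist A', 'Artist B'],                    # artists
    #     [['spotify:playlist:xyz789', '2021-08-16']], # current playlist(s) with date added
    #     'false',                                     # missing
    #     'false',                                     # deleted ('true', 'false' or 'notConf')
    # ]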
#new
#add gui
def setDeletedState(trackId, state):
if state:
changeState= 'true' # delete tag updated
else:
changeState= 'false' # delete tag updated
song= songData[trackId]
if changeState != song[5]: #only updates songData if state changed
song[5]= changeState
            data.add_log('%s deleted state set to %s' % (song[1], changeState))
songData[trackId]= song
data.save_songData()
def missing():# returns list of missing trackIds
missingList= []
for trackId in songData:
if songData[trackId][4] == 'true': # if missing
missingList.append(trackId)
return missingList
def remConf(trackIds):
toRem=[]
for trackId in trackIds:
if songData[trackId][5] in ['true','false']: # if it has been user confirmed remove it
toRem.append(trackId)
for trackId in toRem:
trackIds.remove(trackId)
return trackIds
def remDel(trackIds):
toRem=[]
for trackId in trackIds:
if songData[trackId][5] =='true': # if it has been user confirmed remove it
toRem.append(trackId)
for trackId in toRem:
trackIds.remove(trackId)
return trackIds
    def deleted(missingList): # returns deleted (user confirmed) songs from a list of missing trackIds
deletedList= []
for trackId in missingList:
if songData[trackId][5] == 'true':
deletedList.append(trackId)
return deletedList
def totalSongs():
missingList= data.missing()
deletedList= data.deleted(missingList)
delSongs= len(deletedList)
missSongs= len(missingList)- delSongs
duplicates= data.duplicates() #dictionary
dupSongs= len(duplicates)
total= len(songData)-delSongs
return 'Total songs: %s Duplicate songs: %s Missing songs: %s' % (total,dupSongs, missSongs)
class spotify():
# new and not sure if working
def getAuth(Id,window=None):
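        # Rough flow of this function (comments added for readability):
        #   1. build a SpotifyOAuth manager with a per-user token cache (CacheFileHandler)
        #   2. if a cached token exists, refresh it when expired and return the auth manager
        #   3. otherwise open the authorization URL, read the redirect URL back from the console
        #      (or from window.redirect when the GUI is open), parse the code out of it,
        #      exchange the code for a token and cache it before returning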
print('getting Auth', window)
as_dict= True
cid, secret= spotify.get_keys()
scope = ['user-library-read', 'playlist-read-private', 'playlist-read-collaborative', 'user-follow-read', 'user-follow-modify']
# sp= getAuth(cid, secret, scope, Id).sp
handler= CacheFileHandler(username= Id)
auth= SpotifyOAuth(scope=scope,client_id=cid, client_secret=secret,redirect_uri= 'https://i.dailymail.co.uk/i/pix/2012/06/04/article-2154283-136ED7F3000005DC-412_634x412.jpg', cache_handler=handler, show_dialog=True)#, username= my_id
def getCode(window):
print('get code', window)
auth._open_auth_url()
if window == None: # if no window open
redirect= input('Redirect Url: ')
else:
while window.answered == False:
pass
redirect= window.redirect.text()
state, code= auth.parse_auth_response_url(redirect)
return code
token_info = auth.validate_token(auth.cache_handler.get_cached_token())
if token_info is not None:
if auth.is_token_expired(token_info):
token_info = auth.refresh_access_token(
token_info["refresh_token"]
)
auth._save_token_info(token_info if as_dict else token_info["access_token"])
return auth
payload = {
"redirect_uri": auth.redirect_uri,
"code": getCode(window),
"grant_type": "authorization_code",
}
if auth.scope:
payload["scope"] = auth.scope
if auth.state:
payload["state"] = auth.state
headers = auth._make_authorization_headers()
response = auth._session.post( # token info needed
auth.OAUTH_TOKEN_URL,
data=payload,
headers=headers,
verify=True,
proxies=auth.proxies,
timeout=auth.requests_timeout,
)
token_info = response.json()
token_info = auth._add_custom_values_to_token_info(token_info)
auth.cache_handler.save_token_to_cache(token_info)
auth._save_token_info(token_info if as_dict else token_info["access_token"])
return auth
#new
def getSp(Id, window= None):
print('getting Sp')
try:
# auth= SpotifyOAuth(scope=scope,client_id=cid, client_secret=secret,redirect_uri= 'https://i.dailymail.co.uk/i/pix/2012/06/04/article-2154283-136ED7F3000005DC-412_634x412.jpg', username= Id, show_dialog=True)#, username= my_id
# sp = spotipy.Spotify(client_credentials_manager=auth)
sp = spotipy.Spotify(client_credentials_manager=spotify.getAuth(Id, window))
test= sp.current_user_playlists(limit=1)
print('got authentication')
except:
data.add_log('Authentication failed for %s' % username)
return False
return sp
def verifyUsername(Id):
cid, secret= spotify.get_keys()
auth= SpotifyClientCredentials(client_id= cid, client_secret= secret)
tempSp = spotipy.Spotify(client_credentials_manager= auth)
try:
newUsername= tempSp.user(Id)['display_name']
return newUsername
except:
return False
def find_userPlaylists(sp,Id): # generates all user playlists user to create ids_playlists
playlistInfo= [[playlist['owner']['id'],playlist['uri'], playlist['name']] for playlist in sp.current_user_playlists(limit=50)['items']]
toReturn= []
for playlist in playlistInfo:
if playlist[0]== Id: # if id owner is the playlist owner
toReturn.append(playlist[1:])
return toReturn
def update_ui(text= None, percent= None, update_label= None, update_progress= None):
if text != None:
print('text:',text)
if update_label != None:
update_label(string= text)
if percent != None and update_progress != None:
update_progress(percent= percent)
#new
#add gui
## update with gui ( parse self then call gui.setMainLabel(self,string)
def updateSongs(update_label= None, update_progress= None): # does not get active user songs only jamies because of spotipy things
global songData
state= 'Auto' if __name__ == 'Main' else 'Manual'
data.add_log('\n%s: (%s) Updating songs for %s:' % (state , datetime.now().strftime("%d-%m-%Y %H:%M:%S"), username) )
sp= spotify.getSp(my_id)
        playlistIds= [playlist['uri'] for playlist in sp.current_user_playlists(limit=50)['items']] # only the first 50 playlists are fetched (no pagination)
# songData= [['spotify:track:2dje3ZBu1j1r0QfR7mtS0l', 'spotify:playlist:1JTU5zqtgA1zzqb90papUO', '2021-08-16'], ['spotify:track:5H3swhQ72PiGd5PYz4P61P', 'spotify:playlist:1JTU5zqtgA1zzqb90papUO', '2021-08-16']]
loadedSongs=[]# [[id, [ [playlist,date added] ]],...next]
playlistsForDown= list(ids_playlists.keys())
num=0
for playlist_id in playlistIds:
if playlist_id in playlistsForDown:
spotify.update_ui(text= 'Loading %s...' % ids_playlists[playlist_id], update_label= update_label)
start= 0
while True:# the limit is 100 songs so it must be iterated to get all songs
total=0
for items in sp.playlist_tracks(playlist_id, offset=start)["items"]:
artists=[]
for artist in items['track']['artists']:
artists.append(artist['name'])
loadedSongs.append([items['track']['uri'],[[playlist_id, items['added_at'][:-10]]],items['track']['name'],artists])
total+=1
start+=100 # if playlist is exactly a mutiple of 100 this still works
if total != 100:
break
num+=1
spotify.update_ui(percent= round((num/len(playlistsForDown))*100), update_progress= update_progress)
if loadedSongs == []:
spotify.update_ui(text= 'No songs found', update_label= update_label)
else:
spotify.update_ui(text= 'Begin compilation...', update_label= update_label)
loaded_songData={}
total= len(loadedSongs)
pos=0
while loadedSongs != []:
song= loadedSongs.pop(0)
# song= loadedSongs.pop(0) # removes first song and sets song equal to it
# song= [track_id,[ [current_playlist,dateAdded] ],name,[artists]]
trackId= song[0]
while True:
all_trackIds= data.r_nestedElem(0,loadedSongs) # run everytime to update (0 refers to id)
if trackId in all_trackIds:# if duplicate exists
temp= loadedSongs.pop(all_trackIds.index(trackId)) # removes duplictate song and sets temp equal to it
# combine duplicated song data
song[1].append(temp[1][0])# song[1]= [[current_playlist_a,dateAdded_a],[current_playlist_b,dateAdded_b]]
song[1]= sorted(song[1], key= lambda playDate: datetime.strptime(playDate[1],'%Y-%m-%d').timestamp()) # sorts list of current playlists by date added
else:break
loaded_songData[trackId]= song[1:] # [ [ [cur play,date] ],name,artist]
pos+=1
# print('%s/%s' % (pos, total), end= '\r')
spotify.update_ui(percent= round((pos/total)*100), update_progress= update_progress)
# loaded_songData should be { id: [ [curPlaylist,dateAdded] ]],id: [ [curPlaylistA,dateAddedA],[curPlaylistB,dateAddedB] ] }
# when value in loaded_songData has more than one elem it is duplicated
#songData format
# 'track_id':['date first added', 'name', ['artist'], (current playlist/s)[['current playlist','date added'], ...], 'missing', 'deleted]
data.load_songData()
# if update_ui != None: update_ui(percent=50)
# for saved tracks
text= 'total songs: %s' % total
for trackId in songData:
song= songData[trackId]
if trackId in loaded_songData:
song[4]= 'false' # set missing value
song[5]= 'notConf' # set deleted value to not Confirmed so if missing user has to set deleted to either true or false
# loaded song= [ [curPlaylist,dateAdded],name ,[aritists,..] ]
loadedSong= loaded_songData[trackId]
if song[3] != loadedSong[0]:# if current playlists have changed update songData
tempSong= loadedSong[0]
for playlist in song[3]:
# playlist= [playlist,date added]
if playlist not in loadedSong[0]:
temp= '%s removed from %s' % (song[1], ids_playlists[playlist[0]])
data.add_log(temp)
text+=temp
print(temp) ## throwing key error if duplicate in same playlist removed?
else:
tempSong.remove(playlist) # remove playlists that are present in both leaving only new playlists
if tempSong != []: # if new playlist ^^ added
temp= '%s added to %s'% (song[1], ids_playlists[tempSong[0][0]])
data.add_log(temp)
text+=temp
print(temp) ## throwing key error if duplicate in same playlist removed?
song[3]= loadedSong[0] # current playlists updated
if song[1] != loadedSong[1] or song[2] != loadedSong[2]:# if name or artist changed then update
temp= 'Name or artists changed from\n%s %s to %s %s' %(song[1], ','.join(song[2]), loadedSong[1], ','.join(loadedSong[2]))
data.add_log(temp)
print(temp)
if input('Confirm rename? y/n(add to gui somehow)') == 'y':
song[1]= loadedSong[1]
song[2]= loadedSong[2]
# remove song from loaded_songData to leave only new songs
del loaded_songData[trackId]
else:
# song is missing/deleted
if song[4] == 'false': # first time recorded as missing
data.add_log('%s - %s is missing' % (song[1], ','.join(song[2])))
song[4]= 'true' # missing tag updated
songData[trackId]= song # songData updated with new values
spotify.update_ui(text= text, update_label= update_label)
# if update_ui != None: update_ui(percent=75)
# new songs
# only new songs left in loaded data
if loaded_songData != {}: # if new songs exist
numNew= len(loaded_songData)
temp= '\nAdding %s new song(s)' % numNew
data.add_log(temp)
print(temp)
for pos,newTrackId in enumerate(loaded_songData):
print('%s/%s' % (pos, numNew), end= '\r')
song= loaded_songData[newTrackId]# [ [ [cur playlist, date added ], []... ], name, [artists]]
playlist_date= song[0]
dateFirstAdded= playlist_date[0][1] # first date recorded as loaded song data is sorted
# name, artist= spotify.get_nameArtist(sp, newTrackId) # if track worked i would have used this but i have to add names from search through playlist now :(
name= song[1]
artist= song[2]
songData[newTrackId]= [dateFirstAdded,name,artist,playlist_date,'false','false'] # not missing or deleted # could be added to multiple new playlists?
temp= '%s, %s added to %s' % (name, artist[0], ids_playlists[playlist_date[0][0]])
data.add_log(temp)
print(temp)
data.save_songData()
data.totalSongs()
spotify.update_ui(text= 'Done', update_label= update_label)
## update with gui
def get_keys(): # returns client id, client secret
accessLoc= 'spotify access.txt'
if not exists(accessLoc):
cid=input('File %s does not exist\nInput client id: ' % accessLoc)
secret= input('Input client secret: ')
data.createFile(accessLoc, string= cid+'\n'+secret)
else:
with open(accessLoc,'r',encoding= 'utf-8') as keys:
keys= keys.readlines()
cid= keys[0].replace('\n','')
secret= keys[1]
return cid , secret
## update with gui
# def user_playlists(sp,saved_ids_playlists={}):
# # creates dict of found(within saved ids) user made playlists (id; name) for downloading
# # DO NOT PARSE SAVED PLAY IDS IF FIRST TIME SETUP
# ids_playlists={}
# results = sp.current_user_playlists(limit=50)# if you have more than 50 playlists i dont like you :)
# pos=0
# for i in results['items']:
# if i['owner']['id'] == my_id:
# ids_playlists[results['items'][pos]['uri']]= results['items'][pos]['name']
# if saved_ids_playlists != {}: # remove the ones not needed useful option for first set up to find all playlists if needed
# for play_id in list(ids_playlists.keys()):
# if play_id not in list(saved_ids_playlists.keys()): del ids_playlists[play_id]
# pos+=1
# if saved_ids_playlists == {}:
# print('Found %s user playlists:' % len(ids_playlists))
# for i,item in enumerate(ids_playlists.keys()):
# print(i+1,ids_playlists[item]+ ' ---> '+ item) #newest name used (but saved with oldest name) incase user changes playlist id
# del_list= []
# for item in ids_playlists.keys():
# if input('save %s?[y]' % ids_playlists[item]) != 'y':
# del_list.append(item)
# print('deleted')
# for item in del_list:
# del ids_playlists[item]
# else:
# print('Found %s user playlists for download:\n' % (str(len(ids_playlists))+'/'+ str(len(saved_ids_playlists))))
# for i in ids_playlists.keys():
# print(ids_playlists[i]) #newest name used (but saved with oldest name) incase user changes playlist idi actually resaved with new name
# print()
# print('Loading...',end='\r')
# return ids_playlists
## major change needed ? move to data
def update_saved_ids_playlists(saved_ids_playlists,update_dict): # replaces old playlist names with new ones
for i in list(update_dict.keys()):
saved_ids_playlists[i]= update_dict[i]
return saved_ids_playlists
## gui
def search(searchString):
searchString= searchString.lower()
results= []
for pos, data in enumerate(songData.values()):
if data[5] != 'true': # if not deleted
if searchString in data[1].lower(): # artist name
results.append(pos)
for artistName in data[2]:
if searchString in artistName.lower():
if pos not in results: results.append(pos) # could have already been adde
# Ids= list(songData.keys())
# for pos in results:
# song= songData[Ids[pos]]
# name= song[1]
# artist= song[2][0]
# currentPlaylist= ids_playlists[song[3][0][0]]
# print('%s, %s --- %s' % (name,artist, currentPlaylist))
results= [list(songData.keys())[pos] for pos in results] # turns list of positions into correlated song ids from songData
return results
def followArtistsInPlaylists(update_label, update_progress, playlistId): # follows artists that have more than one song in the playlist
tempArtists= []
toFollow= []
playlistSongs= []
sp= spotify.getSp(my_id)
length= len(songData)
spotify.update_ui(percent= 0, update_progress= update_progress)
spotify.update_ui(text= 'Collecting songs from playlist...', update_label= update_label)
for pos,Id in enumerate(songData):
            song= songData[Id]
            if song[3][0][0] == playlistId: # current playlist id
playlistSongs.append(Id)
spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
spotify.update_ui(text= 'Converting track ids to artist ids...', update_label= update_label)
spotify.update_ui(percent= 0, update_progress= update_progress)
length= len(playlistSongs)
if length > 50:
pos= 50
while pos <= length+ 49:
tempArtists.extend([ song['artists'][0]['id'] for song in sp.tracks(playlistSongs[pos-50:pos])['tracks']])
spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
pos+=50
else:
tempArtists= [ song['artists'][0]['id'] for song in sp.tracks(playlistSongs)['tracks']]
spotify.update_ui(percent= 100, update_progress= update_progress)
while tempArtists != []:
artistId= tempArtists.pop(0)
if artistId in tempArtists: # if multiple songs by artists exist in playlist
while True:
try:
tempArtists.remove(artistId)
except:
break
toFollow.append(artistId)
following= []
pos= 50
spotify.update_ui(text= 'Finding followed artists...', update_label= update_label)
while pos <= len(toFollow)+ 49:
following.extend(sp.current_user_following_artists(toFollow[pos-50:pos])) # has a limit even though docs do not mention it
pos+=50
total= 0
for i in following:
if not i: total+=1
print(total)
if total == 0:
spotify.update_ui(text= 'No artists to follow', update_label= update_label)
return
# self.sp.user_follow_artists(artists) # can do entire list of artists at once(probs max 50 at a time)
length= len(toFollow)
for pos, artistId in enumerate(toFollow):
if not following[pos]: # if not following artist
name= sp.artist(artistId)['name']
temp= 'Followed %s' % name
data.add_log(temp)
spotify.update_ui(text= temp, update_label= update_label)
spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
sp.user_follow_artists([artistId])
spotify.update_ui(percent= 100, update_progress= update_progress)
spotify.update_ui(text= 'Finished', update_label= update_label)
#on start
print('BUG: newly missing songs do not end up in the unconfirmed area after running once, deleting, then running again; the same happens when a song is just deleted')
seperator= "%$%"
seperator2= "$%$"
# if auto run just close if id and stuff is missing and dont run gui
my_id= data.get_last_user_id() # may behave oddly since this variable was renamed from user_id
if my_id == '': # if no last user means this is first open
if __name__ != '__main__':
data.add_log('!! NO LAST USER PROGRAM ENDED !!')
quit()
else: print('create a user')
data.get_id_user()
data.changeActiveUser(my_id) #updates user to latest
data.get_allowedDup()
if __name__ == '__main__':
# from followArtists import followArtists as fA
# fA('spotify:playlist:33JwDwoh3u3HjKix4i995j' ,songData, spotify.getSp())
# input('this is an input')
#gui
# spotify.updateSongs()
# data.totalSongs()
# while True:
# results= spotify.search(input())
# if results != []:
# for i in results:
# print('%s:%s' % (i,songData[i][1]))
# else: print('no songs')
app = QApplication([])
window = MainWindow()
window.show()
# run should also add search
# spotify.updateSongs()
# then have button
# print(data.totalSongs())
# then have button for accept or not
app.exec()
else:
spotify.updateSongs()
|
BadCodeswJamie/Sponitor
|
sponitor.py
|
sponitor.py
|
py
| 68,332 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24128542933
|
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy as np
import pickle
with open("/home/ekin/Desktop/workspace/RotatetObjectDetectionReview/test_data/gt_area.pickle", 'rb') as handle:
gt_area = pickle.load(handle)
gt_area = np.sort(gt_area)  # np.sort returns a sorted copy, so the result must be assigned for the sort to take effect
'''
plt.hist(gt_area, bins='auto', edgecolor='black')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title('Histogram of Data')
plt.grid(True)
plt.show()
'''
# Reshape the data to have a single feature dimension
data_reshaped = np.array(gt_area).reshape(-1, 1)
# Number of clusters
num_clusters = 6
# Perform K-means clustering
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(data_reshaped)
# Get the cluster labels
labels = kmeans.labels_
cluster_centers = kmeans.cluster_centers_
print(np.sort(cluster_centers,axis = 0))
# Plot the scatter plot
plt.scatter(range(len(gt_area)), gt_area, c=labels, cmap='viridis')
plt.xlabel('Data Point')
plt.ylabel('Value')
plt.title('K-means Clustering')
plt.savefig("/home/ekin/Desktop/workspace/RotatetObjectDetectionReview/figures/area.png")
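# Illustrative follow-up (the 5000.0 value below is arbitrary, not taken from the dataset):
# once fitted, kmeans.predict maps any new ground-truth area onto one of the learned size
# clusters, e.g. to bucket detections into area groups.
new_area = np.array([[5000.0]])
print("Cluster index for an area of 5000:", kmeans.predict(new_area)[0])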
|
ikoc/RotatetObjectDetectionReview
|
src/kMeansOfArea.py
|
kMeansOfArea.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11194307443
|
from typing import Tuple, Optional
import albumentations as A
import cv2
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
import os
from PIL import Image
from tqdm import tqdm
import pandas as pd
import pywt
import logging
from utils.image_utils import random_crop_with_transforms, load_image, split_by_wavelets
from utils.tensor_utils import preprocess_image
class WaveletSuperSamplingDataset(Dataset):
def __init__(self, folder_path, window_size: int = 224, dataset_size: int = 1000):
images_names_list = os.listdir(folder_path)
images_names_list.sort()
self.images_paths = [
os.path.join(folder_path, image_name)
for image_name in images_names_list
]
self.window_size = window_size
self.dataset_size = dataset_size
self.images_count = len(self.images_paths)
self.interpolations = [
cv2.INTER_AREA,
cv2.INTER_LANCZOS4,
cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_CUBIC,
None
]
def __len__(self):
return self.dataset_size
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
image_idx = np.random.randint(0, self.images_count)
image = load_image(self.images_paths[image_idx])
if min(image.shape[:2]) < self.window_size:
            logging.info('Image {} is too small, resizing!'.format(self.images_paths[image_idx]))
image = cv2.resize(image, (self.window_size + 5, self.window_size + 5), interpolation=cv2.INTER_AREA)
crop = random_crop_with_transforms(
image1=image,
window_size=self.window_size
)
selected_inter_method: Optional[int] = self.interpolations[np.random.randint(0, len(self.interpolations))]
        # TODO: add a transform which changes an OpenCV image to its LL wavelet representation
selected_inter_method = None
ycrcb_ll_crop: Optional[np.ndarray] = None
if selected_inter_method is not None:
lr_crop = cv2.resize(
crop,
(self.window_size // 2, self.window_size // 2),
interpolation=selected_inter_method
)
ycrcb_ll_crop = cv2.cvtColor(lr_crop, cv2.COLOR_RGB2YCrCb)
ycrcb_ll_crop = ycrcb_ll_crop.astype(np.float32) / 255.0 * self.window_size * 2
ycrcb_crop = cv2.cvtColor(crop, cv2.COLOR_RGB2YCrCb)
y, cr, cb = cv2.split(ycrcb_crop)
# LL, LH, HL, HH <- C
y_ll, y_lh, y_hl, y_hh = split_by_wavelets(y)
cr_ll, cr_lh, cr_hl, cr_hh = split_by_wavelets(cr)
cb_ll, cb_lh, cb_hl, cb_hh = split_by_wavelets(cb)
if selected_inter_method is None:
ycrcb_ll_crop = cv2.merge((y_ll, cr_ll, cb_ll))
# 9 channels
gt_wavelets = cv2.merge((y_lh, y_hl, y_hh, cr_lh, cr_hl, cr_hh, cb_lh, cb_hl, cb_hh))
return preprocess_image(ycrcb_ll_crop), preprocess_image(gt_wavelets, 0, 1), preprocess_image(ycrcb_crop)
class SuperSamplingDataset(WaveletSuperSamplingDataset):
def __init__(self, folder_path, window_size: int = 224, dataset_size: int = 1000):
super().__init__(folder_path, window_size, dataset_size)
self.interpolations = [
cv2.INTER_AREA,
cv2.INTER_LANCZOS4,
cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_CUBIC
]
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
image_idx = np.random.randint(0, self.images_count)
image = load_image(self.images_paths[image_idx])
if min(image.shape[:2]) < self.window_size:
            logging.info('Image {} is too small, resizing!'.format(self.images_paths[image_idx]))
image = cv2.resize(image, (self.window_size + 5, self.window_size + 5), interpolation=cv2.INTER_AREA)
crop = random_crop_with_transforms(
image1=image,
window_size=self.window_size
)
selected_inter_method: int = self.interpolations[np.random.randint(0, len(self.interpolations))]
low_res_crop = cv2.resize(
crop,
(self.window_size // 2, self.window_size // 2),
interpolation=selected_inter_method
)
return preprocess_image(low_res_crop, 0, 1), preprocess_image(crop, 0, 1)
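# split_by_wavelets is imported from utils.image_utils and its implementation is not shown in this
# file. The sketch below is only an assumption of what such a helper could look like, using a
# single-level 2D DWT from pywt (imported above); the real helper may use a different wavelet,
# band ordering or normalisation.
def _split_by_wavelets_sketch(channel: np.ndarray, wavelet: str = 'haar'):
    # pywt.dwt2 returns the approximation band and the three detail bands of a 2D signal
    ll, (lh, hl, hh) = pywt.dwt2(channel.astype(np.float32), wavelet)
    return ll, lh, hl, hh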
|
AlexeySrus/WPNet
|
research_pipelines/supersampling_with_wavelets/dataloader.py
|
dataloader.py
|
py
| 4,411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30114979232
|
import itertools
import pandas as pd
import math
from pathlib import Path
def composite_SD(means, SDs, ncounts):
'''Calculate combined standard deviation via ANOVA (ANalysis Of VAriance)
See: http://www.burtonsys.com/climate/composite_standard_deviations.html
Inputs are:
means, the array of group means
SDs, the array of group standard deviations
ncounts, the array of number of samples in each group
Result is the overall standard deviation.
'''
num_groups = len(means)
if num_groups != len(SDs) or num_groups != len(ncounts):
raise Exception('inconsistent list lengths')
# calculate total number of samples, N, and grand mean, GM
N = sum(ncounts)
if N == 1:
return SDs[0]
GM = 0.0
for i in range(num_groups):
GM += means[i] * ncounts[i]
GM /= N
# calculate Error Sum of Squares
ESS = 0.0
for i in range(num_groups):
ESS += ((SDs[i]) ** 2) * (ncounts[i] - 1)
# calculate Total Group Sum of Squares
TGSS = 0.0
for i in range(num_groups):
TGSS += ((means[i] - GM) ** 2) * ncounts[i]
# calculate standard deviation as square root of grand variance
result = math.sqrt((ESS + TGSS)/(N - 1))
return result
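# Small usage sketch for composite_SD; the group statistics below are illustrative values only.
def _composite_SD_example():
    '''Combine two groups (means 10 and 20, SDs 2 and 3, 5 samples each) into one overall SD.'''
    return composite_SD(means=[10.0, 20.0], SDs=[2.0, 3.0], ncounts=[5, 5])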
def create_transunion_csv():
"""
This python script is used to merge all the parquet data files into one
single csv file. TransUnion data needs to be partitioned into 10 different
csv files due to the memory limitation.
"""
num_partition = 10
data_dir = Path("data/transunion/")
num_files = math.ceil(len(list(data_dir.glob("*.parquet"))) / num_partition)
for i in range(num_partition):
df = pd.concat(
pd.read_parquet(parquet_file, engine="pyarrow")
for parquet_file in itertools.islice(data_dir.glob("*.parquet"), i *
num_files, (i + 1) * num_files))
df.to_csv("data/transunion_{}.csv".format(i))
def expand_df(df, columns):
"""
Parameters:
----------
df: pd.series
Each cell holds a 2d array.
colums: list
Column names for the expanded DataFrame.
Return:
-------
A expanded DataFrame.
"""
df = df.explode()
df = df.apply(pd.Series)
df.rename(columns=lambda x: columns[x], inplace=True)
return df
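# Usage sketch for expand_df; the column names and values are illustrative only.
def _expand_df_example():
    # a single cell holding a 2d array becomes two rows with named columns after expansion
    series = pd.Series([[[1, 2], [3, 4]]])
    return expand_df(series, columns=["feature_a", "feature_b"])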
|
superyang713/Synthetic_Data_Generation
|
utils.py
|
utils.py
|
py
| 2,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3814572161
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import numpy as np
df = pd.read_csv('mail_data.csv')
# Data Preprocessing
df['Category'] = df['Category'].map({'spam': 0, 'ham': 1})
X = df['Message']
Y = df['Category']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=3)
# Feature Extraction
feature_extraction = TfidfVectorizer(min_df=1, stop_words='english', lowercase=True)
X_train_features = feature_extraction.fit_transform(X_train)
X_test_features = feature_extraction.transform(X_test)
Y_train = Y_train.astype('int')
Y_test = Y_test.astype('int')
# Model Training
model = LogisticRegression()
model.fit(X_train_features, Y_train)
# Model Evaluation
prediction_on_training_data = model.predict(X_train_features)
accuracy_on_training_data = accuracy_score(Y_train, prediction_on_training_data)
print(f'Accuracy on Training Data: {accuracy_on_training_data}')
prediction_on_test_data = model.predict(X_test_features)
accuracy_on_test = accuracy_score(Y_test, prediction_on_test_data)
print(f'Accuracy on Test Data: {accuracy_on_test}')
# Input Mail Prediction
input_your_mail = ["Congratulations! You won 3000$ Walmart gift card. Go to http://bit.ly/123456 tp claim now."]
input_data_features = feature_extraction.transform(input_your_mail)
prediction = model.predict(input_data_features)
if prediction[0] == 1:
print('Ham')
else:
print('Spam')
print(prediction)
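# Optional convenience wrapper (added as an illustration, not part of the original script): it
# reuses the already-fitted vectoriser and model above to classify any message string.
def classify_message(message):
    features = feature_extraction.transform([message])
    return 'Ham' if model.predict(features)[0] == 1 else 'Spam'
print(classify_message("Are we still meeting for lunch tomorrow?"))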
|
bhar1gitr/ML_Spam-Ham_Detector
|
pandassss.py
|
pandassss.py
|
py
| 1,653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18846943134
|
import logging
from concurrent import futures
from threading import Timer
from functools import partial
import cloud.blockstore.public.sdk.python.protos as protos
from .error_codes import EResult
from .error import ClientError, _handle_errors, client_error_from_response
from .grpc_client import GrpcClient
from .http_client import HttpClient
from .durable import DurableClient
from .base_client import dispatch_nbs_client_methods
from .safe_client import _SafeClient
from .future import unit, bind
DEFAULT_HARD_TIMEOUT = 8*60 # 8 min
DEFAULT_DISCOVERY_LIMIT = 3
class _Executor(object):
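    # How this executor behaves (comments added for readability):
    #   * run() fires the request against the previously used endpoint (if any) and starts a hard
    #     timer that fails the whole call with E_TIMEOUT when it expires
    #   * an optional soft ("hedged") timer fires a second attempt against another discovered
    #     instance while the first request is still pending
    #   * the first successful response wins and cancels everything else; retriable errors make
    #     the executor move on to the next discovered instance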
def __init__(self, method, balancer, factory, limit, secure, log):
self.__response = futures.Future()
self.__method = method
self.__balancer = balancer
self.__factory = factory
self.__limit = limit
self.__secure = secure
self.__visited = None
self.__log = log
self.__hedged_timer = None
self.__pending = {}
self.__done = False
self.__instances = None
self.__instances_future = None
self.__idx = 0
self.__main_timer = None
self.__hedged_timer = None
def run(self, impl, addr, timeout, soft_timeout, create_timer):
self.__main_timer = create_timer(timeout, self._on_main_timer)
self.__main_timer.start()
if soft_timeout:
self.__hedged_timer = create_timer(soft_timeout, self._on_hedged_timer)
self.__hedged_timer.start()
self.__visited = addr
if impl is not None:
self._shoot(impl, addr)
else:
self._try_shoot()
return self.__response
def _cancel_all(self):
self.__done = True
self.__main_timer.cancel()
if self.__hedged_timer is not None:
self.__hedged_timer.cancel()
for f in self.__pending.values():
f.cancel()
def _set_result(self, result, impl, addr):
if self.__done:
return
self._cancel_all()
self.__response.set_result((impl, addr, result))
def _set_exception(self, e):
if self.__done:
return
self._cancel_all()
self.__response.set_exception(e)
def _on_main_timer(self):
if self.__done:
return
self._set_exception(
ClientError(EResult.E_TIMEOUT.value, "deadline exceeded"))
def _on_hedged_timer(self):
if self.__done:
return
self._try_shoot()
def _shoot(self, impl, addr):
r = self.__method(impl)
self.__pending[(addr.Host, addr.Port)] = r
def cb(f):
self._handle_response(f, impl, addr)
r.add_done_callback(cb)
def _on_discover_instances(self, f):
if self.__done:
return
e = f.exception()
if e is None:
self.__instances = f.result().Instances
self.__log.debug("success discovery: {}".format(
map(lambda x: x.Host + ":" + str(x.Port), self.__instances)))
self._try_shoot()
return
self.__log.error("error on discovery: {}".format(e))
if len(self.__pending) == 0:
            self._set_exception(e)
def _try_shoot(self):
if self.__instances is None:
if self.__instances_future is None:
request = protos.TDiscoverInstancesRequest()
request.Limit = self.__limit
if self.__secure:
request.InstanceFilter = protos.EDiscoveryPortFilter.Value(
"DISCOVERY_SECURE_PORT")
self.__instances_future = self.__balancer.discover_instances_async(request)
self.__instances_future.add_done_callback(self._on_discover_instances)
return
while self.__idx < len(self.__instances):
inst = self.__instances[self.__idx]
self.__idx += 1
if self.__visited and \
inst.Host == self.__visited.Host and \
inst.Port == self.__visited.Port:
continue
try:
impl = self.__factory(inst.Host, inst.Port)
except Exception as e:
self.__log.warning("error on create client: {}".format(e))
continue
if impl is None:
continue
self._shoot(impl, inst)
return
if len(self.__pending) == 0:
self._set_exception(
ClientError(EResult.E_FAIL.value, "can't create client"))
def _handle_response(self, f, impl, addr):
if f.cancelled():
return
self.__log.debug("handle response from {}:{}".format(
addr.Host,
addr.Port))
del self.__pending[(addr.Host, addr.Port)]
if self.__done:
return
is_retriable = False
error = None
try:
response = f.result()
e = client_error_from_response(response)
if not e.succeeded:
raise e
except ClientError as e:
error = e
is_retriable = e.is_retriable
except Exception as e:
error = e
if not error:
self._set_result(response, impl, addr)
return
self.__log.error("{}:{} request error: {}".format(addr.Host, addr.Port, error))
if not is_retriable:
self._set_exception(error)
return
if len(self.__pending) == 0:
self._try_shoot()
@dispatch_nbs_client_methods
class _DiscoveryClient(object):
def __init__(
self,
balancer,
factory,
discovery_limit=None,
hard_timeout=None,
soft_timeout=None,
log=None,
secure=False):
self.__impl = None
self.__addr = None
self.__balancer = balancer
self.__factory = factory
self.__secure = secure
self.__limit = DEFAULT_DISCOVERY_LIMIT
if discovery_limit is not None:
self.__limit = discovery_limit
self.__timeout = DEFAULT_HARD_TIMEOUT
if hard_timeout is not None:
self.__timeout = hard_timeout
self.__soft_timeout = soft_timeout
if log is not None:
self.log = log
else:
self.log = logging.getLogger("discovery_client")
self.__create_timer = Timer
def close(self):
if self.__impl is not None:
self.__impl.close()
if self.__balancer.done() and not self.__balancer.cancelled():
self.__balancer.result().close()
def set_timer_factory(self, create_timer):
self.__create_timer = create_timer
@property
def timeout(self):
return self.__timeout
@property
def soft_timeout(self):
return self.__soft_timeout
@property
def limit(self):
return self.__limit
@_handle_errors
def _execute_request_async(
self,
method_name,
request,
idempotence_id,
timestamp,
trace_id,
request_timeout):
def method(impl):
m = getattr(impl, method_name + '_async')
return m(
request,
idempotence_id,
timestamp,
trace_id,
request_timeout)
def run(client):
e = _Executor(
method,
client,
self.__factory,
self.__limit,
self.__secure,
self.log)
return e.run(
self.__impl,
self.__addr,
self.__timeout,
self.__soft_timeout,
self.__create_timer)
def update(client):
self.__impl, self.__addr, r = client
return unit(r)
return bind(bind(self.__balancer, run), update)
def ping_async(
self,
request,
idempotence_id=None,
timestamp=None,
trace_id=None,
request_timeout=None):
def cb(client):
return client.ping_async(
request,
idempotence_id,
timestamp,
trace_id,
request_timeout)
return bind(self.__balancer, cb)
def ping(
self,
request,
idempotence_id=None,
timestamp=None,
trace_id=None,
request_timeout=None):
return self.ping_async(
request,
idempotence_id,
timestamp,
trace_id,
request_timeout).result()
def discover_instances_async(
self,
request,
idempotence_id=None,
timestamp=None,
trace_id=None,
request_timeout=None):
def cb(client):
return client.discover_instances_async(
request,
idempotence_id,
timestamp,
trace_id,
request_timeout)
return bind(self.__balancer, cb)
def discover_instances(
self,
request,
idempotence_id=None,
timestamp=None,
trace_id=None,
request_timeout=None):
return self.discover_instances_async(
request,
idempotence_id,
timestamp,
trace_id,
request_timeout).result()
def discover_instance_async(self):
future = futures.Future()
def ping_cb(f, impl, instances, i):
try:
f.result()
future.set_result(impl)
except Exception:
loop(instances, i)
def loop(instances, i):
while i < len(instances):
inst = instances[i]
i += 1
try:
impl = self.__factory(inst.Host, inst.Port)
except Exception as e:
                    self.log.warning("error on create client: {}".format(e))
continue
if impl is None:
continue
f = impl.ping_async(protos.TPingRequest())
def cb(f):
ping_cb(f, impl, instances, i)
f.add_done_callback(cb)
return
future.set_exception(
ClientError(EResult.E_FAIL.value, "can't create client"))
def discover_instances_cb(f):
try:
instances = f.result().Instances
loop(instances, 0)
except Exception as e:
future.set_exception(e)
request = protos.TDiscoverInstancesRequest()
request.Limit = self.__limit
if self.__secure:
request.InstanceFilter = protos.EDiscoveryPortFilter.Value(
"DISCOVERY_SECURE_PORT")
f = self.discover_instances_async(request)
f.add_done_callback(discover_instances_cb)
return future
class DiscoveryClient(_SafeClient):
def __init__(self, impl):
super(DiscoveryClient, self).__init__(impl)
def discover_instance(self):
return self.discover_instance_async().result()
def discover_instance_async(self):
return self._impl.discover_instance_async()
def find_closest(clients, request_timeout=None):
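    # Race a ping against every candidate client and resolve with the first
    # one that answers successfully; the remaining requests are cancelled and
    # their clients closed. If every ping fails, the final failure propagates.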
result = futures.Future()
requests = dict()
def done(c, f):
if result.done():
return
del requests[c]
if f.exception():
if not requests:
result.set_exception(f.exception())
c.close()
else:
result.set_result(c)
while requests:
x, f = requests.popitem()
f.cancel()
x.close()
requests = {c: c.ping_async(
protos.TPingRequest(),
request_timeout=request_timeout) for c in clients}
for c, f in requests.copy().items():
f.add_done_callback(partial(done, c))
return result
def CreateDiscoveryClient(
endpoints,
credentials=None,
request_timeout=None,
retry_timeout=None,
retry_timeout_increment=None,
log=None,
executor=None,
hard_timeout=None,
soft_timeout=None,
discovery_limit=None):
def make_http_backend(endpoint):
return HttpClient(
endpoint,
credentials,
request_timeout,
log,
executor)
def make_grpc_backend(endpoint):
return GrpcClient(
endpoint,
credentials,
request_timeout,
log)
def make_backend(endpoint):
if endpoint.startswith('https://') or endpoint.startswith('http://'):
return make_http_backend(endpoint)
else:
return make_grpc_backend(endpoint)
def make_client(endpoint):
return DurableClient(
make_backend(endpoint),
retry_timeout,
retry_timeout_increment,
log)
def factory(host, port):
return make_client(host + ':' + str(port))
if not isinstance(endpoints, list):
endpoints = [endpoints]
balancer = find_closest(map(make_client, endpoints))
discovery_client = _DiscoveryClient(
balancer,
factory,
discovery_limit,
hard_timeout,
soft_timeout,
log,
credentials is not None)
return DiscoveryClient(discovery_client)
|
ydb-platform/nbs
|
cloud/blockstore/public/sdk/python/client/discovery.py
|
discovery.py
|
py
| 13,730 |
python
|
en
|
code
| 32 |
github-code
|
6
|
72165174909
|
# -*- coding:utf-8 -*-
# ! usr/bin/env python3
"""
Created on 28/12/2020 9:16
@Author: XINZHI YAO
"""
import os
import argparse
def pubtator_split(pubtator_file: str, num_per_file: int,
save_path: str):
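    # PubTator stores each document as a 'PMID|t|title' line, a 'PMID|a|abstract'
    # line and optional annotation lines, with a blank line between documents.
    # The split below therefore counts title ('|t|') lines and starts a new
    # output file every num_per_file documents.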
if not os.path.exists(save_path):
os.mkdir(save_path)
split_file_idx = 0
file_save_num = 0
base_prefix = os.path.basename(pubtator_file).split('.')[0]
save_file = f'{save_path}/{base_prefix}.{split_file_idx}.txt'
wf = open(save_file, 'w')
with open(pubtator_file) as f:
for line in f:
l = line.strip().split('|')
if l == ['']:
pass
# wf.write('\n')
if len(l) > 2:
if l[1] == 't':
file_save_num += 1
if file_save_num % num_per_file == 0:
print(f'{base_prefix}.{split_file_idx}.txt save done.')
wf.close()
split_file_idx += 1
save_file = f'{save_path}/{base_prefix}.{split_file_idx}.txt'
wf = open(save_file, 'w')
wf.write(f'{line.strip()}\n')
elif l[1] == 'a':
wf.write(f'{line.strip()}\n')
else:
wf.write(f'{line.strip()}\n')
print(f'{base_prefix}.{split_file_idx}.txt save done.')
wf.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PubTator Split.')
parser.add_argument('-pf', dest='pubtator_file', type=str, required=True)
parser.add_argument('-pn', dest='pubtator_num_per_file', type=int,
default=2000, help='default: 2000')
parser.add_argument('-sp', dest='split_path', type=str, required=True)
args = parser.parse_args()
pubtator_split(args.pubtator_file, args.pubtator_num_per_file, args.split_path)
|
YaoXinZhi/BioNLP-Toolkit
|
Split_PubTator_File.py
|
Split_PubTator_File.py
|
py
| 1,971 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19923413937
|
from random import randint
from time import sleep
from operator import itemgetter
jogadores = {'jogador1': randint(1, 6),
'jogador2': randint(1, 6),
'jogador3': randint(1, 6),
'jogador4': randint(1, 6)}
ranking = list()
for k, v in jogadores.items():
print(f'O {k} tirou o dado {v}')
sleep(1)
ranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
print('=-' * 30)
for i, v in enumerate(ranking):
print(f'O {i+1}º lugar: {v[0]} tirou {v[1]} ')
|
samuelfranca7l/PythonExercises
|
exercicios/PythonExercicios_Desafio091.py
|
PythonExercicios_Desafio091.py
|
py
| 511 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
17139183231
|
#https://leetcode.com/problems/find-the-duplicate-number/
"""Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
There is only one repeated number in nums, return this repeated number.
You must solve the problem without modifying the array nums and uses only constant extra space.
"""
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
slow=nums[0]
fast=nums[0]
while True:
slow=nums[slow]
fast=nums[nums[fast]]
if slow==fast:
break
slow=nums[0]
while slow!=fast:
slow=nums[slow]
fast=nums[fast]
return slow
|
Eswar133/Practice
|
Find the Duplicate Number.py
|
Find the Duplicate Number.py
|
py
| 794 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33937878041
|
"""
Simple animation for your shell
"""
from field import Field
import time
import random
from saver import Ball, Saver
class MaskSaver(Saver):
def __init__(self, balls=int(random.random() * 100), trail=" ", mask=None):
self.field = Field(title="Term Saver")
self.balls = [Ball(x=int(random.random() * self.field.x-1)+1, y=int(random.random() * self.field.y-1)+1) for x in range(balls)]
self.speed = 0.009
self.trail = trail
self.addMask(mask)
def addMask(self, mask):
"""
Given a 2D array depciting some image to mask out
e.g. a box or a name or a picture of peeve
shrink or fatten it up to fit the shape of our field/grid
dimensions must be at least.... 4 x 4 ? e.g.
. . . .
. x x .
. x x .
. . . .
The players on the field should never write to
the 'x'd out areas.
but our grid will probably be larger than this...
so what is the maths behind making this fit properly?
e.g. a 4 x 4 mask supplied for a 64 x 64 grid
let's start small and just double it
. . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . x x x x x x . . . . . . . . .
. . . . . . . . . x x x x x x . . . x x x x . .
. . . . . . . . . x x x x x x . . . x x x x . .
. . . . . . . . => . x x x x x x . or . . x x x x . .
. . . . . . . . . x x x x x x . . . x x x x . .
. . . . . . . . . x x x x x x . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . .
bad good
I think the result where we look at the proportionality works best.
The first transformation has a single border like the original,
and the second maintains the proportions (e.g. 50%).
What happens when it's more awkward?
. . . . . . . . . . . . . . . . . .
. . . . . . => . x x x x . or . . x x . .
. . . . . . . . . . . . . . . . . .
bad good
I still like the second transformation.
So I guess when taking 1/2 of an odd, round down?
"""
pass
def update(self):
for ball in self.balls:
hitWall = self.walled(ball)
if hitWall: # wall collision
ball.bounce(hitWall)
# ball collision
self.clearTrail(ball, self.trail, True)
ball.move()
self.field.write_at(item=ball.image, coords=ball.getPosition())
# clear the field randomly (.1% chance)
if random.choice(range(1000)) == 1:
self.field.clear()
self.field.deploy()
tails = lambda: random.choice([' >< ', ' # ', '*', ' * ', ' () ', ') (', '-_-', '[]', '][', '] ['])
s = MaskSaver(50, tails())
s.run()
|
cameronbriar/curses
|
examples/saver.mask.py
|
saver.mask.py
|
py
| 2,985 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1447221561
|
from django.shortcuts import render
from .forms import ProductCreationForm
from .models import Product
from django.contrib import messages
import random
# Create your views here.
def create(request):
if request.method == 'POST':
form = ProductCreationForm(request.POST, request.FILES)
if form.is_valid():
product = form.save(commit=False)
while True:
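                # Keep drawing random six-digit numbers until the lookup below
                # raises (i.e. no existing product uses that number), then
                # assign it to the new product.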
productNo = random.randint(100000, 999999)
try:
Product.objects.get(orderNo=productNo)
except:
break
product.productNo = productNo
try: product.save()
except:
messages.warning(request, 'Could not create product')
else:
messages.success(request, 'Product Created')
else:
form = ProductCreationForm()
context = {
"title": "Products",
"form": form
}
return render(request, 'products/create.html.django', context)
def product(request, productId):
product = Product.objects.get(id=productId)
context = {
"title": "Product - "+product.productName,
"product": product
}
return render(request, 'products/product.html.django', context)
|
Thorium0/IntelRobotics-webserver
|
products/views.py
|
views.py
|
py
| 1,284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15191647327
|
import matplotlib.pyplot as plot
from pymongo import MongoClient
import numpy as np
from sys import argv
import random
from constants import CONNECTION_STRING, DATABASE_NAME, CLUSTER_COLLECTION_NAME, GENRE_K_DICT
from q2 import get_k_g, main as q2_main, client as client2
from q3 import main as q3_main, client as client3
client = MongoClient(CONNECTION_STRING)
db = client.get_database(DATABASE_NAME)
def get_clusters(g: str) -> list:
return list(db.get_collection(CLUSTER_COLLECTION_NAME).aggregate([
{
'$match': {
'genres': g
}
}, {
'$group': {
'_id': '$cluster',
'points': {
'$push': '$kmeansNorm'
}
}
}]))
def get_random_color(palette=[]):
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
shapes = ['o', 's', 'p', 'd', 'D', '*', '+']
color_index = random.randint(0, len(colors) - 1)
shape_index = random.randint(0, len(shapes) - 1)
result = f'{colors[color_index]}{shapes[shape_index]}'
while result in palette:
color_index = random.randint(0, len(colors) - 1)
shape_index = random.randint(0, len(shapes) - 1)
result = f'{colors[color_index]}{shapes[shape_index]}'
return result
def plot_points(clusters: list, g: str):
plot.title(g)
plot.xlabel('Normalized startYear')
plot.ylabel('Normalized avgRating')
plot.xticks(np.arange(0, 1.2, 0.1))
plot.yticks(np.arange(0, 1.2, 0.1))
for cluster in clusters:
cluster_colors = []
cluster_color = get_random_color(cluster_colors)
cluster_colors.append(cluster_color)
for point in cluster['points']:
plot.plot(point[0], point[1], cluster_color, markersize=5)
plot.savefig(f'./img/q5/{g}.jpg', format='jpg')
plot.clf()
def main():
if len(argv) == 1:
for g in GENRE_K_DICT:
q2_main(GENRE_K_DICT[g], g)
q3_main(g)
clusters = get_clusters(g)
plot_points(clusters, g)
else:
k, g = get_k_g()
q2_main(k, g)
q3_main(g)
clusters = get_clusters(g)
plot_points(clusters, g)
client2.close()
client3.close()
if __name__ == "__main__":
main()
|
GautamGadipudi/bd-assignment-8
|
q5.py
|
q5.py
|
py
| 2,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71359830907
|
from decimal import Decimal
import ffmpeg
import math
import gc
def get_aspect_ratio(width, height):
gcd = math.gcd(width, height)
lhs = int(width / gcd)
rhs = int(height / gcd)
return f"{lhs}x{rhs}"
def get_raw_duration(video):
duration_raw = None
# check framerate at index 0 and 1, because its given like '25/1'
# ToDo: add other sources for NUMBER_OF_FRAMES => check some files
try:
if 'NUMBER_OF_FRAMES-eng' in video['tags'] and 'avg_frame_rate' in video:
duration_raw = int(video['tags']['NUMBER_OF_FRAMES-eng']) / \
int((video['avg_frame_rate'][0] + video['avg_frame_rate'][1]))
except:
#raise TypeError('Some error happened during the calculation of the raw duration!')
return duration_raw
return duration_raw
def get_duration(video):
duration = None
try:
if 'DURATION-eng' in video['tags']:
# could also be DURATION => search for something with DURATION in its name; might be this one: [value for key, value in programs.items() if 'new york' in key.lower()]
duration = video['tags']['DURATION-eng'].split('.')[0]
elif 'DURATION-de' in video['tags']:
duration = video['tags']['DURATION-de'].split('.')[0]
elif 'DURATION' in video['tags']:
duration = video['tags']['DURATION'].split('.')[0]
else:
raise TypeError('Cant find duration in tags!')
except:
#raise TypeError('Some error happened during the calculation of the duration!')
return duration
return duration
def try_get_width(video):
width = None
if 'width' in video:
width = video['width']
elif 'coded_width' in video:
width = video['coded_width']
return width
def try_get_height(video):
height = None
if 'height' in video:
height = video['height']
elif 'coded_height' in video:
height = video['coded_height']
return height
def get_data(path):
# read the audio/video file from the command line arguments
media_file = str(path)
# uses ffprobe command to extract all possible metadata from the media file
probe = ffmpeg.probe(media_file)
bitrate = 0.00
if 'format' in probe:
bitrate = round(
Decimal(probe['format'].get('bit_rate'))/(1024*1024), 2)
streams = probe["streams"]
video = streams[0]
codec = video['codec_name']
# for other codecs => needs to be included in the output file!
other_codecs = []
first_cd = True
for cd in streams:
if first_cd:
first_cd = False
continue
# creates object with name, type, language, title
codec_name = cd.get('codec_name', '')
codec_type = cd.get('codec_type', '')
codec_language = str
codec_title = str
if 'tags' in cd:
codec_language = cd['tags'].get('language', '')
codec_title = cd['tags'].get("title", '')
other_codecs.append({"name": str(codec_name), "type": codec_type,
"language": codec_language, "title": codec_title})
# ToDo: add FPS, and think of a good output for other codecs (e.g. ac3, eac3, aac) => so just comma seperated names
# could also add audio language (comma seperated) and subtitle language
duration = get_duration(video)
duration_raw = get_raw_duration(video)
height = try_get_height(video)
width = try_get_width(video)
aspect_ratio = '0x0' # might look for a better option => 16:9 - excel will convert this to datetime
if width != None and height != None:
aspect_ratio = get_aspect_ratio(width, height)
# clear data
del streams, video
gc.collect()
return {"codec": codec, "other_codecs": other_codecs, "bitrate": bitrate, "duration": duration, "aspect_ratio": aspect_ratio, "dimensions": {"width": width, "height": height}, "raw": {"duration_raw": duration_raw}}
|
bennischober/MetaDataScraper
|
src/media/read_media.py
|
read_media.py
|
py
| 3,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35846798880
|
import sys
import cv2 as cv
__doc__ = """Wrapper to create new classifiers from OpenCV or other libraries.
"""
class NormalBayes(object):
"""Wraps a trained OpenCV Normal Bayes Classifier.
More info: http://docs.opencv.org/modules/ml/doc/normal_bayes_classifier.html
"""
def __init__(self):
self.model = cv.NormalBayesClassifier()
def train(self, dataset, responses):
"""Dataset and responses are assumed to be a 2D and 1D numpy matrix of type np.float32.
"""
self.model.train(dataset, responses)
def predict(self, samples):
"""Samples have to be a 2D numpy array of type np.float32.
Returns a list of prediction values.
"""
pred_results = self.model.predict(samples)
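        # In the old cv2 2.x bindings (assumed here) predict() returns a
        # (retval, results) tuple; index 1 is a column vector with one
        # predicted class label per sample.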
return [int(x[0]) for x in pred_results[1]]
class KNN(object):
"""Wraps a trained OpenCV k_nn classifier.
More info: http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
"""
def __init__(self):
self.model = cv.KNearest()
self.max_K = 32
def train(self, dataset, responses, params):
"""Dataset and responses are assumed to be a 2D and 1D numpy matrix of type np.float32.
Additionally, optional max_neighbors argument can be provided.
"""
if "nmax" in params:
self.max_K = params["nmax"]
else:
self.max_K = 32
self.model.train(dataset, responses, maxK=self.max_K)
def predict(self, samples, params):
"""Accepts samples for classification and K - number of neighbors to use.
Notice: K has to be <= maxK that was set while training.
Refer here: http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
for more info. Samples are 2D numpy array of type np.float32.
Returns a list of prediction values.
"""
if "nclass" in params:
K = params["nclass"]
else:
K = 7
if K > self.max_K:
print ("Bad argument: K")
return []
out = self.model.find_nearest(samples, K)
return [int(x[0]) for x in out[1]]
class RandomTrees(object):
"""Wraps a trained OpenCV RTrees classifier.
More info: http://docs.opencv.org/modules/ml/doc/random_trees.html
"""
def __init__(self):
self.model = cv.RTrees()
def train(self, dataset, responses, params):
"""Dataset and responses are assumed to be a 2D and 1D numpy matrix of type np.float32.
max_d corresponds to the max tree depth. Parameter criteria can be:
--CV_TERMCRIT_ITER Terminate learning by the max_num_of_trees_in_the_forest;
--CV_TERMCRIT_EPS Terminate learning by the forest_accuracy;
--CV_TERMCRIT_ITER + CV_TERMCRIT_EPS Use both termination criteria.
Refer here: http://docs.opencv.org/modules/ml/doc/random_trees.html
"""
if "maxdepth" in params:
max_d = params["maxdepth"]
else:
max_d = 4
if "criteria" in params:
criteria = params["criteria"]
else:
criteria=cv.TERM_CRITERIA_MAX_ITER+cv.TERM_CRITERIA_EPS
if "maxerror" in params:
max_error = params["maxerror"]
else:
max_error = 0.1
if "maxtrees" in params:
max_num_trees = params["maxtrees"]
else:
max_num_trees = 10
parameters = dict(max_depth=max_d, min_sample_count=1, use_surrogates=False,
calc_var_importance=True, max_categories=10, nactive_vars=0,
term_crit=(criteria, max_num_trees, max_error)) # not sure if max_error belongs here :D
self.model.train(dataset, cv.CV_ROW_SAMPLE, responses, params=parameters)
# print ("Num of trees: " + str(self.model.getVarImportance()))
def predict(self, samples):
"""Returns a list of prediction values for all samples.
Assuming samples are 2D numpy array of type np.float32.
"""
return [int(self.model.predict(s)) for s in samples]
|
mmikulic/ProjektRasUzo
|
src/classifier.py
|
classifier.py
|
py
| 4,064 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11314663313
|
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Group(models.Model):
title = models.CharField('название группы', max_length=200)
slug = models.SlugField('слаг', unique=True)
description = models.TextField('описание')
class Meta:
verbose_name = 'группа'
verbose_name_plural = 'группы'
def __str__(self):
return self.title
class Post(models.Model):
text = models.TextField(
'текст', help_text='Перед публикацией заполните поле.')
pub_date = models.DateTimeField(
'дата публикации', auto_now_add=True)
author = models.ForeignKey(
User, on_delete=models.CASCADE,
related_name='posts', verbose_name='автор')
group = models.ForeignKey(
Group, models.SET_NULL, blank=True,
null=True, related_name='posts', verbose_name='группа',
help_text='Выберите группу для публикации поста.')
image = models.ImageField(
'картинка', upload_to='posts/', blank=True, null=True,
help_text='Выберите картинку для публикации поста.')
class Meta:
verbose_name = 'пост'
verbose_name_plural = 'посты'
ordering = ['-pub_date']
def __str__(self):
return self.text[:15]
class Comment(models.Model):
post = models.ForeignKey(
Post, on_delete=models.CASCADE,
related_name='comments', verbose_name='пост')
author = models.ForeignKey(
User, on_delete=models.CASCADE,
related_name='comments', verbose_name='автор')
text = models.TextField(
'текст комментария', help_text='Перед публикацией заполните поле.')
created = models.DateTimeField(
'дата публикации', auto_now_add=True)
class Meta:
verbose_name = 'комментарий'
verbose_name_plural = 'комментарии'
ordering = ['-created']
def __str__(self):
return self.text[:15]
class Follow(models.Model):
user = models.ForeignKey(
User, on_delete=models.CASCADE,
related_name='follower', verbose_name='подписчик')
author = models.ForeignKey(
User, on_delete=models.CASCADE,
related_name='following', verbose_name='автор')
class Meta:
constraints = [
models.UniqueConstraint(
fields=['user', 'author'],
name='following_unique',
),
]
|
zzstop/hw05_final
|
posts/models.py
|
models.py
|
py
| 2,692 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2872612166
|
from flask import Flask, render_template, redirect, url_for, request
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, FloatField
from wtforms.validators import DataRequired
import requests
db = SQLAlchemy()
app = Flask(__name__)
app.config['SECRET_KEY'] = 'your_secret_key'
Bootstrap(app)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///movie-collection.db"
db.init_app(app)
movie_url = 'https://api.themoviedb.org/3/search/movie'
api_key = 'your_api_key'
parameters = {
'api_key': api_key,
'language': 'en-US'
}
class Movies(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(250), unique=True, nullable=False)
year = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(1500), nullable=False)
rating = db.Column(db.Float, nullable=True)
ranking = db.Column(db.Integer, nullable=True)
review = db.Column(db.String(500), nullable=True)
img_url = db.Column(db.String(1500), nullable=False)
class MovieForm(FlaskForm):
rating = FloatField('Your rating out of 10', validators=[DataRequired()])
review = StringField('Your review', validators=[DataRequired()])
class AddForm(FlaskForm):
title = StringField('Movie Title', validators=[DataRequired()])
submit = SubmitField('Submit')
with app.app_context():
db.create_all()
@app.route("/")
def home():
all_movies = db.session.query(Movies).order_by(Movies.rating).all()
for i in range(len(all_movies)):
all_movies[i].ranking = len(all_movies) - i
db.session.commit()
return render_template("index.html", all_movies=all_movies)
@app.route('/edit', methods=['GET', 'POST'])
def edit():
id = request.args.get('id')
movie = Movies.query.get(id)
form = MovieForm(movie_id=id)
if request.method == 'POST':
id = request.form.get('id')
movie = Movies.query.get(id)
movie.rating = request.form.get('rating')
movie.review = request.form.get('review')
db.session.commit()
return redirect(url_for('home'))
return render_template('edit.html', movie=movie, form=form)
@app.route('/delete')
def delete():
id = request.args.get('id')
movie_to_delete = Movies.query.get(id)
db.session.delete(movie_to_delete)
db.session.commit()
return redirect(url_for('home'))
@app.route('/add', methods=['POST', 'GET'])
def add():
form = AddForm()
if request.method == 'POST':
parameters['query'] = form.title.data
response = requests.get(url=movie_url, params=parameters).json()
data = []
for movie in response['results']:
movie_data = {
'title': movie['title'],
'id': movie['id'],
'year': movie['release_date'].split('-')[0]
}
data.append(movie_data)
return render_template('select.html', movies=data)
return render_template('add.html', form=form)
@app.route('/add_movie')
def add_movie():
url = f'https://api.themoviedb.org/3/movie/{request.args.get("id")}'
params = {
'api_key': api_key,
'language': 'en-US',
}
response = requests.get(url=url, params=params).json()
new_movie = Movies(title=response['title'],
year=int(response['release_date'].split('-')[0]),
description=response['overview'],
rating=response['vote_average'],
img_url=f'https://image.tmdb.org/t/p/w500{response["poster_path"]}')
db.session.add(new_movie)
db.session.commit()
movie = Movies.query.filter_by(title=response['title']).first()
movie_id = movie.id
return redirect(url_for('edit', id=movie_id))
if __name__ == '__main__':
app.run(debug=True)
|
mgardner1011/UdemyProjects
|
movie_ranking_site/main.py
|
main.py
|
py
| 3,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12680443626
|
import os
import warnings
import pandas as pd
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from utils.timefeatures import time_features
warnings.filterwarnings('ignore')
class MyDataset(Dataset):
def __init__(self, root_path, data_path, data, flag, seq_len, label_len, pred_len, features, target, timeenc, freq,
percent):
self.seq_len = seq_len
self.label_len = label_len
self.pred_len = pred_len
type_map = {'train': 0, 'val': 1, 'test': 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.timeenc = timeenc
self.freq = freq
self.percent = percent
self.root_path = root_path
self.data_path = data_path
self.data = data
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
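        # Border indices select the train/val/test windows. The ETT hourly
        # sets use the common 12/4/4-month split (30 days * 24 points per
        # month); the minute-level ETTm sets have 4x as many points. The
        # seq_len overlap gives val/test windows enough history, and 'custom'
        # data falls back to a 70/10/20 split.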
if self.data == 'ETTh1' or self.data == 'ETTh2':
border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]
border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]
elif self.data == 'ETTm1' or self.data == 'ETTm2':
border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]
border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]
elif self.data == 'custom':
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
else:
border1s = None
border2s = None
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.set_type == 0:
border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len
if self.features == 'M' or self.features == 'MS':
df_data = df_raw.iloc[:, 1:]
elif self.features == 'S':
df_data = df_raw[[self.target]]
else:
df_data = None
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = pd.DataFrame(self.scaler.transform(df_data.values)).fillna(0).values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if self.timeenc == 0:
df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
data_stamp = df_stamp.drop(['date'], 1).values
elif self.timeenc == 1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1, 0)
else:
data_stamp = None
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
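        # Each sample is an encoder window of seq_len steps followed by a
        # decoder window of label_len + pred_len steps; the label_len part
        # overlaps the end of the encoder input so the decoder sees known
        # context before forecasting pred_len future steps.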
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
|
ForestsKing/TSF-Library
|
data_provider/data_loader.py
|
data_loader.py
|
py
| 4,041 |
python
|
en
|
code
| 4 |
github-code
|
6
|
36060788445
|
from utils.parse_json import parse_json
from utils.save_json import save_json
import logging
def put_command(sala: str, nivel: int, chave: str):
data = parse_json('src/json/comandos.json')
data[sala][0]['outputs'][nivel]['status'] = chave
save_json('src/json/comandos.json', data)
def get_command(sala: str, nivel: int):
data = parse_json('src/json/comandos.json')
return data[sala][0]['outputs'][nivel]['status']
def swap_command(escolha_input: int, sala: str):
if (escolha_input == 1):
if (get_command(sala, 0) == "ON"):
put_command(sala, 0, 'OFF')
logging.info('Lamapada 01 Desligada')
else:
put_command(sala, 0, 'ON')
logging.info('Lamapada 01 Ligada')
if (escolha_input == 2):
if (get_command(sala, 1) == 'ON'):
put_command(sala, 1, 'OFF')
logging.info('Lamapada 02 Desligada')
else:
put_command(sala, 1, 'ON')
logging.info('Lamapada 02 Ligada')
if (escolha_input == 3):
if (get_command(sala, 2) == 'ON'):
put_command(sala, 2, 'OFF')
logging.info('Projetor Desligado')
else:
put_command(sala, 2, 'ON')
logging.info('Projetor Ligada')
if (escolha_input == 4):
if (get_command(sala, 3) == 'ON'):
put_command(sala, 3, 'OFF')
logging.info('Ar-condicionado Desligado')
else:
put_command(sala, 3, 'ON')
logging.info('Ar-condicionado Ligado')
if (escolha_input == 5):
put_command(sala, 0, 'OFF')
logging.info('Lamapada 01 Desligada')
put_command(sala, 1, 'OFF')
logging.info('Lamapada 02 Desligada')
put_command(sala, 2, 'OFF')
logging.info('Projetor Desligado')
put_command(sala, 3, 'OFF')
logging.info('Ar-condicionado Desligado')
|
AntonioAldisio/FSE-2022-2-Trabalho-1
|
src/utils/troca_comando.py
|
troca_comando.py
|
py
| 1,900 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29799264733
|
# -*- coding: utf-8 -*-
# https://blog.csdn.net/Tifficial/article/details/78116862
import os
import time
import tkinter.messagebox
from tkinter import *
from tkinter.filedialog import *
from PIL import Image, ImageTk
import pygame
class create_UI():
def __init__(self):
pass
def create_button(self, app):
button_functions = [
self.picSelect, self.writePoet, self.showPoet, quit
]
button_texts = ['选\n择\n图\n片', '为\n你\n写\n诗', '查\n看', '退\n出']
column_index = 3
button_num = len(button_functions)
for index in range(button_num):
button = Button(
app,
text=button_texts[index],
font=('方正舒体', 25),
bd=0,
bg='white',
command=button_functions[index])
button.grid(row=0, column=column_index, sticky='n')
column_index += 1
def ui(self):
app = Tk()
app.title("为你写诗")
        app.resizable(0, 0)  # disable window resizing
image = Image.open(r'9668839.jpeg')
background_image = ImageTk.PhotoImage(image)
w = background_image.width()
h = background_image.height()
app.geometry('%dx%d+0+0' % (w, h))
background_label = Label(app, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
self.create_button(app)
app.mainloop()
def set_button_sound(self):
water_drop_pwd = r"SarahBrightman-ScarboroughFair.mp3"
pygame.mixer.init()
pygame.mixer.music.load(water_drop_pwd)
pygame.mixer.music.play()
time.sleep(200.5)
pygame.mixer.music.stop()
def picSelect(self):
self.set_button_sound()
        default_dir = r"C:\Users\lenovon\Desktop"  # set the default directory to open
fns = askopenfilename(
filetypes=[("all", "*.*"), ("text file", "*.txt")],
title=u"选择图片",
initialdir=(os.path.expanduser(default_dir)))
fns_list = list(fns)
print("fns list:", fns_list)
def writePoet(self):
self.set_button_sound()
tkinter.messagebox.showinfo('Message', '查看')
def showPoet(self):
self.set_button_sound()
tkinter.messagebox.showinfo('Message', '展示结果')
if __name__ == "__main__":
demo = create_UI()
demo.ui()
|
anna160278/tkinter-examples
|
examples/aaa/tst.py
|
tst.py
|
py
| 2,443 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43702400504
|
from django.conf.urls import include, url
from . import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^yolog/$', views.yolo_index, name='yolo_index'),
url(r'^result/$', views.result, name='result'),
url(r'^list/$', views.RestaurantListView.as_view(), name="rlistview"),
url(r'^restaurants/$', views.RestaurantAllListView.as_view(), name="rallview"),
url(r'^restaurant/(?P<venue_id>[\w-]+)/$', views.restaurantwithid,name='rwithid'),
url(r'^restaurants/map/$', views.RestaurantAllMapListView.as_view(), name="rlistmapview"),
url(r'^api/v1/$',views.RestaurantList.as_view()),
url(r'^api/v1/pizzalist/$',views.PizzaList.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
hassanabidpk/searchrestaurant
|
django/searchrestaurant/search/urls.py
|
urls.py
|
py
| 781 |
python
|
en
|
code
| 129 |
github-code
|
6
|
17953957335
|
import numpy as np
from collections import Counter
def euclideanDistance(x, y):
return np.sqrt(np.sum((x-y)**2))
class KNN:
def __init__(self, k=3):
self.k = k
def fit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self, X):
predictions = [self.singlePredict(x) for x in X]
return predictions
def singlePredict(self, x):
distances = [euclideanDistance(x, x_train) for x_train in self.X_train]
idxDist = np.argsort(distances)[:self.k]
nearLabels = [self.y_train[idx] for idx in idxDist]
most_common = Counter(nearLabels).most_common(1) # [9,4,4,4,5,6] returns [(4,3), (5,1) ...]
return most_common[0][0]
if __name__ == "__main__":
# Imports
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.model_selection import train_test_split
cmap = ListedColormap(["#FF0000", "#00FF00", "#0000FF"])
def accuracy(y_true, y_pred):
accuracy = np.sum(y_true == y_pred) / len(y_true)
return accuracy
iris = datasets.load_iris()
X, y = iris.data, iris.target
print(y.max())
print(X[100:105])
print(y[100:105])
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=1234
)
k = 3
clf = KNN(k=k)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print("KNN classification accuracy", accuracy(y_test, predictions))
|
Helyousfi/Machine-learning
|
KNN.py
|
KNN.py
|
py
| 1,573 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9928969059
|
import numpy as np
import matplotlib.pyplot as plt
import cPickle
def plot_statistics(statistics, legends, title="", ylabel="", xlim=None, ylim=None, writeto="default.jpeg"):
plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')
plt.xlabel("Number of epochs")
plt.ylabel(ylabel)
plt.title(title)
for stat in statistics:
plt.plot(stat, linestyle="solid", marker=".")
plt.grid()
plt.legend(legends, loc='upper right')
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.savefig("./" + writeto)
def extract_records(path):
channels = cPickle.load(open(path, "rb"))
return channels
def compare_records(ls_files, writeto, xlim, ylim, dataset, measure):
"""
ls_files is a list of (path, description)
dataset can be a list, measure can't be a list
"""
measure_lgd = {"loss": "Negative Log Likelihodd", "err": "Error rate"}
writeto = writeto + "_" + measure + ".jpeg"
if not isinstance(dataset, list):
dataset = [dataset]
records = []
legends = []
for (path, descript) in ls_files:
for ds in dataset:
channels = extract_records(path)[ds][measure]
records.append(channels)
legends.append(descript + " (" + measure + "_" + ds + ")")
plot_statistics(records, legends=legends,
ylabel=measure_lgd[measure], xlim=xlim, ylim=ylim, writeto=writeto)
if __name__ == '__main__':
name = "multi_view/comp_pretrain_valid"
# ls_files = [
# # ("./results/lasagne/mod_7_1/", ""),
# # ("./results/lasagne/mod_7_smaller1/", "smaller"),
# # ("./results/lasagne/mod_7_bigger1/", "bigger"),
# ("./results/lasagne/mod_7_smaller21/", "smaller with 3x3"),
# # ("./results/lasagne/mod_7_smaller31/", "3x3 and less neurons"),
# ("./results/lasagne/mod_7_smaller2_nomaxpool1/", "no maxpool at the end"),
# ("./results/lasagne/mod_7_smaller2_nomaxpool_3every1/", "only 3x3"),
# ("./results/lasagne/mod_7_top1/", "only 3x3 top")]
ls_files = [
("./multi_view/c_1view.pkl", "1 view"),
("./multi_view/c_5views_mean.pkl", "5 views mean"),
# ("./multi_view/c_5views_dropout_branches.pkl", "5 views mean "
# "dropout "
# "branches"),
# ("./multi_view/c_5views_max.pkl", "5 views max"),
# ("./multi_view/c_5views_l2.pkl", "5 views l2"),
("./multi_view/c_5views_pretrained.pkl", "5 views mean "
"pretrained")
]
compare_records(ls_files, name, xlim=(0,200),
ylim=(0.06,0.15),
dataset=["valid"],
measure="err",)
|
adbrebs/dogs_vs_cats
|
results/utilities.py
|
utilities.py
|
py
| 3,026 |
python
|
en
|
code
| 5 |
github-code
|
6
|
21354510025
|
#
# @lc app=leetcode.cn id=337 lang=python3
#
# [337] 打家劫舍 III
#
from util import TreeNode
# @lc code=start
from functools import lru_cache
class Solution:
def rob(self, root: TreeNode) -> int:
nums = []
@lru_cache(None)
def dfs(node: TreeNode, can: bool) -> int:
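            # can == True means this node may still be robbed. Compare robbing
            # it (its children then become forbidden) with skipping it (the
            # children stay available) and return the better subtree total.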
if node is None:
return 0
node_sum = 0
if can:
t_sum = node.val
t_sum += dfs(node.left, False)
t_sum += dfs(node.right, False)
node_sum = t_sum
t_sum = 0
t_sum += dfs(node.left, True)
t_sum += dfs(node.right, True)
node_sum = max(node_sum, t_sum)
return node_sum
return dfs(root, True)
# @lc code=end
|
Alex-Beng/ojs
|
FuckLeetcode/337.打家劫舍-iii.py
|
337.打家劫舍-iii.py
|
py
| 811 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73000139069
|
import configparser
from wireguard_keys import *
PUB_KEY = '...' # здесь должен быть указан public key
if __name__ == "__main__":
try:
with open('curr_ip.txt', 'r') as f:
IP_N = int(f.readline())
except FileNotFoundError:
IP_N = int(input('не найден последний IP, введите его вручную: '))
#numbers of clients
N = int(input('введите количество генерируемых конфигов: '))
for i in range(1, N+1):
cur_ip = IP_N + i # increment IP-address
(privkey, pubkey, sharkey) = generate_wireguard_keys()
config = configparser.ConfigParser()
config['Interface'] = {
'PrivateKey': privkey,
'ListenPort': '51820',
'Address': f'172.26.1.{cur_ip}/24',
'DNS': '192.9.200.124, 192.9.200.132',
'#pubkey': f'{pubkey}'}
config['Peer'] = {
'PublicKey': f'{PUB_KEY}',
'PresharedKey': f'{sharkey}',
'AllowedIPs': '172.26.1.0/24, 192.9.200.0/24',
'Endpoint': '...:...', # здесь должен быть указан внешний адрес и порт
'PersistentKeepalive': 5
}
name_config = input('введите дескрипшн конфига: ')
with open(f'wg_lan_{cur_ip}_{name_config}.conf', 'w') as f:
config.write(f)
print('-------------------------------------')
print(f'ip: 172.26.1.{cur_ip}')
print(f'имя конфига: {name_config}')
print(f'pubkey: {pubkey}')
print(f'sharkey: {sharkey}')
print('-------------------------------------')
print()
#update last ip
with open('curr_ip.txt', 'w') as f:
f.write(str(cur_ip))
|
if13/utils
|
wireguard config generator/wireguard_export_lan.py
|
wireguard_export_lan.py
|
py
| 1,671 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
16593223409
|
#Challenge MeLi 2022 - Lautaro Stroia
from database import *
from google_api import *
def main():
#Database
try:
db = DataBaseHandler()
db.run()
except Exception:
print("Error with database")
return
#Google API service
gapi_handler = GoogleAPIHandler()
try:
files = gapi_handler.get_drive_files()
except Exception as e:
print("Error with GDrive API Service: {}".format(e))
return
if len(files) == 0 or not files:
print("Files not found")
return
for file in files:
db.save_drive_files(file)
if file['shared'] is True:
db.save_drive_logs(file)
file['shared'] = False
owner_perm_id = file['owners'][0]['permissionId']
for user in file['permissions']:
if user['id'] != owner_perm_id:
gapi_handler.modify_permissions(file['id'], user['id'])
db.change_file_visibility(file)
#send email
receiver = file['owners'][0]['emailAddress']
subject = 'Google Drive - a file has been modified'
text = "The visibility of your file {} has been modified for security reasons. Sorry for the incovenience.".format(file['name'])
gapi_handler.send_email(receiver, subject, text)
db.shutdown_database()
return None
if __name__ == '__main__':
main()
|
rg273/Challenge-MeLi-2022
|
main.py
|
main.py
|
py
| 1,219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36201897714
|
from PIL import Image
from picamera.array import PiRGBArray
from picamera import PiCamera
from botocore.exceptions import ClientError
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from time import sleep, time
import sys
from uuid import uuid4
import os
import RPi.GPIO as GPIO
import json
import boto3
import io
################## GENERAL ##################
#SUPPORTED_BINS = ['trash', 'plastic', 'paper', 'metal', 'glass']
SUPPORTED_BINS = ['trash', 'paper']
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
bins = {'trash': {'ultrasound_pins': (24,23), 'servo_pin': 19},
'paper': {'ultrasound_pins': (21,20), 'servo_pin': 26},
'plastic': {'ultrasound_pins': (0,0), 'servo_pin': 0},
'metal': {'ultrasound_pins': (0,0), 'servo_pin': 0},
'glass': {'ultrasound_pins': (0,0), 'servo_pin': 0},
'cardboard': {'ultrasound_pins': (0,0), 'servo_pin': 0},
}
for bin_type in bins.copy():
if bin_type not in SUPPORTED_BINS:
del bins[bin_type]
bin_id_file = 'bin_id.txt'
bin_height = 20 #estimate bin height is 20cm
################## Button ##################
BIN_BUTTON_PIN = 27
GPIO.setup(BIN_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
################## Servo ##################
DEGREES_0 = 2.5
DEGREES_90 = 7.5
DEGREES_180 = 12.5
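# Note (general servo assumption, not specific to this project): at 50 Hz a
# duty cycle of 2.5% is roughly a 0.5 ms pulse and 12.5% roughly a 2.5 ms
# pulse, the usual 0-180 degree range for hobby servos; adjust if yours differs.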
for bin_type, bin in bins.items():
servo_pin = bin['servo_pin']
GPIO.setup(servo_pin, GPIO.OUT)
################## ULTRASOUND ##################
def ultrasound_distance(GPIO_TRIGGER, GPIO_ECHO):
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time()
StopTime = time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
return distance
################## REKOGNITION ##################
def start_model(project_arn, model_arn, version_name, min_inference_units):
client=boto3.client('rekognition')
try:
# Start the model
print('Starting model: ' + model_arn)
response=client.start_project_version(ProjectVersionArn=model_arn,MinInferenceUnits=min_inference_units)
# Wait for the model to be in the running state
project_version_running_waiter = client.get_waiter('project_version_running')
project_version_running_waiter.wait(ProjectArn=project_arn,VersionNames=[version_name])
#Get the running status
describe_response=client.describe_project_versions(ProjectArn=project_arn,VersionNames=[version_name])
for model in describe_response['ProjectVersionDescriptions']:
print("Status: " + model['Status'])
print("Message: " + model['StatusMessage'])
except Exception as e:
print(e)
def show_custom_labels(model,bucket,photo, min_confidence):
client=boto3.client('rekognition')
# Load image from S3 bucket
s3_connection = boto3.resource('s3')
s3_object = s3_connection.Object(bucket,photo)
s3_response = s3_object.get()
stream = io.BytesIO(s3_response['Body'].read())
image=Image.open(stream)
#Call DetectCustomLabels
response = client.detect_custom_labels(Image={'S3Object': {'Bucket': bucket,'Name': photo}},MinConfidence=min_confidence,ProjectVersionArn=model)
highest_detected_label = None
highest_detected_confidence = 0
print('Detecting labels...')
for customLabel in response['CustomLabels']:
print('Label ' + str(customLabel['Name']))
print('Confidence ' + str(customLabel['Confidence']))
if customLabel['Confidence'] > highest_detected_confidence:
highest_detected_label = customLabel['Name'].lower()
highest_detected_confidence = customLabel['Confidence']
print('Done detection')
return highest_detected_label
################## S3 ##################
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
print("Successfully Uploaded!")
except ClientError as e:
return False
return True
################## MAIN ##################
# Custom MQTT message callback
def customCallback(client, userdata, message):
action = message.payload.decode()
if action == 'open':
print('Opening all bins...')
for trash_type, bin in bins.items():
servo = GPIO.PWM(bin['servo_pin'], 50)
servo.start(7.5)
sleep(0.1)
servo.ChangeDutyCycle(DEGREES_180) #open bin
sleep(1)
servo.stop()
if action == 'close':
        print('Closing all bins...')
for trash_type, bin in bins.items():
servo = GPIO.PWM(bin['servo_pin'], 50)
servo.start(7.5)
sleep(0.1)
servo.ChangeDutyCycle(DEGREES_0) #close bin
sleep(1)
servo.stop()
#check if bin_id exists
if os.path.isfile(bin_id_file):
with open(bin_id_file, 'r') as f:
bin_id = f.read()
#if doesnt exist
else:
bin_id = 'smartbin-{}'.format(uuid4())
host="****************.us-east-1.amazonaws.com"
rootCAPath = os.path.join("certs", "rootca.pem")
certificatePath = os.path.join("certs", "certificate.pem.crt")
privateKeyPath = os.path.join("certs", "private.pem.key")
smartbin = AWSIoTMQTTClient(bin_id)
smartbin.configureEndpoint(host, 8883)
smartbin.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
smartbin.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
smartbin.configureDrainingFrequency(2) # Draining: 2 Hz
smartbin.configureConnectDisconnectTimeout(10) # 10 sec
smartbin.configureMQTTOperationTimeout(5) # 5 sec
# Connect and subscribe to AWS IoT
smartbin.connect()
if not os.path.isfile(bin_id_file):
smartbin.publish("bin/{}/add".format(bin_id), '{{"bin_id": "{}" }}'.format(bin_id), 1)
print('Published newly generated bin endpoint client ID: {}'.format(bin_id))
with open(bin_id_file, 'w') as f:
f.write(bin_id)
smartbin.subscribe("bin/{}/action".format(bin_id), 1, customCallback)
while True:
#If button is pushed take picture, analyze using rekognition and open the corresponding bin hole
if GPIO.input(BIN_BUTTON_PIN) == GPIO.HIGH:
print("Button was pushed!")
sleep(2)
# Take image from picamera and write to file
filename = str(uuid4())+".jpg"
write_image_file = open(filename, 'wb')
camera = PiCamera()
camera.resolution = (1024, 768)
camera.start_preview()
sleep(2)
camera.capture(write_image_file)
write_image_file.close()
camera.close()
print('Picture saved')
# Uploads image file to specified s3 bucket
bucket = "mysmartbin-image-bin"
upload_file(filename, bucket, object_name=None)
# Start rekognition model if is is not
project_arn='arn:aws:rekognition:us-east-1:****************'
model_arn='arn:aws:rekognition:us-east-1:****************'
min_inference_units=1
version_name='MySmartBin-Custom-Label-Training.2020-02-22T01.18.22'
start_model(project_arn, model_arn, version_name, min_inference_units)
# Analyse image based on the model above
min_confidence = 50
trash_type_detected = show_custom_labels(model_arn,bucket, filename, min_confidence)
os.remove(filename)
if trash_type_detected is None:
trash_type_detected = 'trash'
if trash_type_detected in SUPPORTED_BINS:
print('SUPPORTED TRASH TYPE!')
bin = bins[trash_type_detected]
servo = GPIO.PWM(bin['servo_pin'], 50)
servo.start(7.5)
sleep(0.1)
print('Opening bin...')
servo.ChangeDutyCycle(DEGREES_180) #open bin
sleep(5) #open for x number of seconds
print('Closing bin...')
servo.ChangeDutyCycle(DEGREES_0) #close bin
sleep(2)
servo.stop()
ultrasound_pins = bin['ultrasound_pins']
ultrasound_value = ultrasound_distance(ultrasound_pins[0], ultrasound_pins[1]) #gets ultrasonic sensor value
percentage = round(((bin_height - ultrasound_value)/bin_height)*100, 2)
mqtt_message = '{{"bin_id": "{}", "trash_type": "{}", "percentage": {} }}'.format(bin_id, trash_type_detected, percentage)
print(mqtt_message)
smartbin.publish("bin/{}/fullness".format(bin_id), mqtt_message, 1)
|
scriptkiddyisme/mysmartbin
|
Raspberry Pi/smartbin.py
|
smartbin.py
|
py
| 8,967 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20649229622
|
import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
from pyswarms.utils.plotters.plotters import plot_contour, plot_surface
from pyswarms.utils.plotters.formatters import Mesher, Designer
# Run optimizer
options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}
optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=2, options=options)
# run the optimization; optimize() returns the best cost and best position,
# while the per-iteration positions are kept in optimizer.pos_history
best_cost, best_pos = optimizer.optimize(fx.sphere, iters=50)
# Plot the sphere function's mesh for better plots
m = Mesher(func=fx.sphere,
limits=[(-1,1), (-1,1)])
# Adjust figure limits
d = Designer(limits=[(-1,1), (-1,1), (-0.1,1)],
label=['x-axis', 'y-axis', 'z-axis'])
pos_history_3d = m.compute_history_3d(optimizer.pos_history) # preprocessing
animation3d = plot_surface(pos_history=pos_history_3d,
mesher=m, designer=d,
mark=(0, 0, 0))
animation3d.save('3d.gif', writer='imagemagick', fps=10)
|
igorpustovoy/inteligencja_obliczeniowa
|
lab04/zad3/3.py
|
3.py
|
py
| 960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27535933658
|
import torch
from torch import nn
import torch.nn.functional as F
from timm.models.layers import to_2tuple, DropPath, trunc_normal_
import math
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swish(nn.Module):
def __init__(self, inplace=True):
super(h_swish, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, x):
return x * self.sigmoid(x)
class ECALayer(nn.Module):
def __init__(self, channel, gamma=2, b=1, sigmoid=True):
super(ECALayer, self).__init__()
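        # ECA-style channel attention: the 1D conv kernel size k is derived
        # from the channel count as (log2(channel) + b) / gamma, rounded and
        # forced to be odd, so wider layers get a larger interaction range.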
t = int(abs((math.log(channel, 2) + b) / gamma))
k = t if t % 2 else t + 1
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=k // 2, bias=False)
if sigmoid:
self.sigmoid = nn.Sigmoid()
else:
self.sigmoid = h_sigmoid()
def forward(self, x):
y = self.avg_pool(x)
y = self.conv(y.squeeze(-1).transpose(-1, -2))
y = y.transpose(-1, -2).unsqueeze(-1)
y = self.sigmoid(y)
return x * y.expand_as(x)
class SELayer(nn.Module):
def __init__(self, channel, reduction=4):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
h_sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class LocalityFeedForward(nn.Module):
def __init__(self, in_dim, out_dim, stride, expand_ratio=4., act='hs+se', reduction=4,
wo_dp_conv=False, dp_first=False):
super(LocalityFeedForward, self).__init__()
hidden_dim = int(in_dim * expand_ratio)
kernel_size = 3
layers = []
layers.extend([
nn.Conv2d(in_dim, hidden_dim, kernel_size=1, stride=1, padding=0, bias=False),
h_swish() if act.find('hs') >= 0 else nn.ReLU6(inplace=True)])
if not wo_dp_conv:
dp = [
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, kernel_size // 2, groups=hidden_dim, bias=False),
h_swish() if act.find('hs') >= 0 else nn.ReLU6(inplace=True)
]
if dp_first:
layers = dp + layers
else:
layers.extend(dp)
if act.find('+') >= 0:
attn = act.split('+')[1]
if attn == 'se':
layers.append(SELayer(hidden_dim, reduction=reduction))
elif attn.find('eca') >= 0:
layers.append(ECALayer(hidden_dim, sigmoid=attn == 'eca'))
else:
raise NotImplementedError('Activation type {} is not implemented'.format(act))
layers.extend([
nn.Conv2d(hidden_dim, out_dim, 1, 1, 0, bias=False)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
x = x + self.conv(x)
return x
def window_partition(x, window_size):
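    # Split a (B, H, W, C) feature map into non-overlapping windows, returned
    # as a (num_windows * B, window_size, window_size, C) tensor.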
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
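    # Inverse of window_partition: stitch (num_windows * B, ws, ws, C) windows
    # back into a (B, H, W, C) feature map.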
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
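        # Learnable relative position bias: every (query, key) pair inside a
        # window maps to one of (2*Wh-1)*(2*Ww-1) possible relative offsets;
        # the index tensor built below looks up the bias for each pair.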
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
flops = 0
flops += N * self.dim * 3 * self.dim
flops += self.num_heads * N * (self.dim // self.num_heads) * N
flops += self.num_heads * N * N * (self.dim // self.num_heads)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, is_local=True):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.is_local = is_local
if min(self.input_resolution) <= self.window_size:
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
if is_local:
self.conv = LocalityFeedForward(dim, dim, 1, mlp_ratio, act='hs+se', reduction=dim // 4)
else:
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1))
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
attn_windows = self.attn(x_windows, mask=self.attn_mask)
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W)
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
x = shortcut + self.drop_path(x)
if not self.is_local:
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
batch_size, num, embed_dim = x.shape
x = x.transpose(1, 2).view(batch_size, embed_dim, H, W)
x = self.conv(x).flatten(2).transpose(1, 2)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
flops += self.dim * H * W
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
flops += self.dim * H * W
return flops
class BasicLayer(nn.Module):
def __init__(self, dim, input_resolution, depth, num_heads, window_size, pos_embed,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, is_local=True):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.pos_embed = pos_embed
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
is_local=is_local
)
for i in range(depth)])
def forward(self, x):
for j, blk in enumerate(self.blocks):
x = blk(x)
if j == 0:
if self.pos_embed is not None:
x = self.pos_embed(x, self.input_resolution[0], self.input_resolution[1])
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
return flops
def bicubic_upsample(x, H, W):
B, N, C = x.size()
assert N == H * W
x = x.permute(0, 2, 1)
x = x.view(-1, C, H, W)
x = nn.functional.interpolate(x, scale_factor=2, mode='bicubic', align_corners=True)
B, C, H, W = x.size()
x = x.view(-1, C, H * W)
x = x.permute(0, 2, 1)
return x, H, W
def pixel_upsample(x, H, W):
B, N, C = x.size()
assert N == H * W
x = x.permute(0, 2, 1)
x = x.view(-1, C, H, W)
x = nn.PixelShuffle(2)(x)
B, C, H, W = x.size()
x = x.view(-1, C, H * W)
x = x.permute(0, 2, 1)
return x, H, W
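# Both upsampling helpers double the spatial resolution of a (B, N, C) token tensor, but in
# different ways (sizes below are an illustrative assumption): for H = W = 8 and C = 256,
# bicubic_upsample interpolates to N = 16 * 16 tokens and keeps C = 256, while pixel_upsample
# uses PixelShuffle(2) and yields N = 16 * 16 tokens with C = 256 // 4 = 64.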
class matmul(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x1, x2):
x = x1 @ x2
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.mat = matmul()
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (self.mat(q, k.transpose(-2, -1))) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = self.mat(attn, v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
input_resolution,
dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm,
is_local=True
):
super().__init__()
self.input_resolution = input_resolution
self.is_local = is_local
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
if not is_local:
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, drop=drop)
else:
self.conv = LocalityFeedForward(dim, dim, 1, mlp_ratio, act='hs+se', reduction=dim // 4)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
if not self.is_local:
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
batch_size, num, embed_dim = x.shape
cls_token, x = torch.split(x, [1, num - 1], dim=1)
x = x.transpose(1, 2).view(batch_size, embed_dim, self.input_resolution[0], self.input_resolution[1])
x = self.conv(x).flatten(2).transpose(1, 2)
x = torch.cat([cls_token, x], dim=1)
return x
class PosCNN(nn.Module):
def __init__(self, in_chans, embed_dim=768, s=1):
super(PosCNN, self).__init__()
self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, s, 1, bias=True, groups=embed_dim), )
self.s = s
def forward(self, x, H, W):
B, N, C = x.shape
feat_token = x
cnn_feat = feat_token.transpose(1, 2).view(B, C, H, W)
if self.s == 1:
x = self.proj(cnn_feat) + cnn_feat
else:
x = self.proj(cnn_feat)
x = x.flatten(2).transpose(1, 2)
return x
def no_weight_decay(self):
return ['proj.%d.weight' % i for i in range(4)]
class SwinTransGenerator(nn.Module):
def __init__(self, embed_dim=256, bottom_width=8, bottom_height=8, window_size=4, depth=None,
is_local=True, is_peg=True):
super(SwinTransGenerator, self).__init__()
self.bottom_width = bottom_width
self.bottom_height = bottom_height
self.is_local = is_local
self.is_peg = is_peg
self.embed_dim = embed_dim
if depth is None:
depth = [4, 2, 2, 2]
self.window_size = 8
self.l1 = nn.Linear(256, (self.bottom_height * self.bottom_width) * self.embed_dim)
self.layer1 = BasicLayer(
dim=embed_dim,
input_resolution=[self.bottom_height, self.bottom_width],
depth=depth[0], num_heads=4, window_size=window_size,
pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
is_local=is_local
)
self.layer2 = BasicLayer(
dim=embed_dim,
input_resolution=[self.bottom_height * 2, self.bottom_width * 2],
depth=depth[1], num_heads=4, window_size=window_size,
pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
is_local=is_local
)
self.layer3 = BasicLayer(
dim=embed_dim // 4,
input_resolution=[self.bottom_height * 4, self.bottom_width * 4],
depth=depth[2], num_heads=4, window_size=window_size,
pos_embed=PosCNN(embed_dim // 4, embed_dim // 4) if is_peg else None,
is_local=is_local
)
self.layer4 = BasicLayer(
dim=embed_dim // 16,
input_resolution=[self.bottom_height * 8, self.bottom_width * 8],
depth=depth[3], num_heads=4, window_size=window_size,
pos_embed=PosCNN(embed_dim // 16, embed_dim // 16) if is_peg else None,
is_local=is_local
)
self.deconv = nn.Sequential(
nn.Conv2d(self.embed_dim // 16, 1, 1, 1, 0)
)
self.sigmoid = nn.Sigmoid()
if not is_peg:
self.pos_embed_1 = nn.Parameter(
torch.zeros(1, self.bottom_height * self.bottom_width, embed_dim)
)
self.pos_embed_2 = nn.Parameter(
torch.zeros(1, (self.bottom_height * 2) * (self.bottom_width * 2), embed_dim)
)
self.pos_embed_3 = nn.Parameter(
torch.zeros(1, (self.bottom_height * 4) * (self.bottom_width * 4), embed_dim // 4)
)
self.pos_embed_4 = nn.Parameter(
torch.zeros(1, (self.bottom_height * 8) * (self.bottom_width * 8), embed_dim // 16)
)
trunc_normal_(self.pos_embed_1, std=.02)
trunc_normal_(self.pos_embed_2, std=.02)
trunc_normal_(self.pos_embed_3, std=.02)
trunc_normal_(self.pos_embed_4, std=.02)
def forward(self, noise):
x = self.l1(noise)
x = x.reshape(-1, self.bottom_width * self.bottom_height, self.embed_dim)
if not self.is_peg:
x = x + self.pos_embed_1
H, W = self.bottom_height, self.bottom_width
x = self.layer1(x)
x, H, W = bicubic_upsample(x, H, W)
if not self.is_peg:
x = x + self.pos_embed_2
x = self.layer2(x)
x, H, W = pixel_upsample(x, H, W)
if not self.is_peg:
x = x + self.pos_embed_3
x = self.layer3(x)
x, H, W = pixel_upsample(x, H, W)
if not self.is_peg:
x = x + self.pos_embed_4
B, _, C = x.size()
x = self.layer4(x)
x = x.reshape(B, H, W, C).permute(0, 3, 1, 2)
x = self.deconv(x)
x = self.sigmoid(x)
return x
class SwinTransDiscriminator(nn.Module):
def __init__(self,
img_height=64, img_width=64, patch_size=4, in_channel=1,
embed_dim=512, depth: list = None,
num_heads=4, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm,
is_local=True, is_peg=True):
super(SwinTransDiscriminator, self).__init__()
self.img_height = img_height
self.img_width = img_width
self.patch_size = patch_size
self.window_size = patch_size
self.is_local = is_local
self.is_peg = is_peg
if depth is None:
depth = [4, 2, 2, 2]
self.PatchEmbed_1 = nn.Conv2d(in_channel, embed_dim // 4, kernel_size=patch_size, stride=patch_size, padding=0)
self.PatchEmbed_2 = nn.Conv2d(in_channel, embed_dim // 4, kernel_size=patch_size, stride=patch_size, padding=0)
self.PatchEmbed_3 = nn.Conv2d(in_channel, embed_dim // 2, kernel_size=patch_size, stride=patch_size, padding=0)
self.initial_height = img_height // patch_size
self.initial_width = img_width // patch_size
if not is_peg:
num_patches_1 = (img_height // patch_size) * (img_width // patch_size)
num_patches_2 = (img_height // (2 * patch_size)) * (img_width // (2 * patch_size))
num_patches_3 = (img_height // (4 * patch_size)) * (img_width // (4 * patch_size))
self.pos_embed_1 = nn.Parameter(torch.zeros(1, num_patches_1, embed_dim // 4))
self.pos_embed_2 = nn.Parameter(torch.zeros(1, num_patches_2, embed_dim // 2))
self.pos_embed_3 = nn.Parameter(torch.zeros(1, num_patches_3, embed_dim))
trunc_normal_(self.pos_embed_1, std=.02)
trunc_normal_(self.pos_embed_2, std=.02)
trunc_normal_(self.pos_embed_3, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
self.blocks_1 = BasicLayer(
dim=embed_dim // 4,
input_resolution=[self.initial_height, self.initial_width],
depth=depth[0], num_heads=4,
window_size=self.window_size,
pos_embed=PosCNN(embed_dim // 4, embed_dim // 4) if is_peg else None,
is_local=is_local
)
self.blocks_2 = BasicLayer(
dim=embed_dim // 2,
input_resolution=[self.initial_height // 2, self.initial_width // 2],
depth=depth[1], num_heads=4,
window_size=self.window_size,
pos_embed=PosCNN(embed_dim // 2, embed_dim // 2) if is_peg else None,
is_local=is_local
)
self.blocks_3 = BasicLayer(
dim=embed_dim,
input_resolution=[self.initial_height // 4, self.initial_width // 4],
depth=depth[2], num_heads=4,
window_size=self.window_size,
pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
is_local=is_local
)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth[3])]
self.last_block = nn.Sequential(
Block(
input_resolution=[self.initial_height // 4, self.initial_width // 4],
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer,
is_local=is_local
)
)
self.norm = norm_layer(embed_dim)
self.out = nn.Linear(embed_dim, 1)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x_1 = self.PatchEmbed_1(x).flatten(2).permute(0, 2, 1)
x_2 = self.PatchEmbed_2(nn.AvgPool2d(2)(x)).flatten(2).permute(0, 2, 1)
x_3 = self.PatchEmbed_3(nn.AvgPool2d(4)(x)).flatten(2).permute(0, 2, 1)
if not self.is_peg:
x_1 = x_1 + self.pos_embed_1
x = self.pos_drop(x_1)
B, _, C = x.size()
x = self.blocks_1(x)
x = x.permute(0, 2, 1).reshape(B, C, self.initial_height, self.initial_width)
x = nn.AvgPool2d(2)(x)
_, _, H, W = x.shape
x = x.flatten(2)
x = x.permute(0, 2, 1)
x = torch.cat([x, x_2], dim=-1)
if not self.is_peg:
x = x + self.pos_embed_2
x = self.blocks_2(x)
_, _, C = x.shape
x = x.permute(0, 2, 1).view(B, C, H, W)
x = nn.AvgPool2d(2)(x)
_, _, H, W = x.shape
x = x.flatten(2).permute(0, 2, 1)
x = torch.cat([x, x_3], dim=-1)
if not self.is_peg:
x = x + self.pos_embed_3
x = self.blocks_3(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self.last_block(x)
x = self.norm(x)
x = self.out(x[:, 0])
return x
def test_dis():
x = torch.randn((16, 1, 64, 64))
d = SwinTransDiscriminator()
out = d(x)
print(out.shape)
def test_gen():
x = torch.randn((16, 256))
g = SwinTransGenerator(embed_dim=256)
out = g(x)
print(out.shape)
if __name__ == '__main__':
test_gen()
test_dis()
|
fym1057726877/Defense
|
TransGAN/TransGanModel.py
|
TransGanModel.py
|
py
| 26,999 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10420483101
|
"""
.. moduleauthor:: Martí Congost <[email protected]>
"""
from typing import Any, Optional, Set, Tuple
from httplib2 import Http
from base64 import urlsafe_b64encode
from json import loads, dumps
from cocktail.modeling import overrides
from .exceptions import CacheKeyError
from .cachekey import CacheKey
from .cachestorage import CacheStorage
from .cacheserializer import CacheSerializer
from .picklecacheserializer import Base64PickleCacheSerializer
from .scope import whole_cache, Scope
ENCODING = "utf-8"
class RESTCacheStorage(CacheStorage):
def __init__(
self,
address: str,
serializer: Optional[CacheSerializer] = None):
self.__address = address.rstrip("/")
if serializer is None:
serializer = Base64PickleCacheSerializer()
self.__serializer = serializer
@property
def address(self) -> str:
return self.__address
@property
def serializer(self) -> CacheSerializer:
return self.__serializer
def _key_request(self, key: str, *args, **kwargs) -> str:
url = (
self.__address
+ "/keys/"
+ urlsafe_b64encode(key.encode(ENCODING)).decode(ENCODING)
)
extra_path = kwargs.pop("extra_path", None)
if extra_path:
url += "/" + extra_path
http = Http()
response, content = http.request(url, *args, **kwargs)
if (400 <= response.status < 500):
raise CacheKeyError(key)
if content and response.get("content-type") == "application/json":
content = loads(content.decode(ENCODING))
return content
@overrides(CacheStorage.exists)
def exists(self, key: CacheKey) -> bool:
try:
self._key_request(key, "HEAD")
except CacheKeyError:
return False
else:
return True
@overrides(CacheStorage.retrieve)
def retrieve(self, key: CacheKey) -> Any:
value = self._key_request(key, "GET", extra_path = "value")
return self.serializer.unserialize(value)
@overrides(CacheStorage.retrieve_with_metadata)
def retrieve_with_metadata(
self,
key: CacheKey) -> Tuple[Any, int, Set[str]]:
data = self._key_request(key, "GET")
return (
self.serializer.unserialize(data["value"].encode(ENCODING)),
data["expiration"],
data["tags"]
)
@overrides(CacheStorage.store)
def store(
self,
key: CacheKey,
value: Any,
expiration: Optional[int] = None,
tags: Optional[Set[str]] = None):
self._key_request(
key,
"POST",
headers = {
"Content-Type": "application/json"
},
body = dumps({
"value": self.__serializer.serialize(value).decode(ENCODING),
"expiration": expiration,
"tags": None if tags is None else list(tags)
})
)
@overrides(CacheStorage.get_expiration)
def get_expiration(self, key: CacheKey) -> Optional[int]:
return self._key_request(key, "GET", extra_path = "expiration")
@overrides(CacheStorage.set_expiration)
def set_expiration(self, key: CacheKey, expiration: Optional[int]):
self._key_request(
key + "/expiration",
"POST",
headers = {
"Content-Type": "application/json"
},
body = dumps(expiration)
)
@overrides(CacheStorage.discard)
def discard(self, key: CacheKey) -> bool:
try:
self._key_request(key, "DELETE")
except CacheKeyError:
return False
else:
return True
@overrides(CacheStorage.clear)
def clear(self, scope: Scope = whole_cache):
url = self.__address + "/clear"
http = Http()
response, content = http.request(
url,
"POST",
headers = {
"Content-Type": "application/json"
},
body = dumps(
None if scope is whole_cache
else list(scope)
)
)
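
# Minimal usage sketch (not part of the original module). The address is a placeholder for
# wherever the REST cache service actually runs, and the key is passed as a plain string on
# the assumption that CacheKey is string-like (only .encode() is needed by _key_request above).
if __name__ == "__main__":
    storage = RESTCacheStorage("http://localhost:8080/cache")  # hypothetical endpoint
    storage.store("example-key", {"answer": 42}, expiration=None, tags={"demo"})
    if storage.exists("example-key"):
        print(storage.retrieve("example-key"))
    storage.discard("example-key")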
|
marticongost/cocktail
|
cocktail/caching/restcachestorage.py
|
restcachestorage.py
|
py
| 4,258 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9988555176
|
import traceback, re, json, logging
from ..file_utilities.filepath import Filepath
from ..entitlements.entitlement_manager import Entitlement_Manager
from .file_manager import File_Manager
from ..client_config import COLLECTIONS_WITH_BAD_LEVEL_IMAGES, UNLOCK_ALL_BUDDIES
from .. import shared
logger_errors = logging.getLogger('VIM_errors')
logger = logging.getLogger('VIM_main')
logger_inv = logging.getLogger('VIM_inventory')
class Buddy_Manager:
@staticmethod
def generate_blank_buddy_database():
if shared is not None:
client = shared.client
weapon_data = client.all_weapon_data
payload = {}
File_Manager.update_individual_inventory(payload, "buddies")
@staticmethod
async def update_inventory(**kwargs):
payload = json.loads(kwargs.get("payload"))
buddy_uuid = payload["buddyUuid"]
new_data = payload["newData"]
inventory = File_Manager.fetch_individual_inventory()["buddies"]
for uuid,buddy in inventory.items():
if uuid == buddy_uuid:
inventory[uuid] = new_data
break
File_Manager.update_individual_inventory(inventory, "buddies")
await shared.client.broadcast_loadout()
return inventory
@staticmethod
async def favorite_all(**kwargs):
payload = json.loads(kwargs.get("payload"))
favorite = payload["favorite"]
inventory = File_Manager.fetch_individual_inventory()["buddies"]
for uuid,buddy in inventory.items():
for instance_uuid,instance in buddy["instances"].items():
if not instance["locked"]:
instance["favorite"] = favorite
File_Manager.update_individual_inventory(inventory, "buddies")
await shared.client.broadcast_loadout()
return inventory
@staticmethod
def refresh_buddy_inventory():
valclient = shared.client.client
client = shared.client
old_data = None
try:
old_data = File_Manager.fetch_individual_inventory()["buddies"]
except KeyError:
old_data = None
except Exception as e:
logger_errors.error(traceback.format_exc())
logger.debug("making fresh buddy database")
        Buddy_Manager.generate_blank_buddy_database()
buddy_entitlements = Entitlement_Manager.fetch_entitlements(valclient, "buddy")["Entitlements"]
sanitized_buddy_entitlements = {}
for entitlement in buddy_entitlements:
if not entitlement["ItemID"] in sanitized_buddy_entitlements.keys():
sanitized_buddy_entitlements[entitlement["ItemID"]] = []
sanitized_buddy_entitlements[entitlement["ItemID"]].append(entitlement["InstanceID"])
inventory = {}
# iterate through each buddy
for buddy in client.all_buddy_data:
buddy_owned = False
owned_level_id = ""
levels = [level["uuid"] for level in buddy["levels"]]
if UNLOCK_ALL_BUDDIES:
buddy_owned = True
for level in levels:
if level in sanitized_buddy_entitlements.keys():
buddy_owned = True
owned_level_id = level
break
if buddy_owned:
buddy_payload = {}
existing_buddy_data = None
if old_data is not None:
try:
existing_buddy_data = old_data[buddy["uuid"]]
except:
pass
buddy_payload["display_name"] = buddy["displayName"]
buddy_payload["uuid"] = buddy["uuid"]
buddy_payload["display_icon"] = buddy["displayIcon"]
buddy_payload["level_uuid"] = owned_level_id
buddy_payload["instance_count"] = len(sanitized_buddy_entitlements[owned_level_id])
buddy_payload["instances"] = {}
for instance in sanitized_buddy_entitlements[owned_level_id]:
try:
buddy_payload["instances"][instance] = {
"uuid": instance,
"favorite": existing_buddy_data["instances"][instance]["favorite"] if existing_buddy_data is not None else False,
"super_favorite": existing_buddy_data["instances"][instance]["super_favorite"] if existing_buddy_data is not None else False,
"locked": existing_buddy_data["instances"][instance]["locked"] if existing_buddy_data is not None else False,
"locked_weapon_uuid": existing_buddy_data["instances"][instance]["locked_weapon_uuid"] if existing_buddy_data is not None else "",
"locked_weapon_display_name": existing_buddy_data["instances"][instance]["locked_weapon_display_name"] if existing_buddy_data is not None else "",
}
# remove me later
except:
buddy_payload["instances"][instance] = {
"uuid": instance,
"favorite": False,
"super_favorite": False,
"locked": False,
"locked_weapon_uuid": "",
"locked_weapon_display_name": "",
}
# check for invalid favorite/lock combinations
for instance in buddy_payload["instances"].values():
if instance["locked"]:
instance["favorite"] = False
if instance["locked_weapon_uuid"] == "" or instance["locked_weapon_display_name"] == "":
instance["locked"] = False
instance["locked_weapon_uuid"] = ""
instance["locked_weapon_display_name"] = ""
inventory[buddy["uuid"]] = buddy_payload
sort = sorted(inventory.items(), key=lambda x: x[1]["display_name"].lower())
inventory = {k: v for k, v in sort}
logger_inv.debug(f"buddy inventory:\n{json.dumps(inventory)}")
File_Manager.update_individual_inventory(inventory,"buddies")
return True
|
colinhartigan/valorant-inventory-manager
|
server/src/inventory_management/buddy_manager.py
|
buddy_manager.py
|
py
| 6,398 |
python
|
en
|
code
| 150 |
github-code
|
6
|
41345912194
|
import json
from functools import wraps
import requests
from service_now_api_sdk.settings import (
SERVICENOW_API_PASSWORD,
SERVICENOW_API_TOKEN,
SERVICENOW_API_USER,
SERVICENOW_URL,
)
def headers_replace(f):
@wraps(f)
def decorated_function(*args, **kwargs):
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if SERVICENOW_API_TOKEN:
headers["Authorization"] = (f"Bearer {SERVICENOW_API_TOKEN}",)
if kwargs.get("headers"):
headers = {**headers, **kwargs.get["headers"]}
kwargs["headers"] = headers
return f(*args, **kwargs)
return decorated_function
class Client:
base_url = SERVICENOW_URL
default_path = ""
@headers_replace
def __http_request(
self,
method: str,
path: str,
headers: dict = None,
data=None,
params: dict = None,
timeout: int = None
):
if data is None:
data = {}
if params is None:
params = {}
if SERVICENOW_API_TOKEN:
return requests.request(
method=method,
url=f"{self.base_url}/{path}",
headers=headers,
data=json.dumps(data),
params=params,
timeout=timeout
)
if SERVICENOW_API_USER and SERVICENOW_API_PASSWORD:
return requests.request(
method=method,
url=f"{self.base_url}/{path}",
headers=headers,
data=json.dumps(data),
params=params,
auth=(SERVICENOW_API_USER, SERVICENOW_API_PASSWORD),
timeout=timeout
)
def post(
self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
):
return self.__http_request(
method="POST", path=path, headers=headers, data=data, params=params, timeout=timeout
)
def get(self, path: str, headers: dict = None, params: dict = None, timeout: int = None):
return self.__http_request(
method="GET", path=path, headers=headers, params=params, timeout=timeout
)
def put(
self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
):
return self.__http_request(
method="PUT", path=path, headers=headers, data=data, params=params, timeout=timeout
)
def patch(
self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
):
return self.__http_request(
method="PATCH", path=path, headers=headers, data=data, params=params, timeout=timeout
)
def delete(self, path: str, headers: dict = None, data: dict = None, timeout: int = None):
return self.__http_request(
method="DELETE", path=path, headers=headers, data=data, timeout=timeout
)
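
# Minimal usage sketch (not part of the original module); the table path below is a
# hypothetical example and depends on the ServiceNow instance configured in settings.
if __name__ == "__main__":
    client = Client()
    response = client.get("api/now/table/incident", params={"sysparm_limit": 1}, timeout=30)
    print(response.status_code)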
|
people-analytics-tech/service-now-api-sdk
|
service_now_api_sdk/sdk/servicenow/helpers/client.py
|
client.py
|
py
| 3,159 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20281068214
|
op = 'S'
num = []
cont5 = 0
while True:
if op in 'Nn':
print(f'Foram digitados {len(num)} valores: {num}')
num.sort(reverse = True)
print(f'Lista de valores ordenada de forma decrescente: {num}')
        if 5 in num:  # check whether the value 5 is in the list
print('O valor 5 foi encontrado na lista.')
else:
print('O valor 5 nao foi encontrado na lista')
break
else:
num.append(int(input('Digite um numero: ')))
op = str(input('Quer Continuar?[S/N] '))
|
JoooNatan/CursoPython
|
Mundo03/Exs/Ex081.py
|
Ex081.py
|
py
| 536 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
41061708200
|
from PySide6.QtWidgets import (
QWidget,
QToolBar,
QLabel,
QLineEdit,
QTextEdit,
QVBoxLayout,
QHBoxLayout,
)
import core.terminal_commands as tc
class WidgetGitUtils(QWidget):
"""
A custom QWidget that provides a user interface for Git utilities.
This widget contains a toolbar with actions for generating local and global Git configurations,
as well as resetting the configuration. It also has input fields for entering a username and email,
and a read-only text field for displaying output.
"""
def __init__(self):
"""
Initializes the WidgetGitUtils instance.
This method creates the user interface elements and adds them to the layout.
"""
super().__init__()
self._git_utils_toolbar = QToolBar()
self._git_utils_toolbar.addAction(
"Generate Local Config", self.generate_local_config
)
self._git_utils_toolbar.addAction(
"Generate Global Config", self.generate_global_config
)
self._git_utils_toolbar.addAction("Reset", self.reset)
self._username_label = QLabel("Username")
self._email_label = QLabel("Email")
self._username_line_edit = QLineEdit()
self._email_line_edit = QLineEdit()
self._username_pair = QHBoxLayout()
self._username_pair.addWidget(self._username_label)
self._username_pair.addWidget(self._username_line_edit)
self._username_widget = QWidget()
self._username_widget.setLayout(self._username_pair)
self._email_pair = QHBoxLayout()
self._email_pair.addWidget(self._email_label)
self._email_pair.addWidget(self._email_line_edit)
self._email_widget = QWidget()
self._email_widget.setLayout(self._email_pair)
self._text_edit = QTextEdit()
self._text_edit.setReadOnly(True)
self._main_layout = QVBoxLayout()
self._main_layout.addWidget(self._git_utils_toolbar)
self._main_layout.addWidget(self._username_widget)
self._main_layout.addWidget(self._email_widget)
self._main_layout.addWidget(self._text_edit)
self.setLayout(self._main_layout)
def generate_local_config(self):
"""
Generates local Git configuration commands.
This method retrieves the username and email entered in the input fields,
and uses them to generate Git configuration commands for setting the local
user.name and user.email. The generated commands are displayed in the read-only
text field.
"""
username: str = self._username_line_edit.text().strip()
email: str = self._email_line_edit.text().strip()
if len(username) > 0 and len(email) > 0:
result: str = tc.generate_git_config_commands(
username, email, is_global=False
)
self._text_edit.setPlainText(result)
def generate_global_config(self):
"""
Generates global Git configuration commands.
This method retrieves the username and email entered in the input fields,
and uses them to generate Git configuration commands for setting the global
user.name and user.email. The generated commands are displayed in the read-only
text field.
"""
username: str = self._username_line_edit.text().strip()
email: str = self._email_line_edit.text().strip()
if len(username) > 0 and len(email) > 0:
result: str = tc.generate_git_config_commands(
username, email, is_global=True
)
self._text_edit.setPlainText(result)
def reset(self):
"""
Resets the input fields and text field.
This method clears the text in the username and email input fields,
as well as the read-only text field.
"""
self._username_line_edit.setText("")
self._email_line_edit.setText("")
self._text_edit.setPlainText("")
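
# A standalone harness for manual testing (an assumption: the original application embeds
# this widget elsewhere; running this directly requires core.terminal_commands to be importable,
# i.e. launching from the project root).
if __name__ == "__main__":
    import sys
    from PySide6.QtWidgets import QApplication

    app = QApplication(sys.argv)
    window = WidgetGitUtils()
    window.show()
    sys.exit(app.exec())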
|
sanyokkua/dev_common_tools_py
|
ui/widgets/widget_git_utils.py
|
widget_git_utils.py
|
py
| 4,017 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33480868557
|
from django.shortcuts import render
from .models import Hardware, Software, Employees
from rest_framework import generics
from .serializers import HardwareSerializer, SoftwareSerializer, EmployeesSerializer
from django.db.models.query import Q
# Create your views here.
class CreateHardware(generics.CreateAPIView):
    queryset = Hardware.objects.all()
serializer_class = HardwareSerializer
class UpdateHardware(generics.RetrieveUpdateAPIView):
    queryset = Hardware.objects.all()
serializer_class = HardwareSerializer
class DeleteHardware(generics.RetrieveDestroyAPIView):
    queryset = Hardware.objects.all()
serializer_class = HardwareSerializer
class ListHardware(generics.ListAPIView):
# queryset = Hardware.objects.all(),
serializer_class = HardwareSerializer
def get_queryset(self):
qs = Hardware.objects.all()
qs = qs.filter(~Q(pk__in = '5'))
qs = qs.exclude(name = '')
#qs = [q for q in qs if q.name != '']
#qs = qs.filter(Q('name') != '')
# query = self.request.GET.get('q')
# if query is not None:
# qs = qs.filter().distinct()
return qs
class DetailHardware(generics.RetrieveAPIView):
    queryset = Hardware.objects.all()
serializer_class = HardwareSerializer
|
vuedatavivek/productsample
|
crm_project/organization/views.py
|
views.py
|
py
| 1,292 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44663849656
|
import re
import sys
def parse_word(w):
return w.replace(" ","_")
def parse_word_contained(w):
x = re.match("(\d+) (\w+ \w+) bags?",w)
if x is None:
print(w)
num = x.group(1)
word = parse_word(x.group(2))
return (word,num)
def parse_contained(str):
if str == "no other bags":
return []
lst = str.split(', ')
return list(map(parse_word_contained,lst))
def extract_color(contained):
return [contained[0]] * int(contained[1])
def gen_rule(container,contained):
just_colors = list(map(extract_color, contained))
just_colors = [x for sub_list in just_colors for x in sub_list]
color_text = '[' + ', '.join(just_colors) +']'
return "in({}, {}).".format(container,color_text)
if len(sys.argv) < 2:
print("Usage: day7.py <day7_input_filename>")
exit(-1)
filename = sys.argv[1]
f = open (filename,"r")
lines = f.readlines()
for line in lines:
m = re.match("(\w+ \w+) bags contain (.*).",line)
if m is None:
continue
container = parse_word(m.group(1))
contained = parse_contained(m.group(2))
print(gen_rule(container,contained))
print("""
color_in(X,Y) :- in(X,Z), member(Y,Z).
:- table transitive_in/2.
transitive_in(X,Y) :- color_in(X,Y).
transitive_in(X,Y) :- transitive_in(X,Z), transitive_in(Z,Y).
expand([],[]).
expand([BAG|BAG_LIST],EXPANSION) :-
expand(BAG_LIST,LIST_EXPANSION),
in(BAG,CONTENTS),
append(CONTENTS,LIST_EXPANSION,EXPANSION).
tracing_transitive_expand(X,[],[]) :- expand(X,[]).
tracing_transitive_expand(X,Y,TRACE) :-
expand(X,Z),
tracing_transitive_expand(Z,Y,TRACE1),
append(Z,TRACE1,TRACE).
size(X,Z) :-
tracing_transitive_expand(X,_,TRACE),
length(TRACE,Z).
""")
|
smagill/aoc2020
|
day7.py
|
day7.py
|
py
| 1,730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22416435881
|
import sys
import numpy as np
class gridmap2d(object):
"""
@brief 2D matrix for grid map
@param mapsize: (width, height) of the 2d grid map; unit is m
@param resolution: unit is m
@param dtype: data type
"""
def __init__(self, mapsize = (50.0, 50.0),
resolution = 0.1,
probrange = (-20.0, 120.0),
dtype = np.float32):
self.mapsize = mapsize
self.resolution = resolution
self.dtype = dtype
self.probrange = probrange
self.width = int(self.mapsize[0] / self.resolution) + 1
self.height = int(self.mapsize[1] / self.resolution) + 1
self.mapdata = np.zeros((self.width, self.height), dtype = self.dtype)
def world2pixel(self, world_location):
res = world_location / self.resolution
return res.astype(int)
def pixel2world(self, pixel_location):
res = pixel_location * self.resolution
return res.astype(np.float32)
def get_prob_by_pixel(self, key):
if key[0] < 0 or key[1] < 0 or key[0] >= self.width or key[1] >= self.height:
print(key, 'is out of boundary ', self.width, self.height)
        # clamp the pixel index to the map bounds
        row = min(self.width - 1, max(0, key[0]))
        col = min(self.height - 1, max(0, key[1]))
return self.mapdata[row, col]
def get_prob_by_world(self, key):
return self.get_prob_by_pixel(self.world2pixel(key))
def set_prob_by_pixel(self, key, value):
if key[0] < 0 or key[1] < 0 or key[0] >= self.width or key[1] >= self.height:
print(key, 'is out of boundary ', self.width, self.height)
        # clamp the pixel index to the map bounds
        row = min(self.width - 1, max(0, key[0]))
        col = min(self.height - 1, max(0, key[1]))
self.mapdata[row, col] = value
def set_prob_by_world(self, key, value):
self.set_prob_by_pixel(self.world2pixel(key), value)
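
# Minimal usage sketch (not part of the original module), illustrating the world <-> pixel
# conversion and the probability accessors defined above.
if __name__ == "__main__":
    gm = gridmap2d(mapsize=(50.0, 50.0), resolution=0.1)
    world_point = np.array([12.34, 5.67])
    gm.set_prob_by_world(world_point, 0.8)
    print(gm.world2pixel(world_point))        # e.g. [123  56]
    print(gm.get_prob_by_world(world_point))  # 0.8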
|
democheng/PythonRobotics
|
SLAM/gridmap2d.py
|
gridmap2d.py
|
py
| 1,980 |
python
|
en
|
code
| 15 |
github-code
|
6
|
37366659638
|
#!/usr/bin/env python3
from pylab import *
from numpy import *
import matplotlib.cm as cm
from common import *
idx_vec = range(1, num_k+1)
if with_FVD_solution == True :
if num_k > 1 :
fig, ax = plt.subplots(2, num_k, figsize=(9, 5.5))
else :
fig, ax = plt.subplots(1, 2, figsize=(9, 5.5))
fig.suptitle('Eigenfunctions, %s' % task_name)
else :
fig, ax = plt.subplots(1, num_k, figsize=(9, 5.5))
fig.suptitle('Eigenfunctions, %s' % task_name)
tot_min = -0.3
tot_max = 0.3
if with_FVD_solution :
for i in range(len(idx_vec)) :
if conjugated_eigvec_flag == 1 :
data_file = open('../%s/data/%s_FVD_%d_conjugated.txt' % (working_dir_name, eig_file_name_prefix, idx_vec[i]), 'r')
else :
data_file = open('../%s/data/%s_FVD_%d.txt' % (working_dir_name, eig_file_name_prefix, idx_vec[i]), 'r')
xmin, xmax, nx = [ float (x) for x in data_file.readline().split() ]
ymin, ymax, ny = [ float (x) for x in data_file.readline().split() ]
Z = np.loadtxt(data_file)
x = np.linspace(xmin, xmax, int(nx))
y = np.linspace(ymin, ymax, int (ny))
if num_k > 1 :
fvd_ax = ax[0, i]
else :
fvd_ax = ax[i]
im = fvd_ax.imshow( Z , cmap=cm.jet, extent = [xmin, xmax, ymin, ymax], vmin=tot_min , vmax=tot_max , origin='lower', interpolation='none' )
fvd_ax.set_title('FVD, %dth' % (idx_vec[i]))
if i == 0:
yticks(np.linspace(xmin, xmax, 5))
else :
plt.setp(fvd_ax.get_yticklabels(), visible=False)
sign_list = [1 for i in range(num_k)]
sign_list[0] = 1
if num_k > 1 :
sign_list[1] = -1
if num_k > 2 :
sign_list[2] = -1
for i in range(len(idx_vec)) :
base_name = '../%s/data/%s' % (working_dir_name, eig_file_name_prefix)
if conjugated_eigvec_flag == 1 :
data_file = open('%s_%d_conjugated.txt' % (base_name, idx_vec[i]), 'r')
else :
data_file = open('%s_%d.txt' % (base_name, idx_vec[i]), 'r')
xmin, xmax, nx = [ float (x) for x in data_file.readline().split() ]
ymin, ymax, ny = [ float (x) for x in data_file.readline().split() ]
Z = np.loadtxt(data_file, skiprows=0)
x = np.linspace(xmin, xmax, int (nx))
y = np.linspace(ymin, ymax, int (ny))
X, Y = np.meshgrid(x,y)
# tot_min = Z.min()
# tot_max = Z.max()
# print (tot_min, tot_max)
if with_FVD_solution :
if num_k > 1 :
nn_ax = ax[1, i]
else :
nn_ax = ax[num_k+i]
else :
if num_k > 1 :
nn_ax = ax[i]
else :
nn_ax = ax
im = nn_ax.imshow( sign_list[i] * Z , cmap=cm.jet, extent = [xmin, xmax, ymin, ymax], vmin=tot_min , vmax=tot_max , origin='lower', interpolation='none' )
nn_ax.set_title('NN, %dth' % (idx_vec[i]) )
if i == 0:
yticks(np.linspace(xmin, xmax, 5))
else :
plt.setp(nn_ax.get_yticklabels(), visible=False)
cax = fig.add_axes([0.92, 0.12, .04, 0.79])
#fig.colorbar(im, cax=cax, orientation='horizontal',cmap=cm.jet)
fig.colorbar(im, cax=cax, cmap=cm.jet)
#cax.tick_params(labelsize=10)
base_name = '../%s/fig/eigvec_nn_and_FVD' % (working_dir_name)
if conjugated_eigvec_flag == 1 :
fig_name = '%s_%d_conjugated.eps' % (base_name, num_k)
else :
fig_name = '%s_%d.eps' % (base_name, num_k)
savefig(fig_name)
print ("output figure: %s" % fig_name)
|
zwpku/EigenPDE-NN
|
plot_scripts/plot_2d_evs_nn_and_FVD.py
|
plot_2d_evs_nn_and_FVD.py
|
py
| 3,318 |
python
|
en
|
code
| 3 |
github-code
|
6
|
457933717
|
'''
rest_framework reverse patch
'''
from rest_framework import relations
original_reverse = relations.reverse
def hack_reverse(alias, **kwargs):
namespace = kwargs['request'].resolver_match.namespace
if bool(namespace):
name = "%s:%s" % (namespace, alias)
return original_reverse(name, **kwargs)
else:
return original_reverse(alias, **kwargs)
relations.reverse = hack_reverse
original_resolve = relations.resolve
def hack_resolve(path, urlconf=None):
match = original_resolve(path, urlconf=urlconf)
if bool(match.app_name):
preffix = match.app_name + ':'
if match.view_name.startswith(preffix):
match.view_name = match.view_name[len(preffix):]
return match
relations.resolve = hack_resolve
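# Illustrative effect (hypothetical view name "user-detail" and namespace "api"):
# hack_reverse prepends the request's namespace, so reversing "user-detail" actually looks up
# "api:user-detail"; hack_resolve strips the matching app_name prefix ("api:") from the
# resolved view_name again, so serializers comparing view_name keep seeing "user-detail".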
|
dowhilefalse/Donation-Platform
|
api/__init__.py
|
__init__.py
|
py
| 771 |
python
|
en
|
code
| 3 |
github-code
|
6
|
23873826885
|
import cv2
import time
import numpy as np
import supervision as sv  # this is a Roboflow open source library
from ultralytics import YOLO
from tqdm import tqdm #this is a tool for visualising progress bars in console. Remove for production code as might slow things down
COLORS = sv.ColorPalette.default()
#Define entry and exit areas on image (got the cordinates by drawing zones using https://blog.roboflow.com/polygonzone/)
#Zone_in is garden bottom half and front of house bottom half - red colour
ZONE_IN_POLYGONS = [
np.array([[640, 154],[0, 242],[0, 360],[640, 360]]),
np.array([[650, 162],[986, 158],[990, 360],[646, 360]]),
]
#Zone_out is garden top half and front of house top half - green colour
ZONE_OUT_POLYGONS = [
np.array([[642, 0],[978, 0],[982, 142],[654, 146]]),
np.array([[0, 0],[634, 0],[638, 146],[2, 222]]),
]
def initiate_poylgon_zones(polygons:list[np.ndarray],frame_resolution_wh:tuple[int,int],triggering_position:sv.Position=sv.Position.CENTER)->list[sv.PolygonZone]:
    return [sv.PolygonZone(polygon, frame_resolution_wh, triggering_position) for polygon in polygons]
class DetectionsManager:
def __init__(self) -> None:
        self.tracker_id_to_zone_id: dict[int, str] = {}
self.total_count: int = 5
#update function takes the list of detections triggered by a zone and maps the tracker ID to either in or out
    def update(self, detections: sv.Detections, detections_zone_in: list[sv.Detections], detections_zone_out: list[sv.Detections]) -> int:
for detection in detections_zone_in:
#print('Zone in detection ', detection)
if np.any(detection.tracker_id):#this tests if there are any tracker id's. If not the for loop below crashes
for tracker_id in detection.tracker_id:
if tracker_id in self.tracker_id_to_zone_id:
#print(self.tracker_id_to_zone_id[tracker_id])
if self.tracker_id_to_zone_id[tracker_id] == 'out':#if current value is out then this detection has crossed zones
self.total_count += 1 #add one to the count as an 'out' has become an 'in'
self.tracker_id_to_zone_id[tracker_id] = 'in' # and update zone in dictionary to reflect this
else:
self.tracker_id_to_zone_id[tracker_id] = 'in' #this means tracker ID is new so add to the dictionary
for detection in detections_zone_out:
#print('Zone out detections ', detection)
if np.any(detection.tracker_id): #this tests if there are any tracker id's. If not the for loop below crashes
for tracker_id in detection.tracker_id:
if tracker_id in self.tracker_id_to_zone_id:
#print(self.tracker_id_to_zone_id[tracker_id])
if self.tracker_id_to_zone_id[tracker_id] == 'in':#if current value is in then this detection has crossed zones
self.total_count -= 1 #minus one to the count as an 'in' has become an 'out'
self.tracker_id_to_zone_id[tracker_id] = 'out' # and update zone in dictionary to reflect this
else:
self.tracker_id_to_zone_id[tracker_id] = 'out' #this means tracker ID is new so add to the dictionary
#Need new statement which filters the detections so it only shows those from within a zone - although not sure that matters for this use case as zones cover whole field of view
#detections.class_id = np.vectorize(lambda x: self.tracker_id_to_zone_id.get(x, -1))(detections.tracker_id)#i don't understand what this is doing so need to come back to it
return self.total_count
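    # Worked example of the transition rules above (tracker ids are hypothetical):
    # tracker 7 first triggers a zone_out detection -> stored as 'out', count unchanged;
    # on a later frame it triggers a zone_in detection -> 'out' becomes 'in' and
    # total_count += 1; if it later reappears in zone_out, total_count -= 1 again.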
class VideoProcessor:
def __init__(self, source_weights_path: str, source_video_path: str, target_video_path: str = None,
confidence_threshold: float = 0.1, iou_threshold: float = 0.7,) -> None:
self.source_weights_path = source_weights_path
self.conf_threshold = confidence_threshold
self.iou_threshold = iou_threshold
self.source_video_path = source_video_path
self.target_video_path = target_video_path
self.model = YOLO(self.source_weights_path)
self.tracker = sv.ByteTrack()
self.box_annotator = sv.BoxAnnotator(color=COLORS)
self.trace_annotator = sv.TraceAnnotator(color=COLORS, position=sv.Position.CENTER, trace_length=100, thickness=2)
self.video_info = sv.VideoInfo.from_video_path(source_video_path)
self.video_info.fps = 25 # setting the frames per second for writing the video to 25 instead of 30 as original cameras are at 25fps
print(self.video_info)
self.zone_in = initiate_poylgon_zones(ZONE_IN_POLYGONS,self.video_info.resolution_wh,sv.Position.CENTER)
self.zone_out = initiate_poylgon_zones(ZONE_OUT_POLYGONS,self.video_info.resolution_wh,sv.Position.CENTER)
self.detections_manager = DetectionsManager()
def process_video(self):
frame_generator = sv.get_video_frames_generator(self.source_video_path)
if self.target_video_path:
with sv.VideoSink(self.target_video_path, self.video_info) as f:
for frame in tqdm(frame_generator, total=self.video_info.total_frames):
t1 = cv2.getTickCount()
processed_frame = self.process_frame(frame)
t2 = cv2.getTickCount()
ticks_taken = (t2 - t1) / cv2.getTickFrequency()
FPS = 1 / ticks_taken
cv2.putText(processed_frame, 'FPS: {0:.2f}'.format(FPS), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 2, cv2.LINE_AA)
f.write_frame(processed_frame)
else:
for frame in frame_generator:
t1 = cv2.getTickCount()
processed_frame = self.process_frame(frame)
t2 = cv2.getTickCount()
ticks_taken = (t2 - t1) / cv2.getTickFrequency()
FPS = 1 / ticks_taken
cv2.putText(processed_frame,'FPS: {0:.2f}'.format(FPS), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 2, cv2.LINE_AA)
cv2.imshow("Count of Customers Indoors", processed_frame)
if cv2.waitKey(1) & 0xFF ==ord("q"):
break
cv2.destroyAllWindows()
def process_frame(self,frame: np.ndarray)-> np.ndarray:
#consider resizing the frame tp 180x640 for both training and inference to see of this speeds things up
result = self.model(frame, verbose = False, conf=self.conf_threshold,iou=self.iou_threshold)[0]#add the device parameter to run this on the Mac's GPU which sognificantly speeds up inference
detections = sv.Detections.from_ultralytics(result)#pass the YOLO8 inference results through supervision to use their detections object which is easier to process
detections = detections[detections.class_id == 0]#filter the list of detections so it only shows category '0' which is people
detections = self.tracker.update_with_detections(detections)#pass the detections through the tracker to add tracker ID as additional field to detections object
#filter out detections not triggered within a zone and add the deteections to lists for zone in and zone out
detections_zone_in = []
detections_zone_out = []
for zone_in, zone_out in zip(self.zone_in,self.zone_out):
detection_zone_in = detections[zone_in.trigger(detections)]#this is an Supervision function to test if a detection occured within a zone
detections_zone_in.append(detection_zone_in)
detection_zone_out = detections[zone_out.trigger(detections)]#this is an Supervision function to test if a detection occured within a zone
detections_zone_out.append(detection_zone_out)
total_count = self.detections_manager.update(detections,detections_zone_in,detections_zone_out)#call to the detections manager class 'rules engine' for working out which zone a detection was triggered in
return self.annotate_frame(frame,detections,total_count)
def annotate_frame(self,frame: np.ndarray, detections: sv.Detections,total_count:int)-> np.ndarray:
annotated_frame = frame.copy()
for i,(zone_in,zone_out) in enumerate(zip(self.zone_in,self.zone_out)):#use enumerate so you get the index [i] automatically
annotated_frame = sv.draw_polygon(annotated_frame,zone_in.polygon,COLORS.colors[0])#draw zone in polygons
annotated_frame = sv.draw_polygon(annotated_frame,zone_out.polygon,COLORS.colors[1])#draw zone out polygons
if detections:#need to check some detections are found before adding annotations, otherwise list comprehension below breaks
labels = [f"#{tracker_id}" for tracker_id in detections.tracker_id]#list comprehension to return list of tracker_ID's to use in label
annotated_frame = self.box_annotator.annotate(annotated_frame,detections,skip_label=True)#add in labels = labels if want tracker ID annotated on frame
annotated_frame = self.trace_annotator.annotate(annotated_frame,detections)
annotated_frame = sv.draw_text(scene=annotated_frame, text="Count of People Currently In", text_anchor=sv.Point(x=1130, y=150), text_scale=0.6, text_thickness=1,background_color=COLORS.colors[0])
annotated_frame = sv.draw_text(scene=annotated_frame,text=str(total_count),text_anchor=sv.Point(x=1118, y=226),text_scale=2,text_thickness=5,background_color=COLORS.colors[0],text_padding=40)
return annotated_frame
processor = VideoProcessor(
source_weights_path='yolov8nPeopleCounterV2.pt',
source_video_path='/Users/tobieabel/Desktop/video_frames/Youtube/v3_a demo.mp4',
#target_video_path='/Users/tobieabel/Desktop/video_frames/Youtube/v3_b demo_annotated.mp4',
)
processor.process_video()
|
tobieabel/demo-v3-People-Counter
|
Demo v3.py
|
Demo v3.py
|
py
| 10,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13954591653
|
'''● Write a function that computes the square of each of the first N natural
numbers.
● Write a function that performs a summation from 1 up to a number N entered
by the user.
● Write a function that computes the factorial of a number N entered
by the user.'''
def factorial(x):
i=x
while i >1:
x = x * (i-1)
i=i-1
print("El numero es:", x)
return x
def cuadrados(y):
    i = 0
    squares = []
    while i != y + 1:
        print(i ** 2)
        squares.append(i ** 2)
        i += 1
    return squares
def sumatoria(z):
i=0
suma=0
while i!=z+1:
suma=suma+i
i+=1
return suma
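# Quick reference values implied by the definitions above:
#   factorial(4) -> 24, cuadrados(3) -> [0, 1, 4, 9] (printing each square), sumatoria(10) -> 55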
x=factorial(int(input("Ingrese un X\n")))
print(x)
y=cuadrados(int(input("Ingrese un Y\n")))
print(y)
z=sumatoria(int(input("Ingrese un z\n")))
print(z)
|
eSwayyy/UCM-projects
|
python/lab/ppt9_(funciones)/ejercicio2_ppt9.py
|
ejercicio2_ppt9.py
|
py
| 788 |
python
|
es
|
code
| 1 |
github-code
|
6
|
28792809187
|
scores = input("enter list of student scores: ").split()
for n in range(0, len(scores)):
scores[n] = int(scores[n])
maxScore = 0
for score in scores:
if score > maxScore:
maxScore = score
print("the max score is : ",maxScore)
|
Mohamed-Rirash/100-days-python-challenge
|
day5/heiest_score.py
|
heiest_score.py
|
py
| 245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3836899158
|
from benchmark_task_manager import *
import itertools
iteration = 1
TM = [0,2]
toggle = itertools.cycle(TM)
while True:
t1 = time.time()
z = next(toggle)
eval('TaskManager{0}()._schedule()'.format(z))
groupid = z
elapsed = time.time() - t1
with open("tm_dump", "w") as fid:
fid.write("{0},{1},{2}".format(elapsed, groupid, iteration))
iteration += 1
time.sleep(1)
|
fosterseth/awx-junk-drawer
|
serve_TM_data.py
|
serve_TM_data.py
|
py
| 405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29464951423
|
# Implement a pseudo-encryption algorithm which given a string S and an integer N concatenates
# all the odd-indexed characters of S with all the even-indexed characters of S, this process
# should be repeated N times.
# Examples:
# encrypt("012345", 1) => "135024"
# encrypt("012345", 2) => "135024" -> "304152"
# encrypt("012345", 3) => "135024" -> "304152" -> "012345"
# encrypt("01234", 1) => "13024"
# encrypt("01234", 2) => "13024" -> "32104"
# encrypt("01234", 3) => "13024" -> "32104" -> "20314"
# Together with the encryption function, you should also implement a decryption function which
# reverses the process.
# If the string S is an empty value or the integer N is not positive, return the first argument
# without changes.
def decrypt(encrypted_text, n):
if n <= 0:
return encrypted_text
text_list = list(encrypted_text)
length = len(text_list)
if length % 2 == 0:
split_part = length // 2
else:
split_part = (length - 1) // 2
first = text_list[0:split_part]
second = text_list[split_part:length]
result_list = [ second[i // 2] if i % 2 == 0 else first[(i - 1) // 2] for i in range(0, length) ]
result = ''.join(result_list)
return decrypt(result, n - 1)
def encrypt(text, n):
if n <= 0:
return text
text_list = list(text)
first = text_list[::2]
second = text_list[1::2]
encrypted = second + first
result = ''.join(encrypted)
return encrypt(result, n - 1)
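# Round-trip sanity check, restating the examples from the description above.
if __name__ == "__main__":
    assert encrypt("012345", 1) == "135024"
    assert decrypt("135024", 1) == "012345"
    assert decrypt(encrypt("This is a test!", 3), 3) == "This is a test!"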
# test.describe('Basic Tests')
# test.assert_equals(encrypt("This is a test!", 0), "This is a test!")
# test.assert_equals(encrypt("This is a test!", 1), "hsi etTi sats!")
# test.assert_equals(encrypt("This is a test!", 2), "s eT ashi tist!")
# test.assert_equals(encrypt("This is a test!", 3), " Tah itse sits!")
# test.assert_equals(encrypt("This is a test!", 4), "This is a test!")
# test.assert_equals(encrypt("This is a test!", -1), "This is a test!")
# test.assert_equals(encrypt("This kata is very interesting!", 1), "hskt svr neetn!Ti aai eyitrsig")
# test.assert_equals(decrypt("This is a test!", 0), "This is a test!")
# test.assert_equals(decrypt("hsi etTi sats!", 1), "This is a test!")
# test.assert_equals(decrypt("s eT ashi tist!", 2), "This is a test!")
# test.assert_equals(decrypt(" Tah itse sits!", 3), "This is a test!")
# test.assert_equals(decrypt("This is a test!", 4), "This is a test!")
# test.assert_equals(decrypt("This is a test!", -1), "This is a test!")
# test.assert_equals(decrypt("hskt svr neetn!Ti aai eyitrsig", 1), "This kata is very interesting!")
# test.assert_equals(encrypt("", 0), "")
# test.assert_equals(decrypt("", 0), "")
# test.assert_equals(encrypt(None, 0), None)
# test.assert_equals(decrypt(None, 0), None)
|
tuyojr/code_wars-hacker_rank-leetcode
|
code_wars/alternating_split.py
|
alternating_split.py
|
py
| 2,792 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26246603211
|
# Cmput 455 sample code
# Boolean Negamax for TicTacToe, with transposition table
# Written by Martin Mueller
from game_basics import EMPTY, BLACK, WHITE, opponent, winnerAsString
from tic_tac_toe import TicTacToe
from transposition_table_simple import TranspositionTable
from boolean_negamax_tt import negamaxBoolean
import time
def call_search(state):
tt = TranspositionTable() # use separate table for each color
return negamaxBoolean(state, tt)
def solve(state):
state.setDrawWinner(opponent(state.toPlay))
win = call_search(state)
if win:
return state.toPlay
# loss or draw, do second search to find out
state.setDrawWinner(state.toPlay)
if call_search(state):
return EMPTY # draw
else: # loss
return opponent(state.toPlay)
def test_solve_with_tt():
t = TicTacToe()
start = time.process_time()
result = solve(t)
time_used = time.process_time() - start
print("Result: {}\nTime used: {:.4f}".format(
winnerAsString(result), time_used))
test_solve_with_tt()
|
wllmwng1/CMPUT455_Assignment_2
|
TicTacToe/tic_tac_toe_solve_with_tt.py
|
tic_tac_toe_solve_with_tt.py
|
py
| 1,054 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44966506931
|
# import cv2
#
# filename="imgmirror.jpg"
# img= cv2.imread('image.jpg')
# res= img.copy()
# for i in range(img.shape[0]):
# for j in range(img.shape[1]):
# res[i][img.shape[1]-j-1]= img[i][j]
#
# cv2.imshow('image', res)
# cv2.imwrite(filename,res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# import cv2
#
# img = cv2.imread("no entry.png")
#
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# cv2.imshow("image ori", img)
# cv2.imshow("image gray", gray)
# filename="noentrygray.jpg"
# cv2.imwrite(filename,gray)
# cv2.waitKey(0)
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
import numpy as np
import cv2
#############################################
#frameWidth = 640 # CAMERA RESOLUTION
#frameHeight = 480
#brightness = 180
#threshold = 0.75 # PROBABLITY THRESHOLD
font = cv2.FONT_HERSHEY_SIMPLEX
##############################################
# SETUP THE VIDEO CAMERA
cap = cv2.VideoCapture(0)
#cap.set(3, frameWidth)
#cap.set(4, frameHeight)
#cap.set(10, brightness)
imageDimesions = (32, 32, 3)
noOfClasses = 3
sampleNum=0
no_Of_Filters = 60
size_of_Filter = (5, 5) # THIS IS THE KERNEL THAT MOVE AROUND THE IMAGE TO GET THE FEATURES.
# THIS WOULD REMOVE 2 PIXELS FROM EACH BORDER WHEN USING 32 32 IMAGE
size_of_Filter2 = (3, 3)
size_of_pool = (2, 2) # SCALE DOWN ALL FEATURE MAP TO GERNALIZE MORE, TO REDUCE OVERFITTING
no_Of_Nodes = 500 # NO. OF NODES IN HIDDEN LAYERS
model = Sequential()
model.add((Conv2D(no_Of_Filters, size_of_Filter, input_shape=(imageDimesions[0], imageDimesions[1], 1),
activation='relu'))) # ADDING MORE CONVOLUTION LAYERS = LESS FEATURES BUT CAN CAUSE ACCURACY TO INCREASE
model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool)) # DOES NOT EFFECT THE DEPTH/NO OF FILTERS
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(no_Of_Nodes, activation='relu'))
model.add(Dropout(0.5)) # INPUTS NODES TO DROP WITH EACH UPDATE 1 ALL 0 NONE
model.add(Dense(noOfClasses, activation='softmax')) # OUTPUT LAYER
# COMPILE MODEL
model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights('91model.h5')
def grayscale(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
def equalize(img):
img = cv2.equalizeHist(img)
return img
def preprocessing(img):
img = grayscale(img)
img = equalize(img)
img = img / 255
return img
# def getCalssName(classNo):
# if classNo == 0:
# return 'No Entry'
# elif classNo == 1:
# return 'Turn Right'
# elif classNo == 2:
# return 'Turn Left'
# elif classNo == 3:
# return 'Go Ahead'
# cascLeft = "all.xml"
# cascRight = "all.xml"
# cascStop = "all.xml"
cascLeft = "turnLeft_ahead.xml"
cascRight = "turnRight_ahead.xml"
cascStop = "stopsign_classifier.xml"
#speedLimit = "lbpCascade.xml"
leftCascade = cv2.CascadeClassifier(cascLeft)
rightCascade = cv2.CascadeClassifier(cascRight)
stopCascade = cv2.CascadeClassifier(cascStop)
#speedCascade = cv2.CascadeClassifier(speedLimit)
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
left = leftCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
right = rightCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
stop = stopCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
# speed = speedCascade.detectMultiScale(
# gray,
# scaleFactor=1.1,
# minNeighbors=5,
# minSize=(30, 30)
# )
# Draw a rectangle around the faces
for (x, y, w, h) in left:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
# if probabilityValue > threshold:
# cv2.putText(frame, str(tessss) + "%", (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 1,
# (0, 255, 0), 2, cv2.LINE_AA)
for (x, y, w, h) in right:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
#probabilityValue = np.amax(prediction)
# if probabilityValue > threshold:
# cv2.putText(frame, str(round(probabilityValue * 100, 2)) + "%", (x, y+h), cv2.FONT_HERSHEY_SIMPLEX, 1,
# (0, 255, 0), 2, cv2.LINE_AA)
for (x, y, w, h) in stop:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
# for (x ,y, w, h) in speed:
# cv2.rectangle(frame, (x ,y), (x+w, y+h), (0, 255, 0), 2)
# roi_gray = gray[y:y + h, x:x + w]
# cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
# prediction = model.predict(cropped_img)
#
# rambu = ('Stop', 'Turn Right', 'Turn Left', 'Max Speed 50')
# maxindex = rambu[int(np.argmax(prediction))]
#
# cv2.putText(frame, maxindex, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
nicolafeby/Self-driving-car-robot-cnn
|
testcamex.py
|
testcamex.py
|
py
| 7,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5503849048
|
def get_set():
return set(map(int, input().split()))
def is_super_set(main, sets):
    for other in sets:
        # the problem asks for a strict (proper) superset, so use ">" rather than issuperset()
        if not main > other:
            return False
    return True
A = get_set()
queries = int(input())
sets = []
for _ in range(queries):
sets.append(get_set())
print(is_super_set(A, sets))
|
Nikit-370/HackerRank-Solution
|
Python/is-strict-superset.py
|
is-strict-superset.py
|
py
| 321 |
python
|
en
|
code
| 10 |
github-code
|
6
|
19797979191
|
import functools
from typing import Callable, Union
from aiohttp import web
from .exceptions import AuthRequiredException, ForbiddenException, AuthException
def login_required(func):
"""
If not authenticated user tries to reach to a `login_required` end-point
returns UNAUTHORIZED response.
"""
def wrapper(request):
if not isinstance(request, web.Request):
raise TypeError(f"Invalid Type '{type(request)}'")
if not getattr(request, "user", None):
return AuthRequiredException.make_response(request)
return func(request)
return wrapper
def permissions(
*required_scopes: Union[set, tuple], algorithm="any"
) -> web.json_response:
"""
Open the end-point for any user who has the permission to access.
"""
assert required_scopes, "Cannot be used without any permission!"
def request_handler(view: Callable) -> Callable:
@functools.wraps(view)
async def wrapper(request: web.Request):
if not isinstance(request, web.Request):
raise TypeError(f"Invalid Type '{type(request)}'")
authenticator = request.app["authenticator"]
try:
provided_scopes = await authenticator.get_permissions(request)
has_permission = await authenticator.check_permissions(
provided_scopes, required_scopes, algorithm=algorithm
)
if not has_permission:
raise ForbiddenException()
return await view(request)
except AuthException as e:
return e.make_response(request)
return wrapper
return request_handler
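# Illustrative usage sketch (not part of the original module; the handler below is hypothetical):
#
#     @permissions("admin", "user", algorithm="any")
#     async def protected_view(request: web.Request):
#         return web.json_response({"ok": True})
#
# `permissions` relies on request.app["authenticator"] being configured by the surrounding
# aegis application, as used in the wrapper above.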
|
mgurdal/aegis
|
aegis/decorators.py
|
decorators.py
|
py
| 1,714 |
python
|
en
|
code
| 13 |
github-code
|
6
|
16638837739
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
__author__ = 'qing.li'
"""
from django import template
from django.conf import settings
import re
from collections import OrderedDict
register = template.Library()
@register.inclusion_tag('rbac/menu.html')
def menu(request):
menu_order = OrderedDict()
menu_list = request.session.get(settings.MENU_SESSION_KEY)
for key in sorted(menu_list, key=lambda x: menu_list[x]['weight'], reverse=True):
print(key)
menu_order[key] = menu_list[key]
menu_order[key]['class'] = 'hide'
for i in menu_order[key]['children']:
if i['id'] == request.current_menu_id:
menu_order[key]['class'] = ''
if re.match('^{}$'.format(i['url']), request.path_info):
i['class'] = 'active'
print("request.current_menu_id", request.current_menu_id)
# if i['id'] == request.current_menu_id:
# menu_order[key]['class'] = ''
# for menu in menu_list.values():
# for i in menu['children']:
# if re.match('^{}$'.format(i['url']), request.path_info):
# i['class'] = 'active'
# for i in menu_list:
# url = i['url']
# if re.match('^{}$'.format(url), request.path_info):
# i['class'] = 'active'
return {'menu_list': menu_order}
@register.inclusion_tag('rbac/breadcrumb.html')
def breadcrumb(request):
return {'breadcrumb_list': request.breadcrumb_list}
@register.filter
def has_permission(request, permission):
print("here", type(str(permission)), str(permission), list(request.session.get(settings.PERMISSION_SESSION_KEY).keys()))
if str(permission) in list(request.session.get(settings.PERMISSION_SESSION_KEY).keys()):
return True
@register.simple_tag
def gen_role_url(request, rid):
params = request.GET.copy()
params._mutable = True
params['rid'] = rid
print(params.urlencode())
return params.urlencode()
|
QingqinLi/nb_crm
|
rbac/templatetags/rabc.py
|
rabc.py
|
py
| 2,027 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72162560509
|
import sqlite3 as lite
import sys
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class CrawlerPipeline(object):
def __init__(self):
con = lite.connect('crawler.db')
with con:
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS Results(Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"Keyword TEXT, Title TEXT, Link TEXT, Description TEXT, BestContent TEXT, BestVote INTEGER, BestView INTEGER)")
def process_item(self, item, spider):
con = lite.connect('crawler.db')
with con:
cur = con.cursor()
cur.execute("INSERT INTO Results (Keyword, Title, Link, Description, BestContent, BestVote, BestView) " \
"VALUES (?,?,?,?,?,?,?)", (item['keyword'], item['title'], item['link'], item['desc'], item['bestContent'], item['bestVote'], item['bestView']))
return item
|
yaoxiuh/WebCrawler
|
crawler/pipelines.py
|
pipelines.py
|
py
| 1,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17533905717
|
rows, columns = [int(x) for x in input().split()]
a = [[x for x in input().split()] for _ in range(rows)]
while True:
command = input().split()
action = command[0]
if action == 'END':
break
if action != 'swap' or len(command) != 5:
print("Invalid input!")
continue
# better is by validation instead of try-except
try:
row1, col1, row2, col2 = [int(command[i]) for i in range(1, 5)]
a[row1][col1], a[row2][col2] = a[row2][col2], a[row1][col1]
[print(' '.join([str(x) for x in row])) for row in a]
except: # don't define the kind of error
print("Invalid input!")
# -------------
# def shuffle_matrix(row1, col1, row2, col2):
# matrix[row1][col1], matrix[row2][col2] = matrix[row2][col2], matrix[row1][col1]
#
#
# rows, columns = [int(x) for x in input().split()]
# matrix = [input().split() for x in range(rows)]
#
# while True:
# command = input()
# if command == "END":
# break
# if not command.startswith("swap") or len(command.split()) != 5:
# print("Invalid input!")
# continue
# row_1, col_1, row_2, col_2 = [int(x) for x in command.split()[1:]]
# if row_1 in range(rows) and col_1 in range(columns) and row_2 in range(rows) and col_2 in range(columns):
# shuffle_matrix(row_1, col_1, row_2, col_2)
# [print(" ".join(element)) for element in matrix]
# else:
# print("Invalid input!")
|
emilynaydenova/SoftUni-Python-Web-Development
|
Python-Advanced-Sept2023/Exercises/03.Multidimensional_lists/Multidimensional_lists_First/06.Matrix_shuffling.py
|
06.Matrix_shuffling.py
|
py
| 1,452 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19160774674
|
import sys, os
from turtle import home
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import time
import pytest
import allure
from allure_commons.types import AttachmentType
from Tests.test_Base import BaseTest
from Locators.Locators import Locators
from Config.config import TestData
from Pages.LoginPage import LoginPage
from Locators.EnumsPackage.Enums import Sort_Productss
class Test_Home(BaseTest):
@pytest.mark.order()
def test_verify_home_page_title(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
title = homePage.get_title()
assert title == TestData.HOME_PAGE_TITLE
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_home_page_header(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
header = homePage.get_header_value()
allure.attach(self.driver.get_screenshot_as_png(), attachment_type=AttachmentType.JPG)
assert header == TestData.HOME_PAGE_HEADER
@pytest.mark.order()
def test_verify_cart_icon_visible(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
notification = homePage.is_cart_icon_exist()
assert notification
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.JPG)
@pytest.mark.order()
def test_verify_product_sort_container(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.product_sort_container()
for getValue in Sort_Productss:
sortingNames = self.driver.find_element_by_xpath(
"//*[@class='product_sort_container']//option[contains(text(),'%s')]" % str(getValue.value))
assert sortingNames.text == getValue.value
@pytest.mark.order()
def test_verify_shopping(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.do_shopping()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_sorting_Zto_A(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.product_sort_container()
homePage.sort_product_High_to_Low()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_logout_into_app(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.do_logout()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
|
sawrav-sharma/py_new_dd
|
Tests/test_HomePage.py
|
test_HomePage.py
|
py
| 2,879 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36540773216
|
# Import libraries
from requests import get
from json import dumps
# Your own local host's url
URL = "http://127.0.0.1:5000/"
# Names of active pages
mine_block = "mine_block"
get_chain = "get_chain"
is_valid = "is_valid"
# Define function for to check if API works and use the API.
def check_request_and_get_result(url, target_page_name, checked=False, needed_json_dumps=True):
target_url = url + target_page_name
request = get(target_url)
response = request.status_code
if checked:
return dumps(request.json(), sort_keys=True, indent=4) if needed_json_dumps else request.json()
else:
return "Congratulation, API works!" if response == 200 else "Something went wrong."
print(check_request_and_get_result(URL, get_chain, True))
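# The other endpoint names defined above can be queried with the same helper, for example
# (left commented out so that only the chain request runs by default):
# print(check_request_and_get_result(URL, mine_block, True))
# print(check_request_and_get_result(URL, is_valid, True))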
|
mrn01/Blockchain_Project
|
blockchain_davidcoin/Module 1 - Create a Blockchain/use_your_own_API.py
|
use_your_own_API.py
|
py
| 795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71449750907
|
from multiprocessing import Process, Lock, Queue, Semaphore
import time
from random import random
buffer = Queue(10)
empty = Semaphore(2)  # number of free buffer slots
full = Semaphore(0)  # number of occupied buffer slots
lock = Lock()
class Consumer(Process):
def run(self):
global empty, buffer, full, lock
while True:
full.acquire()
            lock.acquire()  # the occupancy semaphore (full) is acquired before the lock
num = buffer.get()
time.sleep(1)
print(f"Consumer remove an element..{num}")
lock.release()
empty.release()
class Producer(Process):
def run(self):
global empty, full, buffer, lock
while True:
empty.acquire()
lock.acquire()
num = random()
buffer.put(num)
time.sleep(1)
print("Producer append an element... {}".format(num))
lock.release()
full.release()
if __name__ == "__main__":
consumer = Consumer()
producer = Producer()
producer.daemon = consumer.daemon = True
producer.start()
consumer.start()
producer.join()
consumer.join()
print("Main process ended!!!")
|
haidongsong/spider_learn
|
zhang_xiaobo_spider_practice/producer_custom.py
|
producer_custom.py
|
py
| 1,177 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1965038380
|
# -*- coding: utf-8 -*-
import json
import requests
import os
import time
import log21
from kafka import KafkaConsumer
access_token = os.environ.get("ACCESS_TOKEN")
kafka_host = os.environ.get("KAFKA_HOST")
kafka_port = os.environ.get("KAFKA_PORT", "9092")
kafka_topic = os.environ.get("KAFKA_TOPIC")
def dingtalk_robot(text):
url = "https://oapi.dingtalk.com/robot/send?access_token=" + access_token
headers = {'Content-Type': 'application/json'}
data_dict = {
"msgtype": "markdown",
"markdown": {
"title": "日志告警",
"text": text
}
}
json_data = json.dumps(data_dict)
response = requests.post(url, data=json_data, headers=headers)
print(response.text) # {"errcode":0,"errmsg":"ok"}
def test_to_json(message):
data = json.loads(message, strict=False)
return data.get('text').get('content')
def kafka_to_dingtalk():
if kafka_port == '':
bootstrap_server = '{}:{}'.format(kafka_host,'9092')
else:
bootstrap_server = '{}:{}'.format(kafka_host, kafka_port)
consumer = KafkaConsumer(
kafka_topic,
bootstrap_servers=bootstrap_server,
auto_offset_reset='latest',
api_version=(0, 10, 2)
)
log21.print(type(consumer))
for msg in consumer:
        dingtalk_message = test_to_json(msg.value.decode())
        time.sleep(4)
        dingtalk_robot(dingtalk_message)
if __name__ == '__main__':
    # os.environ.get() returns None when a variable is unset, so test truthiness rather than == ''
    if not access_token:
        log21.print(log21.get_color('#FF0000') + '未提供钉钉机器人ACCESS_TOKEN')
    if not kafka_host:
        log21.print(log21.get_color('#FF0000') + '未配置Kafka的环境变量KAFKA_HOST')
    if not kafka_topic:
        log21.print(log21.get_color('#FF0000') + '未配置Kafka的环境变量KAFKA_TOPIC')
kafka_to_dingtalk()
|
zxzmcode/oTools
|
python/Alnot/Dingtalk/kafka_to_Dingtalk/dingtalk.py
|
dingtalk.py
|
py
| 1,832 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14716216800
|
import torch
from torch import nn
import torch.nn.functional as F
from models.Segformer import mit_b0,mit_b1,mit_b2#,mit_b3,mit_b4,mit_b5
class SK(nn.Module):
def __init__(self, in_channel, mid_channel, out_channel, fuse, len=32, reduce=16):
super(SK, self).__init__()
len = max(mid_channel // reduce, len)
self.fuse = fuse
self.conv1 = nn.Sequential(
nn.Conv2d(in_channel, mid_channel, kernel_size=1, bias=False),
nn.BatchNorm2d(mid_channel),
)
self.conv2 = nn.Sequential(
nn.Conv2d(mid_channel, out_channel,kernel_size=3,stride=1,padding=1,bias=False),
nn.BatchNorm2d(out_channel),
)
if fuse:
#https://github.com/syt2/SKNet
self.gap = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Sequential(
nn.Conv2d(mid_channel, len, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(len),
nn.ReLU(inplace=True)
)
self.fc1 = nn.Sequential(
nn.Conv2d(mid_channel, len, kernel_size=1, stride=1, bias=False),
nn.ReLU(inplace=True)
)
self.fcs = nn.ModuleList([])
for i in range(2):
self.fcs.append(
nn.Conv2d(len, mid_channel, kernel_size=1, stride=1)
)
self.softmax = nn.Softmax(dim=1)
nn.init.kaiming_uniform_(self.conv1[0].weight, a=1)
nn.init.kaiming_uniform_(self.conv2[0].weight, a=1)
def forward(self, x, y=None, shape=None):
x = self.conv1(x)
if self.fuse:
shape = x.shape[-2:]
b = x.shape[0]
y = F.interpolate(y, shape, mode="nearest")
feas_U = [x,y]
feas_U = torch.stack(feas_U,dim=1)
attention = torch.sum(feas_U, dim=1)
attention = self.gap(attention)
if b ==1:
attention = self.fc1(attention)
else:
attention = self.fc(attention)
attention = [fc(attention) for fc in self.fcs]
attention = torch.stack(attention, dim=1)
attention = self.softmax(attention)
x = torch.sum(feas_U * attention, dim=1)
# output
y = self.conv2(x)
return y, x
class SKF(nn.Module):
def __init__(
self,student, in_channels, out_channels, mid_channel, embed
):
super(SKF, self).__init__()
self.student = student
skfs = nn.ModuleList()
for idx, in_channel in enumerate(in_channels):
skfs.append(SK(in_channel, mid_channel, out_channels[idx], idx < len(in_channels)-1))
self.skfs = skfs[::-1]
self.embed = embed
if self.embed == 5:
self.embed1_linearproject = nn.Linear(in_channels[0], out_channels[0])
self.embed2_linearproject = nn.Linear(in_channels[1], out_channels[1])
self.embed3_linearproject = nn.Linear(in_channels[2], out_channels[2])
self.embed4_linearproject = nn.Linear(in_channels[3], out_channels[3])
elif self.embed == 1:
self.embed1_linearproject = nn.Linear(in_channels[0], out_channels[0])
elif self.embed == 2:
self.embed1_linearproject = nn.Linear(in_channels[1], out_channels[1])
elif self.embed == 3:
self.embed1_linearproject = nn.Linear(in_channels[2], out_channels[2])
elif self.embed == 4:
self.embed1_linearproject = nn.Linear(in_channels[3], out_channels[3])
def forward(self, x):
student_features = self.student(x,is_feat=True)
embed = student_features[2]
logit = student_features[1]
x = student_features[0][::-1]
results = []
embedproj = []
out_features, res_features = self.skfs[0](x[0])
results.append(out_features)
for features, skf in zip(x[1:], self.skfs[1:]):
out_features, res_features = skf(features, res_features)
results.insert(0, out_features)
if self.embed ==5:
embedproj = [*embedproj, self.embed1_linearproject(embed[0])]
embedproj = [*embedproj, self.embed2_linearproject(embed[1])]
embedproj = [*embedproj, self.embed3_linearproject(embed[2])]
embedproj = [*embedproj, self.embed4_linearproject(embed[3])]
return results, logit, embedproj
elif self.embed == 0:
return results, logit
elif self.embed == 1:
embedproj = [*embedproj, self.embed1_linearproject(embed[0])]
return results, logit, embedproj
elif self.embed == 2:
embedproj = [*embedproj, self.embed1_linearproject(embed[1])]
return results, logit, embedproj
elif self.embed == 3:
embedproj = [*embedproj, self.embed1_linearproject(embed[2])]
return results, logit, embedproj
elif self.embed == 4:
embedproj = [*embedproj, self.embed1_linearproject(embed[3])]
return results, logit, embedproj
else:
            raise ValueError('the number of embeddings not supported')
def build_kd_trans(model,embed,in_channels = [32, 64, 160, 256], out_channels = [64, 128, 320, 512]):
mid_channel = 64
student = model
model = SKF(student, in_channels, out_channels, mid_channel,embed)
return model
def hcl(fstudent, fteacher):
loss_all = 0.0
for fs, ft in zip(fstudent, fteacher):
n,c,h,w = fs.shape
loss = F.mse_loss(fs, ft, reduction='mean')
cnt = 1.0
tot = 1.0
for l in [4,2,1]:
if l >=h:
continue
tmpfs = F.adaptive_avg_pool2d(fs, (l,l))
tmpft = F.adaptive_avg_pool2d(ft, (l,l))
cnt /= 2.0
loss += F.mse_loss(tmpfs, tmpft, reduction='mean') * cnt
tot += cnt
loss = loss / tot
loss_all = loss_all + loss
return loss_all
class ChannelNorm(nn.Module):
def __init__(self):
super(ChannelNorm, self).__init__()
def forward(self,featmap):
n,c,h,w = featmap.shape
featmap = featmap.reshape((n,c,-1))
featmap = featmap.softmax(dim=-1)
return featmap
class CriterionCWD(nn.Module):
def __init__(self,norm_type='none',divergence='mse',temperature=1.0):
super(CriterionCWD, self).__init__()
# define normalize function
if norm_type == 'channel':
self.normalize = ChannelNorm()
elif norm_type =='spatial':
self.normalize = nn.Softmax(dim=1)
elif norm_type == 'channel_mean':
self.normalize = lambda x:x.view(x.size(0),x.size(1),-1).mean(-1)
else:
self.normalize = None
self.norm_type = norm_type
self.temperature = 1.0
# define loss function
if divergence == 'mse':
self.criterion = nn.MSELoss(reduction='sum')
elif divergence == 'kl':
self.criterion = nn.KLDivLoss(reduction='sum')
self.temperature = temperature
self.divergence = divergence
def forward(self,preds_S, preds_T):
n,c,h,w = preds_S.shape
#import pdb;pdb.set_trace()
if self.normalize is not None:
norm_s = self.normalize(preds_S/self.temperature)
norm_t = self.normalize(preds_T.detach()/self.temperature)
else:
norm_s = preds_S[0]
norm_t = preds_T[0].detach()
if self.divergence == 'kl':
norm_s = norm_s.log()
loss = self.criterion(norm_s,norm_t)
#item_loss = [round(self.criterion(norm_t[0][0].log(),norm_t[0][i]).item(),4) for i in range(c)]
#import pdb;pdb.set_trace()
if self.norm_type == 'channel' or self.norm_type == 'channel_mean':
loss /= n * c
# loss /= n * h * w
else:
loss /= n * h * w
return loss * (self.temperature**2)
######################################################################################################################
class EmbedChannelNorm(nn.Module):
def __init__(self):
super(EmbedChannelNorm, self).__init__()
def forward(self,embed):
n,c,_ = embed.shape
embed = embed.softmax(dim=-1)
return embed
class CriterionEmbedCWD(nn.Module):
def __init__(self,norm_type='none',divergence='mse',temperature=1.0):
super(CriterionEmbedCWD, self).__init__()
# define normalize function
if norm_type == 'channel':
self.normalize = EmbedChannelNorm()
elif norm_type =='spatial':
self.normalize = nn.Softmax(dim=1)
elif norm_type == 'channel_mean':
self.normalize = lambda x:x.view(x.size(0),x.size(1),-1).mean(-1)
else:
self.normalize = None
self.norm_type = norm_type
self.temperature = 1.0
# define loss function
if divergence == 'mse':
self.criterion = nn.MSELoss(reduction='sum')
elif divergence == 'kl':
self.criterion = nn.KLDivLoss(reduction='sum')
self.temperature = temperature
self.divergence = divergence
def forward(self,embed_S, embed_T):
embed_S = embed_S.transpose(1, 2).contiguous()
embed_T = embed_T.transpose(1, 2).contiguous()
n,c,_ = embed_S.shape
#import pdb;pdb.set_trace()
if self.normalize is not None:
norm_s = self.normalize(embed_S/self.temperature)
norm_t = self.normalize(embed_T.detach()/self.temperature)
else:
norm_s = embed_S[0]
norm_t = embed_T[0].detach()
if self.divergence == 'kl':
norm_s = norm_s.log()
loss = self.criterion(norm_s,norm_t)
if self.norm_type == 'channel' or self.norm_type == 'channel_mean':
loss /= n * c
return loss * (self.temperature**2)
def hcl_feaw(fstudent, fteacher):
loss_all = 0.0
fea_weights = [0.1,0.1,0.5,1]
for fs, ft,fea_w in zip(fstudent, fteacher,fea_weights):
n,c,h,w = fs.shape
loss = F.mse_loss(fs, ft, reduction='mean')
cnt = 1.0
tot = 1.0
for l in [4,2,1]:
if l >=h:
continue
tmpfs = F.adaptive_avg_pool2d(fs, (l,l))
tmpft = F.adaptive_avg_pool2d(ft, (l,l))
cnt /= 2.0
loss += F.mse_loss(tmpfs, tmpft, reduction='mean') * cnt
tot += cnt
loss = loss / tot
loss_all = loss_all + fea_w*loss
return loss_all
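# Illustrative usage sketch (assumptions: mit_b0() can be constructed with default arguments
# and accepts the input resolution below; this is not part of the original training code):
#
#     student = mit_b0()
#     distiller = build_kd_trans(student, embed=5)
#     feats, logit, embeds = distiller(torch.randn(2, 3, 512, 512))
#     # feats can then be compared against the teacher's feature maps with hcl()/CriterionCWD.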
|
RuipingL/TransKD
|
train/CSF.py
|
CSF.py
|
py
| 10,763 |
python
|
en
|
code
| 10 |
github-code
|
6
|
39463845510
|
import time
import picamera
import sqlite3
import signal
import os
import shutil
pidDB = sqlite3.connect('/home/pi/System/PID.db')
pidCursor = pidDB.cursor()
actualPID = os.getpid()
print("I'm PID " + str(actualPID))
pidCursor.execute("""UPDATE PID SET value = ? WHERE name = ?""", (actualPID, "camera"))
pidDB.commit()
"""Function to take timelapse"""
def CameraFootage(signum, stack):
print("Received:" + str(signum))
if signum == 10:
print("Beginning timelapse")
with picamera.PiCamera() as camera:
camera.start_preview()
camera.annotate_text = time.strftime('%Y-%m-%d %H:%M:%S')
time.sleep(1)
shutil.rmtree('/home/dev/www/public/media/')
os.mkdir('/home/dev/www/public/media')
i = 0
for filename in camera.capture_continuous('/home/dev/www/public/media/img{counter:03d}.jpg'):
if i < 20:
print("Captured %s" %filename)
time.sleep(1)
i = i +1
else:
i = 0
break
signal.signal(signal.SIGUSR1, CameraFootage)
while True:
time.sleep(3)
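# Usage note (assumption about the surrounding setup): the timelapse is triggered by sending
# SIGUSR1 (signal 10, as checked in CameraFootage) to the PID stored in PID.db, e.g. from a
# shell:  kill -USR1 <pid>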
|
jeremyalbrecht/Alarm-RPI
|
camera.py
|
camera.py
|
py
| 1,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9003224390
|
import json
from django.http import HttpResponse
__author__ = 'diraven'
class HttpResponseJson(HttpResponse):
def __init__(self, data=None, is_success=False, message=''):
response_data = {
'data': data,
'message': message,
'success': is_success
}
super(HttpResponseJson, self).__init__(json.dumps(response_data), content_type="application/json")
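# Illustrative usage sketch (hypothetical Django view, not part of the original module):
#
#     def my_view(request):
#         return HttpResponseJson(data={"id": 1}, is_success=True, message="created")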
|
diraven/streamchats2
|
base/classes/HttpResponseJson.py
|
HttpResponseJson.py
|
py
| 411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26664521611
|
from rsa_class import RSAUtil
def main():
    # The key written here is not the one actually used, because the file is protected with a passphrase
RSA = RSAUtil()
RSA.new_keys(2048)
RSA.save_key("private","./keys/authorize_private.bin")
RSA.save_key("public","./keys/authorize_public.pem")
if __name__ == "__main__":
main()
|
kangaroo-0000/cythonize-in-one-click
|
rsa_authorize/utils/rsa/generator.py
|
generator.py
|
py
| 337 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6196779715
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022/9/12 2:08 PM
# @Author : LiangJun
# @Filename : test_demo2.py
import unittest
from ddt import ddt, data
test_datas = [
{'id': 1, 'title': '测试用例1'},
{'id': 2, 'title': '测试用例2'},
{'id': 3, 'title': '测试用例3'}
]
@ddt
class TestDemo(unittest.TestCase):
@data(*test_datas)
def test_demo1(self, i):
print(i)
|
lj5092/py14_Test_Open
|
py14_04day/dome/test_demo2.py
|
test_demo2.py
|
py
| 427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16566955673
|
# City division plan
# There is a village with n houses and m roads. Split the village into two villages and build the roads at minimum cost.
# My solution 1
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
graph = []
parent = [i for i in range(n+1)]
for _ in range(m):
a, b, c = map(int, input().split())
graph.append((c,a,b))
graph.sort()
def find_parent(x):
while parent[x] != x:
x = parent[x]
return x
def union_parent(a, b, c):
a = find_parent(a)
b = find_parent(b)
if a == b:
return 0
elif a > b:
parent[a] = b
else:
parent[b] = a
return c
ans = 0
last = 0
for c, a, b in graph:
c = union_parent(a, b, c)
if c != 0:
last = c
ans += c
print(ans - last)
|
dngus1683/codingTestStudy
|
알고리즘/Disjointset /백준 / python/1647.py
|
1647.py
|
py
| 829 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
26041286196
|
from __future__ import annotations
import itertools
import logging
import os
from typing import Callable, Iterable, cast
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.goals.lockfile import synthetic_lockfile_target_name
from pants.backend.python.macros.common_fields import (
ModuleMappingField,
TypeStubsModuleMappingField,
)
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
PythonRequirementModulesField,
PythonRequirementResolveField,
PythonRequirementsField,
PythonRequirementTarget,
PythonRequirementTypeStubModulesField,
)
from pants.core.target_types import (
TargetGeneratorSourcesHelperSourcesField,
TargetGeneratorSourcesHelperTarget,
)
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs
from pants.engine.internals.target_adaptor import TargetAdaptor, TargetAdaptorRequest
from pants.engine.rules import Get
from pants.engine.target import (
Dependencies,
GenerateTargetsRequest,
InvalidFieldException,
SingleSourceField,
)
from pants.engine.unions import UnionMembership
from pants.util.pip_requirement import PipRequirement
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
ParseRequirementsCallback = Callable[[bytes, str], Iterable[PipRequirement]]
async def _generate_requirements(
request: GenerateTargetsRequest,
union_membership: UnionMembership,
python_setup: PythonSetup,
parse_requirements_callback: ParseRequirementsCallback,
) -> Iterable[PythonRequirementTarget]:
generator = request.generator
requirements_rel_path = generator[SingleSourceField].value
requirements_full_path = generator[SingleSourceField].file_path
overrides = {
canonicalize_project_name(k): v
for k, v in request.require_unparametrized_overrides().items()
}
# Pretend this is just another generated target, for typing purposes.
file_tgt = cast(
"PythonRequirementTarget",
TargetGeneratorSourcesHelperTarget(
{TargetGeneratorSourcesHelperSourcesField.alias: requirements_rel_path},
Address(
request.template_address.spec_path,
target_name=request.template_address.target_name,
relative_file_path=requirements_rel_path,
),
union_membership,
),
)
req_deps = [file_tgt.address.spec]
resolve = request.template.get(
PythonRequirementResolveField.alias, python_setup.default_resolve
)
lockfile = (
python_setup.resolves.get(resolve) if python_setup.enable_synthetic_lockfiles else None
)
if lockfile:
lockfile_address = Address(
os.path.dirname(lockfile),
target_name=synthetic_lockfile_target_name(resolve),
)
target_adaptor = await Get(
TargetAdaptor,
TargetAdaptorRequest(
description_of_origin=f"{generator.alias} lockfile dep for the {resolve} resolve",
address=lockfile_address,
),
)
if target_adaptor.type_alias == "_lockfiles":
req_deps.append(f"{lockfile}:{synthetic_lockfile_target_name(resolve)}")
else:
logger.warning(
softwrap(
f"""
The synthetic lockfile target for {lockfile} is being shadowed by the
{target_adaptor.type_alias} target {lockfile_address}.
There will not be any dependency to the lockfile.
Resolve by either renaming the shadowing target, the resolve {resolve!r} or
moving the target or the lockfile to another directory.
"""
)
)
digest_contents = await Get(
DigestContents,
PathGlobs(
[requirements_full_path],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=f"{generator}'s field `{SingleSourceField.alias}`",
),
)
module_mapping = generator[ModuleMappingField].value
stubs_mapping = generator[TypeStubsModuleMappingField].value
def generate_tgt(
project_name: str, parsed_reqs: Iterable[PipRequirement]
) -> PythonRequirementTarget:
normalized_proj_name = canonicalize_project_name(project_name)
tgt_overrides = overrides.pop(normalized_proj_name, {})
if Dependencies.alias in tgt_overrides:
tgt_overrides[Dependencies.alias] = list(tgt_overrides[Dependencies.alias]) + req_deps
return PythonRequirementTarget(
{
**request.template,
PythonRequirementsField.alias: list(parsed_reqs),
PythonRequirementModulesField.alias: module_mapping.get(normalized_proj_name),
PythonRequirementTypeStubModulesField.alias: stubs_mapping.get(
normalized_proj_name
),
# This may get overridden by `tgt_overrides`, which will have already added in
# the file tgt.
Dependencies.alias: req_deps,
**tgt_overrides,
},
request.template_address.create_generated(project_name),
union_membership,
)
requirements = parse_requirements_callback(digest_contents[0].content, requirements_full_path)
grouped_requirements = itertools.groupby(
requirements, lambda parsed_req: parsed_req.project_name
)
result = tuple(
generate_tgt(project_name, parsed_reqs_)
for project_name, parsed_reqs_ in grouped_requirements
) + (file_tgt,)
if overrides:
raise InvalidFieldException(
softwrap(
f"""
Unused key in the `overrides` field for {request.template_address}:
{sorted(overrides)}
"""
)
)
return result
|
pantsbuild/pants
|
src/python/pants/backend/python/macros/common_requirements_rule.py
|
common_requirements_rule.py
|
py
| 6,084 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
26986909966
|
# -*- coding: utf-8 -*-
import pytest
from nameko.testing.utils import get_extension
from nameko.testing.waiting import wait_for_call
from nameko_grpc.client import Client
from nameko_grpc.entrypoint import GrpcServer
class TestCloseSocketOnClientExit:
@pytest.fixture(params=["server=nameko"])
def server_type(self, request):
return request.param[7:]
def test_close_socket(self, server, load_stubs, spec_dir, grpc_port, protobufs):
"""Regression test for https://github.com/nameko/nameko-grpc/issues/39"""
stubs = load_stubs("example")
client = Client(
"//localhost:{}".format(grpc_port),
stubs.exampleStub,
"none",
"high",
False,
)
proxy = client.start()
container = server
grpc_server = get_extension(container, GrpcServer)
connection_ref = grpc_server.channel.conn_pool.connections.queue[0]
connection = connection_ref()
response = proxy.unary_unary(protobufs.ExampleRequest(value="A"))
assert response.message == "A"
with wait_for_call(connection.sock, "close"):
client.stop()
|
nameko/nameko-grpc
|
test/test_connection.py
|
test_connection.py
|
py
| 1,178 |
python
|
en
|
code
| 57 |
github-code
|
6
|
72946561467
|
#! -*- coding=utf-8 -*-
import os
import sys
filepath = os.path.abspath(__file__)
sys.path.append(os.path.dirname(os.path.dirname(filepath)))
import threading
import time
from datetime import datetime
from multiprocessing import Process
from machines.machineVPN import MachineVPN
# from machines.machineWujiVPN import MachineVPN
from machines.machineXposeHook import MachineXHook as Machine008
from appium4droid import webdriver
from bootstrap import setup_boostrap
from TotalMachine import WorkMachine
from appium4droid.support.ui import WebDriverWait
from machines.StateMachine import Machine
import random
import requests
import re
class TotalMachine(WorkMachine):
def load_task_info(self):
return []
def setup_machine(self):
dr = self.driver
self.machine008 = Machine008(dr)
self.machine008.task_schedule = ["record_file", "clear_data", "modify_data_suiji"] # 007 task list
self.appname = "testsdk"
def main_loop(self):
dr = self.driver
m008 = self.machine008
while True:
try:
dr.press_keycode(3)
time.sleep(1)
dr.press_keycode(3)
time.sleep(1)
                # clear background apps
# dr.press_keycode(82)
# time.sleep(1)
# WebDriverWait(dr, 10).until(lambda d: d.find_element_by_id("com.android.systemui:id/clearButton")).click()
# time.sleep(1)
MachineVPN(dr).run()
m008.run()
# dr.press_keycode(3)
# time.sleep(1)
# dr.press_keycode(3)
# time.sleep(1)
# WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
# time.sleep(5)
                # enable speed-up
# dr.press_keycode(3)
# time.sleep(1)
# WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name("GMD Speed Time")).click()
# time.sleep(1)
# WebDriverWait(dr, 30).until(lambda d: d.find_element_by_id("com.gmd.speedtime:id/buttonStart")).click()
# time.sleep(2)
dr.press_keycode(3)
time.sleep(1)
WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
time.sleep(15)
                # log the IP
self.log_ip()
dr.press_keycode(3)
time.sleep(5)
WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
time.sleep(1)
                # disable speed-up
# dr.press_keycode(3)
# time.sleep(1)
# WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name("嘀嗒拼车")).click()
# time.sleep(5)
# WebDriverWait(dr, 30).until(lambda d: d.find_element_by_id("com.gmd.speedtime:id/buttonStop")).click()
# time.sleep(1)
# dr.press_keycode(3)
# time.sleep(1)
except Exception as e:
print("somting wrong")
print(e)
finally:
pass
print("Again\n")
return self.exit
def log_ip(self):
WEB_URL = 'http://ip.chinaz.com/getip.aspx'
r = requests.get(WEB_URL)
print(r.text)
match = re.search(r'ip:\'(.+)\'\,address:\'(.+)\'', r.text)
if match:
print(match.group(1))
print(match.group(2))
ip = match.group(1)
addr = match.group(2)
with open('/sdcard/1/ip.log', 'a') as f:
f.write('\n%s %s' % (ip, addr))
if __name__ == "__main__":
TM = TotalMachine()
TM.run()
|
cash2one/brush-1
|
slave/scripts/test/testht.py
|
testht.py
|
py
| 3,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7807511248
|
import unittest
from metagame_balance.vgc.competition import get_pkm_points, STANDARD_TOTAL_POINTS
from metagame_balance.vgc.util.generator.PkmRosterGenerators import RandomPkmRosterGenerator
class TestEncodingMethods(unittest.TestCase):
def test_random_roster_generator(self):
gen = RandomPkmRosterGenerator()
roster = gen.gen_roster()
for tmpl in roster:
pkm = tmpl.gen_pkm([0, 1, 2, 3])
print(pkm)
points = get_pkm_points(pkm)
print(points)
self.assertLess(points, STANDARD_TOTAL_POINTS + 1)
|
nianticlabs/metagame-balance
|
test/TestRandomRosterGenerator.py
|
TestRandomRosterGenerator.py
|
py
| 587 |
python
|
en
|
code
| 3 |
github-code
|
6
|
40646452965
|
import numpy as np
import array
def ros2dict(msg):
if type(msg) in (str, bool, int, float):
return msg
output = {}
for field in msg.get_fields_and_field_types():
value = getattr(msg, field)
if type(value) in (str, bool, int, float):
output[field] = value
elif type(value) is list:
output[field] = [ros2dict(el) for el in value]
elif type(value) in (np.ndarray, array.array):
output[field] = [ros2dict(el) for el in value.tolist()]
else:
output[field] = ros2dict(value)
return output
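# Illustrative usage sketch (assumes a ROS 2 environment where std_msgs is available):
#
#     from std_msgs.msg import Header
#     print(ros2dict(Header()))
#     # -> {'stamp': {'sec': 0, 'nanosec': 0}, 'frame_id': ''}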
|
foxpoint-se/eel
|
src/eel/eel/utils/radio_helpers/ros2dict.py
|
ros2dict.py
|
py
| 602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13543436023
|
import pandas as pd
import numpy as np
import scipy.stats as stats
import pylab as pl
import re
import seaborn as sns
import matplotlib.pyplot as plt
import random
sns.set(font_scale = 1.5)
pd.set_option('display.max_columns', 15)
pd.set_option('display.max_rows', 40)
filepath = '\\Coding\\DataAnalystInterview\\MarketValue\\ResidentialHouse2019Data.csv'
filepath1 = '\\Coding\\DataAnalystInterview\\MarketValue\\ResidentialCondo2019Data.csv'
DataHouse = pd.read_csv(filepath, header = 0, sep = ',')
DataCondo = pd.read_csv(filepath1,header=0,sep=',')
filepath2 = '\\Coding\\DataAnalystInterview\\Neighbourhoods.csv'
Neighbourhoods = pd.read_csv(filepath2, header = None, sep = ',')
Interquartile = Neighbourhoods[Neighbourhoods[1] > 1.5*(10**8)]
Interquartile = Interquartile[Interquartile[1] < 6*(10**8)]
Interquartile = Interquartile[0].tolist()
Interquartilesample = random.choices(Interquartile, k=5)
print (Interquartilesample)
#Lotsize vs assesed value without removing outliers. Determined Condo v. House using "unit" in legal description
plt.figure()
#sns.scatterplot(x='Lot_Size',y='Assessed_Value',data=DataHouse)
plt.figure()
#sns.scatterplot(x='Lot_Size',y='Assessed_Value',data=DataCondo)
'''Removing lot size outliers/Year Built Outliers'''
DataHouse = pd.read_csv(filepath, header = 0, sep = ',')
DistributionHouse = (DataHouse['Lot_Size'].quantile([0.1, 0.25, 0.75, 1]))
(tophouse,bottomhouse) = 623 +((623-394) * 1.5), 394 - ((623-394) * 1.5)
test = (DataHouse['Assessed_Value'].quantile([0.1, 0.25, 0.75, 1]))
print(test)
DataHouse = DataHouse[DataHouse['Lot_Size'] > bottomhouse]
DataHouse = DataHouse[DataHouse['Lot_Size'] < tophouse]
DataHouse = DataHouse[DataHouse['Actual_Year_Built'] > 1600]
DataHouseNeighbourhood = DataHouse[DataHouse['Neighbourhood'].isin(Interquartilesample)]
'''HOUSES Lot Size vs. Assessed Value'''
plt.figure()
sns.lmplot(x='Lot_Size',y='Assessed_Value', hue = 'Neighbourhood',data=DataHouseNeighbourhood, height = 10)
plt.ylim(0,)
plt.xlim(0,)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Lot_Size'],DataHouseNeighbourhood['Assessed_Value'])
print ('DataNeighborhood : lotsize v. assessed value', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Lot_Size',y='Assessed_Value',data=DataHouse, height = 10)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Lot_Size'],DataHouse['Assessed_Value'])
print ('DataHouse: lotsize v. assessed value', slope, intercept, r_value, p_value, std_err)
'''Economies of Scale, Lot Size vs. PPSF'''
plt.figure()
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Lot_Size'],DataHouseNeighbourhood['PricePerSquareMeter'])
print ('DataNeighborhood: Economies of Scale', slope, intercept, r_value, p_value, std_err)
sns.lmplot(x='Lot_Size',y='PricePerSquareMeter', hue = 'Neighbourhood', height = 10, data=DataHouseNeighbourhood)
plt.figure()
sns.lmplot(x='Lot_Size',y='PricePerSquareMeter',data=DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(0,)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Lot_Size'],DataHouse['PricePerSquareMeter'])
print ('DataHouse: Economies of Scale', slope, intercept, r_value, p_value, std_err)
''' Year Built '''
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Assessed_Value',hue = 'Neighbourhood', height = 10, data=DataHouseNeighbourhood)
plt.ylim(0,)
plt.xlim(1940,2020)
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Actual_Year_Built'],DataHouseNeighbourhood['Assessed_Value'])
print ('DataNeighborhood: Actual Year Built', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Assessed_Value', data = DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(1940,2020)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Actual_Year_Built'],DataHouse['Assessed_Value'])
print ('DataHouse: Actual Year Built', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Lot_Size', data = DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(1940,2020)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Actual_Year_Built'],DataHouse['Lot_Size'])
print ('DataHouse: Actual Year Built', slope, intercept, r_value, p_value, std_err)
'''Neighbourhood Group'''
DataHouseNeighbourhood.boxplot('Assessed_Value','Neighbourhood',figsize=(27,8))
'''
dummy = pd.get_dummies(DataHouse['Neighbourhood'])
print(dummy.head())
dummy.to_csv(r'C:\\Users\\aviel\\Desktop\\Coding\\Data Analyst Interview\\MarketValue\\test.csv', index = False)
'''
|
avielchow/Property-Assessment-Analysis
|
Analysis.py
|
Analysis.py
|
py
| 5,400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28868669946
|
import unittest
from babarbackend.models import *
from babarbackend.api import *
class UserTestCase(unittest.TestCase):
"""
"""
def setUp(self):
self.manager = TaskManager()
def tearDown(self):
User.objects.all().delete()
def testCreateUser(self):
username = 'sara'
email = '[email protected]'
snooze_seconds = 90
user_id = self.manager.create_user(username=username, email=email, snooze_seconds=snooze_seconds)
user = User.objects.get(id=user_id)
self.assertEquals(user.username, username)
self.assertEquals(user.email, email)
self.assertEquals(user.snooze_seconds, snooze_seconds)
# same username raises integrity error
self.assertRaises(Exception, self.manager.create_user, username=username, email=email, snooze_seconds=snooze_seconds)
|
codergirl/babar
|
babarbackend/tests.py
|
tests.py
|
py
| 862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18523322737
|
import xlrd
import xlwt
from featureComp import *
from createmat import *
def findRank(path2):
for i in range(1,39):
path3=path2+str(i)+'.xlsx'
matchday=xlrd.open_workbook(path3)
sheet1=matchday.sheet_by_index(0)
#print path3,'\n'
for j in range(1,21):
team_rank[sheet1.cell(j,2).value.strip()].append(sheet1.cell(j,0).value)
def resetMatches(matches_played):
for k in matches_played.keys():
matches_played[k]=0
teams={}
teamprofile={}
matches_played={}
team_rank={}
train_book=xlwt.Workbook()
sheet1=train_book.add_sheet("sheet 1")
book = xlrd.open_workbook("Season_table.xlsx")
first_sheet = book.sheet_by_index(0)
form_table=([0.75,0.15,20],[0.6,0.25,16],[0.4,0.4,12],[0.15,0.6,10])
for i in range(1,37):
teams[first_sheet.cell(i,0).value.strip()]=[]
teamprofile[first_sheet.cell(i,0).value.strip()]=[]
matches_played[first_sheet.cell(i, 0).value.strip()]=0
team_rank[first_sheet.cell(i,0).value.strip()]=[]
num=2005
match=1
featureobj=Feature()
for j in range(10):
path='Fixtures/'+str(num)+'.xlsx'
path2='Match Days/'+str(num)+'/Match'
fbook=xlrd.open_workbook(path)
first_sheet = fbook.sheet_by_index(0)
findRank(path2)
AQDQmat(first_sheet,teams)
FORMmat(first_sheet, team_rank, teams, matches_played, form_table)
resetMatches(matches_played)
featureobj.featureCompute(first_sheet,sheet1,teams,matches_played,teamprofile)
num+=1
train_book.save("training.xls")
rtrain_book=xlrd.open_workbook('training.xlsx')
svmdatasheet=rtrain_book.sheet_by_index(0)
with open('svmdataformat', 'w') as f:
featureobj.SVMformat(svmdatasheet,f)
f.closed
'''
for k,v in teams.iteritems():
print k
print '------------------'
print v
'''
teamslist=[]
for i in range(1,37):
for j in (9,10):
if int(book.sheet_by_index(0).cell(i,j).value)==1:
teamslist.append(book.sheet_by_index(0).cell(i,0).value.strip())
for names in teamslist:
train_book=xlwt.Workbook()
sheet1=train_book.add_sheet("sheet 1")
for i in range(len(teamprofile[names])):
for j in range(4):
sheet1.row(i).write(j,teamprofile[names][i][j])
train_book.save(str(names)+".xlsx")
|
kushg18/football-match-winner-prediction
|
main.py
|
main.py
|
py
| 2,107 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31534303974
|
## LESSON 6 Q1: AUDITING - ITERATIVE PARSING/SAX PARSE using ITERPARSE
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Your task is to use the iterative parsing to process the map file and
find out not only what tags are there, but also how many, to get the
feeling on how much of which data you can expect to have in the map.
The output should be a dictionary with the tag name as the key
and number of times this tag can be encountered in the map as value.
Note that your code will be tested with a different data file than the 'example.osm'
"""
import xml.etree.ElementTree as ET
import pprint
def count_tags(filename):
# YOUR CODE HERE
tagdict = {}
for event, elem in ET.iterparse(filename):
try:
if elem.tag in tagdict:
tagdict[elem.tag] += 1
else:
tagdict[elem.tag] = 1
elem.clear()
        except (AttributeError, TypeError):  # guard against a malformed or missing element
pass
return tagdict
def test():
tags = count_tags('examples.osm')
pprint.pprint(tags)
assert tags == {'bounds': 1,
'member': 3,
'nd': 4,
'node': 20,
'osm': 1,
'relation': 1,
'tag': 7,
'way': 1}
if __name__ == "__main__":
test()
|
rjshanahan/Data_Wrangling_with_MongoDB
|
Lesson 1_Udacity_MongoDB_CSV+JSON.py
|
Lesson 1_Udacity_MongoDB_CSV+JSON.py
|
py
| 1,349 |
python
|
en
|
code
| 2 |
github-code
|
6
|
31629715534
|
from flask import Flask, render_template, redirect, request
from flask import Blueprint
from models.visit import Visit
import repositories.visit_repository as visit_repository
import repositories.country_repository as country_repository
import repositories.user_repository as user_repository
visits_blueprint = Blueprint("visits", __name__)
@visits_blueprint.route("/users/<user_id>")
def visited_countries(user_id):
visited_countries = visit_repository.show_all(user_id)
countries = country_repository.select_all()
user = user_repository.select_by_id(user_id)
return render_template("visits/index.html", all_visits = visited_countries, all_countries = countries, user = user)
@visits_blueprint.route("/visits/<user_id>", methods=['POST'])
def add_visited_country(user_id):
user = user_repository.select_by_id(user_id)
country_user_id = request.form['select_country']
country = country_repository.select_by_id(country_user_id)
visit = Visit(user, country, True)
visit_repository.save(visit)
return redirect('/users/'+ user_id)
@visits_blueprint.route("/visits/<visit_id>/<user_id>/delete", methods= ['GET'])
def delete_visit(visit_id, user_id):
visit_repository.delete(visit_id)
return redirect('/users/' + user_id)
# @visits_blueprint.route("/countries", methods= ['POST'])
# def update_country(name, continent,flag):
# name = request.form['name']
# continent = request.form['continent']
# flag = request.form['flag']
# visit_repository.update()
# return render_template('/countries')
|
paolaguerralibrero/bucket_list_python_project_w5
|
controllers/visit_controller.py
|
visit_controller.py
|
py
| 1,562 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43599317125
|
from __future__ import division
import h5py
import numpy as np
'''
PARAMETERS
'''
#savefig()
outFile='all_data.hdf5'
def main():
f=h5py.File(outFile,'r')
ds = f['data'][:,0:6,:]
data = f['interpo']
import_features=['Weight_Index', 'Waist(CM)', 'Hip(CM)', 'Waist_Hip_Ratio','systolic_pressure', 'diastolic_pressure', 'Hb', 'Cr', 'Ch', 'TG', 'HDL', 'LDL', 'FBG', 'PBG', 'INS0', 'CP0', 'Ch', 'TG', 'HDL', 'LDL', 'FBG', 'PBG', 'HbA1c', 'INS0','HOMAIR', 'HOMAB', 'CP0', 'CRP', 'FFA', 'visceral_fat', 'subcutaneous_fat','FT3', 'FT4', 'TSH']
all_features=['Weight(KG)','Weight_Index','Waist(CM)','Hip(CM)','Waist_Hip_Ratio','Heart_rate','systolic_pressure','diastolic_pressure','WBC','Hb','ALT','AST','rGT','ALP','prealbumin','bile_acid','total_bilirubin','direct_bilirubin','BUN','Cr','uric_acid','RBP','CysC','K','Na','Mg','Ca','P','Ch','TG','HDL','LDL','FBG','PBG','HbA1c','GA','INS0','INS30','INS120','HOMAIR','HOMAB','CP0','CP30','CP120','HOMAcp','ALB1','ALB2','ALB3','Average_uric_ALB','GFR','ACR','CRP','folic_acid','VitB12','PTH','OH25D','Serum_Fe','serum_Fe_protein','CA199','FFA','visceral_fat','subcutaneous_fat','FT3','FT4','TSH','Reversed_T3','BG30','AAINS0','AAINS2','AAINS4','AAINS6','AAINS_index','AACP0','AACP2','AACP4','AACP6','AACP_index','urinary_uric_acid','Urine_creatinine']
all_ids = f['ids'][0:-2]
build_cm(data,import_features,all_features,all_ids)
return
def is_important(feature,important):
if feature in important:
return True
else:
return False
def build_cm(data,import_features,all_features,all_ids):
pt_idx = data.shape[0] - 1
ft_idx = data.shape[2] - 1
pt_list = all_ids
ft_list = all_features
temp_mat = data[:,:,:]
path_f = open('path.txt','w')
order_f = open('order.txt','w')
rem_f = open('removal_order.txt','w')
cm = np.zeros([data.shape[0],data.shape[2]])
f_cm = open('cm.txt','w')
while pt_idx != 0 and ft_idx != 0:
path_f.write('(%d,%d)\n' % (pt_idx,ft_idx))
p_order, f_order, p_max, f_max = sort_by_nan(temp_mat,pt_list,ft_list,import_features)
temp_mat = temp_mat[p_order,:,:]
temp_mat = temp_mat[:,:,f_order]
pt_list = [pt_list[p_order[x]] for x in range(len(p_order))]
ft_list = [ft_list[f_order[x]] for x in range(len(f_order))]
for i in range(pt_idx,-1,-1):
cm[i,ft_idx] = np.count_nonzero(np.isnan(temp_mat[0:i+1,:,0:ft_idx+1]))#/(data.shape[-1]*data.shape[1])
for i in range(ft_idx,-1,-1):
cm[pt_idx,i] = np.count_nonzero(np.isnan(temp_mat[0:pt_idx+1,:,0:i+1]))#/(data.shape[0]*data.shape[1])
order_f.write('%s' % (pt_list[p_order[0]]))
for i in range(1,len(p_order)):
order_f.write(', %s' % (pt_list[p_order[i]]))
order_f.write('\n')
order_f.write('%s' % (ft_list[f_order[0]]))
for i in range(1,len(f_order)):
order_f.write(', %s' % (ft_list[f_order[i]]))
order_f.write('\n')
order_f.write('\n')
if p_max > f_max or ft_idx == 0:
rem_f.write('%s\n' % (pt_list[-1]))
temp_mat = temp_mat[0:pt_idx,:,:]
pt_idx -= 1
pt_list = pt_list[0:-1]
else:
rem_f.write('%s\n' % (ft_list[-1]))
temp_mat = temp_mat[:,:,0:ft_idx]
ft_idx -= 1
ft_list = ft_list[0:-1]
for i in range(cm.shape[0]):
f_cm.write('%f' % (cm[i,0]))
for j in range(1,cm.shape[1]):
f_cm.write(', %f' % (cm[i,j]))
f_cm.write('\n')
f_cm.close()
def sort_by_nan(data,patients,features,important):
pt_pcts = np.zeros(len(patients))
ft_pcts = np.zeros(len(features))
n_pts = data.shape[0]
n_feats = data.shape[2]
n_tpts = data.shape[1]
#percent (# empty) / (total #) for each patient
for i in range(n_pts):#patient id
pt_pcts[i] = float(np.count_nonzero(np.isnan(data[i,:,:])))/(n_feats*n_tpts)
#percent (# empty) / (total #) for each feature
for i in range(n_feats):
ft_pcts[i] = float(np.count_nonzero(np.isnan(data[:,:,i])))/(n_pts*n_tpts)
p_order = np.argsort(pt_pcts)
f_order = np.argsort(ft_pcts)
p_max = np.nanmax(pt_pcts)
f_max = np.nanmax(ft_pcts)
# count = 0
# for i in range(len(f_order)):
# if is_important(features[f_order[i]],important):
# continue
# else:
# if count != i and count < len(important):
# j = i
# while j < n_feats and is_important(features[f_order[j]],important):
# j += 1
# if j == len(f_order):
# break
# temp = f_order[j]
# for k in range(j,i,-1):
# f_order[k] = f_order[k-1]
# f_order[i] = temp
# count += 1
return p_order, f_order, p_max, f_max
main()
|
taylorsmith-UKY/diabetes
|
get_path.py
|
get_path.py
|
py
| 4,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8411903253
|
url = "http://dantri.com.vn/"
output_file_name = "news.xlsx"
#Step 1: Download information on the Dantri website
from urllib.request import urlopen
from bs4 import BeautifulSoup
#1.1: Open a connection
conn = urlopen(url)
#1.2: read
raw_data = conn.read() #byte
#1.3: Decode
html_content = raw_data.decode('utf-8')
# Faster way
# from urllib.request import urlopen
# html_content = urlopen(url).read().decode('utf-8')
# print(html_content)
# print(html_content)
#How to save html_content as a file (in case internet is weak)
# html_file = open("dantri.html","wb") #write: byte
# html_file.write(raw_data)
# html_file.close()
#Step 2: Extract ROI (Region of interest)
#Create a soup
soup = BeautifulSoup(html_content, "html.parser")
# print(soup.prettify)
ul = soup.find("ul", "ul1 ulnew")
# find is only used to get a single element
# print(ul.prettify())
li_list = ul.find_all("li")
# find_all is used to get all matching elements
# for li in li_list:
# print(li)
# print("***" * 10)
#Step 3: Extract News
news_list = []
for li in li_list:
# li = li_list[0]
# h4 = li.h4 #h4 = li.find("h4")
# a = h4.a
#better way:
# a = li.h4.a (or li.a)
a = li.h4.a
href = url + a["href"]
title = a.string
news = {
"title": title,
"link": href
}
news_list.append(news)
print(news_list)
|
taanh99ams/taanh-lab-c4e15
|
Lab 2/dan_tri_extract.py
|
dan_tri_extract.py
|
py
| 1,322 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70204805949
|
import requests
from fake_useragent import UserAgent
import re
import base64
import sys
from fontTools.ttLib import TTFont
from lxml import etree
import pymysql
# Unicode => ASCII => hex
from unicode_to_hex import get_hex_back
# Subclass TTFont to read raw bytes directly, avoiding repeatedly opening and closing the woff file during dynamic font decryption
class MyTTFont(TTFont):
"""
    Main purpose: read the byte-string data directly, avoiding file access every time
fontTools-version: 4.22.0
"""
def __init__(self, file_content, checkChecksums=0, fontNumber=-1, _tableCache=None):
from fontTools.ttLib.sfnt import SFNTReader
from io import BytesIO
        # use the content bytes in place of the original code's open()
file = BytesIO(file_content)
super().__init__()
        # reuse the parent class's initialisation; nothing is passed to it, so the attributes below are set manually
self._tableCache = _tableCache
self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
self.sfntVersion = self.reader.sfntVersion
self.flavor = self.reader.flavor
self.flavorData = self.reader.flavorData
class TongchengSpider:
def __init__(self):
        # regex that matches the base64 data of the font file
self.regex = r"charset=utf-8;base64,(.*?)'\) format\('truetype'\)"
self.pattern = re.compile(self.regex, re.S)
        # store results in the MySQL database
self.db = pymysql.connect(
host='192.168.31.63',
port=3306,
user='root',
password='123456',
database='ershouche'
)
self.cursor = self.db.cursor()
self.ins = 'insert into carinfo(brand,detail,price) values(%s,%s,%s)'
def get_requests_data(self, url):
"""简单封装了随机UA的get请求"""
ua = UserAgent().chrome
headers = {'User-Agent': ua}
html = requests.get(url=url, headers=headers).text
# print(html)
return html
    # extract the base64 font data from the page
def parse_font(self, html):
"""传入HTML text数据提取font文件的base64数据"""
font_base64 = self.pattern.findall(html)
if font_base64:
font_base64 = font_base64[0].encode()
            # return the base64-decoded byte string
return base64.b64decode(font_base64)
else:
sys.exit('没有匹配到字体数据')
    # build the mapping from price glyph codes to the real digits
def create_font_dict(self, font):
"""
        Build a dict from the font object, for font files that only contain 0-9 in order
        :param font: a font object
        :return: mapping from hex code to the font digit
"""
font_names = font.getGlyphOrder()
font_dict = {}
number = 0
        # this dynamic font encryption is fairly simple: the font file keeps changing, but the correspondence between the glyph order and the digits does not
for font_name in font_names[1:]:
font_name = font_name[3:]
font_dict[font_name] = str(number)
number += 1
return font_dict
    # extract the brand, model and price glyphs from the used-car page and restore the real digits
def parse_ershouche_data(self, html, font_dict):
p = etree.HTML(html)
info_title = p.xpath('//li[@class="info"]/div/a')
result_list = []
for msg in info_title:
car_brand = msg.xpath('.//span[@class="info_link"]/font/text()')[0]
car_info = msg.xpath('.//span[@class="info_link"]/text()')[0].strip()
car_price_obj = msg.xpath('.//div[@class="info--price"]/b/text()')[0]
price_info = get_hex_back(car_price_obj)
price_info = self.decode_real_price(price_info, font_dict) + '万元'
result_list.append((car_brand, car_info, price_info))
return result_list
    # decode and join the price that is actually displayed
def decode_real_price(self, price_info_dict, font_dict):
"""
        Convert the hex codes in the page source into the digits that are actually displayed
        :param price_info_dict: dict with integer and decimal parts, e.g. {'int_part': ['2f'], 'decimal_part': ['2d']}
        :param font_dict: lookup dict from hex to the font digit, e.g. {'8D77': 0, '5143': 1,...}
        :return: the joined price string, without the unit (万元, i.e. 10,000 yuan)
"""
        # get the encoded integer and decimal parts
int_part_list = price_info_dict['int_part']
decimal_part_list = price_info_dict['decimal_part']
        # look up and convert the integer part
int_part = self.query_hex_codes(int_part_list, font_dict)
        # if the list element is 0 rather than a hex code there is no decimal data; note that a real price of 0 would still have a code that maps to the font's '0'
if not decimal_part_list[0]:
return int_part
else:
            # look up and convert the decimal part
decimal_part = self.query_hex_codes(decimal_part_list, font_dict)
return int_part + '.' + decimal_part
    # look up a run of price glyph codes and join them into a number, without the decimal point
def query_hex_codes(self, hex_list, font_dict):
"""
        Iterate over the hex codes in the list and look up the corresponding font digits
        :param hex_list: hex codes of the encrypted price in the page source
        :param font_dict: mapping from hex to the font digit
:return:
"""
price_str = ''
for item in hex_list:
price_slices = font_dict.get(item)
price_str += price_slices
return price_str
def save_mysql(self,result_list):
self.cursor.executemany(self.ins,result_list)
self.db.commit()
def run(self):
        # use the first 5 pages of the target site as an example
for i in range(5):
url = 'https://cd.58.com/ershouche/pn%s/' % (i+1)
html = self.get_requests_data(url)
            # build the font lookup dict:
font_content = self.parse_font(html)
font = MyTTFont(font_content)
            # saving as an xml file with the overridden MyTTFont still works as in the original
# font.saveXML('1.xml')
font_dict = self.create_font_dict(font)
# print(font_dict)
font.close()
result_list = self.parse_ershouche_data(html, font_dict)
print(result_list)
self.save_mysql(result_list)
self.cursor.close()
self.db.close()
if __name__ == '__main__':
spider = TongchengSpider()
spider.run()
|
xiaohao-a/58_ershouche_font
|
58ershouche.py
|
58ershouche.py
|
py
| 6,376 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.