Dataset columns:

| column | dtype | values / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
922ff5d630c9b04ec7b8d6b206e71f56a91e60c2 | f4dedea53630c9cbdc6297ae4a7e2a8195fd7691 | /10 Advanced Techniques/19 Signal Processing.py | c172f714e1656b011d12b7c13426a9755447f1f3 | [] | no_license | nikkisora/cses_problemset | d089db048444e07e002f131b4323adc9df95b05b | 03160f33e36cdc6d538403357b36bcb015b4dba7 | refs/heads/master | 2023-07-03T10:34:23.487709 | 2021-08-05T21:13:49 | 2021-08-05T21:13:49 | 379,251,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | '''
CSES - Signal Processing
Time limit: 1.00 s
Memory limit: 512 MB
You are given two integer sequences: a signal and a mask. Your task is to process the signal by moving the mask through the signal from left to right. At each mask position calculate the sum of products of aligned signal and mask values in the part where the signal and the mask overlap.
Input
The first input line consists of two integers n and m: the length of the signal and the length of the mask.
The next line consists of n integers a_1,a_2,...,a_n defining the signal.
The last line consists of m integers b_1,b_2,...,b_m defining the mask.
Output
Print n+m-1 integers: the sum of products of aligned values at each mask position from left to right.
Constraints
1 <= n,m <= 2 * 10^5
1 <= a_i,b_i <= 100
Example
Input:
5 3
1 3 2 1 4
1 2 3
Output:
3 11 13 10 16 9 4
Explanation: For example, at the second mask position the sum of aligned products is 2 * 1 + 3 * 3 = 11.
''' | [
"[email protected]"
] | |
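The row above stores only the CSES problem statement. As an illustration (not part of the dataset row), the task is a plain convolution, so a minimal sketch of one solution is to convolve the signal with the reversed mask using NumPy's FFT:

```python
# Illustrative sketch only, not from the original file. Assumes float64 FFT precision
# suffices for the stated constraints (values <= 100, n, m <= 2*10^5).
import numpy as np

def signal_processing(a, b):
    n, m = len(a), len(b)
    size = 1
    while size < n + m - 1:
        size *= 2
    # Sliding the mask across the signal equals convolving with the reversed mask.
    fa = np.fft.rfft(a, size)
    fb = np.fft.rfft(b[::-1], size)
    out = np.fft.irfft(fa * fb, size)[:n + m - 1]
    return [int(round(v)) for v in out]

print(*signal_processing([1, 3, 2, 1, 4], [1, 2, 3]))  # 3 11 13 10 16 9 4
```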
fa987bfdd73ebad2cf8c88d6d524f5747f1813f0 | 0827979a9e3bfca5900726f1cef428f8a8c819ba | /NRPyPN/PN_Hamiltonian_SS.py | c7b73e919953363b5e77f4d954b77a8449fb0f81 | [
"BSD-2-Clause"
] | permissive | zachetienne/nrpytutorial | 12763c9c0e0be0007b8cae5688225a33c8fb4442 | 1230b4d602e0657d42de0c7ea193c34058e4aca9 | refs/heads/master | 2023-09-01T06:31:22.549594 | 2023-08-14T19:47:16 | 2023-08-14T19:47:16 | 135,812,438 | 88 | 46 | BSD-2-Clause | 2023-09-02T00:25:36 | 2018-06-02T11:34:10 | Jupyter Notebook | UTF-8 | Python | false | false | 4,781 | py | # As documented in the NRPyPN notebook
# PN-Hamiltonian-Spin-Spin.ipynb, this Python script
# generates spin-spin coupling pieces of the
# post-Newtonian (PN) Hamiltonian, up to and
# including 3PN order.
# Core functions:
# f_H_SS_2PN(m1,m2, S1U,S2U, nU, q):
# Compute the complete H_SS_2PN term and store to
# global variable of the same name.
# f_H_SS_S1S2_3PN(m1,m2, n12U, S1U,S2U, p1U,p2U, r12):
# Compute H_SS_S1S2_3PN and store to global variable
# of the same name.
# f_H_SS_S1sq_S2sq_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, r12):
# Compute H_SS_S1sq_S2sq_3PN and store to global
# variable of the same name.
# Author: Zach Etienne
# zachetie **at** gmail **dot* com
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import indexedexpNRPyPN as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
from NRPyPN_shortcuts import div,dot,cross # NRPyPN: shortcuts for e.g., vector operations
#################################
#################################
# 2PN spin-spin term, from Eqs. 2.18 and 2.19 of
# Buonanno, Chen, and Damour (2006):
# https://arxiv.org/abs/gr-qc/0508067
def f_H_SS_2PN(m1,m2, S1U,S2U, nU, q):
S0U = ixp.zerorank1()
for i in range(3):
S0U[i] = (1 + m2/m1)*S1U[i] + (1 + m1/m2)*S2U[i]
global H_SS_2PN
mu = m1*m2 / (m1 + m2)
H_SS_2PN = mu/(m1 + m2) * (3*dot(S0U,nU)**2 - dot(S0U,S0U)) / (2*q**3)
#################################
#################################
# 3PN spin-spin S_1,S_2 coupling term, from Eq. 2.11 of
# Steinhoff, Hergt, and Sch\"afer (2008a)
# https://arxiv.org/abs/0712.1716
def f_H_SS_S1S2_3PN(m1,m2, n12U, S1U,S2U, p1U,p2U, r12):
global H_SS_S1S2_3PN
H_SS_S1S2_3PN = (+div(3,2)*(dot(cross(p1U,S1U),n12U)*dot(cross(p2U,S2U),n12U))
+ 6*(dot(cross(p2U,S1U),n12U)*dot(cross(p1U,S2U),n12U))
-15*dot(S1U,n12U)*dot(S2U,n12U)*dot(p1U,n12U)*dot(p2U,n12U)
-3*dot(S1U,n12U)*dot(S2U,n12U)*dot(p1U,p2U)
+3*dot(S1U,p2U)*dot(S2U,n12U)*dot(p1U,n12U)
+3*dot(S2U,p1U)*dot(S1U,n12U)*dot(p2U,n12U)
+3*dot(S1U,p1U)*dot(S2U,n12U)*dot(p2U,n12U)
+3*dot(S2U,p2U)*dot(S1U,n12U)*dot(p1U,n12U)
-div(1,2)*dot(S1U,p2U)*dot(S2U,p1U)
+dot(S1U,p1U)*dot(S2U,p2U)
-3*dot(S1U,S2U)*dot(p1U,n12U)*dot(p2U,n12U)
+div(1,2)*dot(S1U,S2U)*dot(p1U,p2U))/(2*m1*m2*r12**3)
H_SS_S1S2_3PN+= (-dot(cross(p1U,S1U),n12U)*dot(cross(p1U,S2U),n12U)
+dot(S1U,S2U)*dot(p1U,n12U)**2
-dot(S1U,n12U)*dot(S2U,p1U)*dot(p1U,n12U))*3/(2*m1**2*r12**3)
H_SS_S1S2_3PN+= (-dot(cross(p2U,S2U),n12U)*dot(cross(p2U,S1U),n12U)
+dot(S1U,S2U)*dot(p2U,n12U)**2
-dot(S2U,n12U)*dot(S1U,p1U)*dot(p2U,n12U))*3/(2*m2**2*r12**3)
H_SS_S1S2_3PN+= (+dot(S1U,S2U)-2*dot(S1U,n12U)*dot(S2U,n12U))*6*(m1+m2)/r12**4
#################################
#################################
# 3PN spin-orbit coupling term, from Eq. 9 of
# Steinhoff, Hergt, and Sch\"afer (2008b)
# https://arxiv.org/abs/0809.2200
def f_H_SS_S1sq_S2sq_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, r12):
def f_H_SS_particle(m1,m2, n12U, S1U,_S2U, p1U,p2U, r12): # _S2U unused.
H_SS_S1sq_S2sq_3PN_particle = (
+ m2/(4*m1**3)*dot(p1U,S1U)**2
+3*m2/(8*m1**3)*dot(p1U,n12U)**2*dot(S1U,S1U)
-3*m2/(8*m1**3)*dot(p1U,p1U)*dot(S1U,n12U)**2
-3*m2/(4*m1**3)*dot(p1U,n12U)*dot(S1U,n12U)*dot(p1U,S1U)
-3/(4*m1*m2)*dot(p2U,p2U)*dot(S1U,S1U)
+9/(4*m1*m2)*dot(p2U,p2U)*dot(S1U,n12U)**2
+3/(4*m1**2)*dot(p1U,p2U)*dot(S1U,S1U)
-9/(4*m1**2)*dot(p1U,p2U)*dot(S1U,n12U)**2
-3/(2*m1**2)*dot(p1U,n12U)*dot(p2U,S1U)*dot(S1U,n12U)
+3/(m1**2) *dot(p2U,n12U)*dot(p1U,S1U)*dot(S1U,n12U)
+3/(4*m1**2)*dot(p1U,n12U)*dot(p2U,n12U)*dot(S1U,S1U)
-15/(4*m1**2)*dot(p1U,n12U)*dot(p2U,n12U)*dot(S1U,n12U)**2)/r12**3
H_SS_S1sq_S2sq_3PN_particle+= -(+div(9,2)*dot(S1U,n12U)**2
-div(5,2)*dot(S1U,S1U)
+7*m2/m1*dot(S1U,n12U)**2
-3*m2/m1*dot(S1U,S1U))*m2/r12**4
return H_SS_S1sq_S2sq_3PN_particle
global H_SS_S1sq_S2sq_3PN
H_SS_S1sq_S2sq_3PN = (+f_H_SS_particle(m1,m2, n12U, S1U,S2U, p1U,p2U, r12)
+f_H_SS_particle(m2,m1, n21U, S2U,S1U, p2U,p1U, r12))
| [
"[email protected]"
] | |
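A hypothetical usage sketch for the module above (not part of the original file). The import path and the symbol setup are assumptions; in NRPyPN the input symbols are normally provided by NRPyPN_shortcuts.

```python
# Hypothetical usage sketch; module path and symbol names are assumptions.
import sympy as sp
import PN_Hamiltonian_SS as H_SS  # assumed importable from the NRPyPN directory

m1, m2, q = sp.symbols("m1 m2 q", positive=True)
nU  = [sp.Symbol("n%d" % i) for i in range(3)]
S1U = [sp.Symbol("S1_%d" % i) for i in range(3)]
S2U = [sp.Symbol("S2_%d" % i) for i in range(3)]

# Populates the module-level global H_SS_2PN with the 2PN spin-spin term.
H_SS.f_H_SS_2PN(m1, m2, S1U, S2U, nU, q)
print(sp.simplify(H_SS.H_SS_2PN))
```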
c5ec7aeea7ebd380c20fdedc5a2edfd5b703ce91 | 8a1bbbe4d3d487fcb5f86c9d5f108ea2b4de1894 | /df/r_incore.py | 818b962533399e9d73ea9a297d17207225f2dd09 | [
"BSD-2-Clause"
] | permissive | molguin-qc/pyscf | a7abaa7b61143c58fae065d2cf035952e782a1f0 | 0ca910a816e116542c83913b52e7a4a1cad83454 | refs/heads/master | 2020-04-06T06:21:13.065884 | 2015-11-24T22:49:49 | 2015-11-24T22:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
import time
import ctypes
import _ctypes
import numpy
import scipy.linalg
import pyscf.lib
from pyscf.lib import logger
import pyscf.gto
from pyscf.df import incore
from pyscf.scf import _vhf
libri = pyscf.lib.load_library('libri')
def _fpointer(name):
return ctypes.c_void_p(_ctypes.dlsym(libri._handle, name))
# (ij|L)
def aux_e2(mol, auxmol, intor='cint3c2e_spinor', aosym='s1', comp=1, hermi=0):
atm, bas, env = \
pyscf.gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
c_atm = numpy.array(atm, dtype=numpy.int32)
c_bas = numpy.array(bas, dtype=numpy.int32)
c_env = numpy.array(env)
natm = ctypes.c_int(mol.natm+auxmol.natm)
nbas = ctypes.c_int(mol.nbas)
nao = mol.nao_2c()
naoaux = auxmol.nao_nr()
if aosym == 's1':
eri = numpy.empty((nao*nao,naoaux), dtype=numpy.complex)
fill = _fpointer('RIfill_r_s1_auxe2')
else:
eri = numpy.empty((nao*(nao+1)//2,naoaux), dtype=numpy.complex)
fill = _fpointer('RIfill_r_s2ij_auxe2')
fintor = _fpointer(intor)
cintopt = _vhf.make_cintopt(c_atm, c_bas, c_env, intor)
libri.RIr_3c2e_auxe2_drv(fintor, fill,
eri.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(0), ctypes.c_int(mol.nbas),
ctypes.c_int(mol.nbas), ctypes.c_int(auxmol.nbas),
ctypes.c_int(1), cintopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
libri.CINTdel_optimizer(ctypes.byref(cintopt))
return eri
# (L|ij)
def aux_e1(mol, auxmol, intor='cint3c2e_spinor', aosym='s1', comp=1, hermi=0):
pass
def cholesky_eri(mol, auxbasis='weigend', aosym='s1', verbose=0):
t0 = (time.clock(), time.time())
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(mol.stdout, verbose)
auxmol = incore.format_aux_basis(mol, auxbasis)
j2c = incore.fill_2c2e(mol, auxmol)
log.debug('size of aux basis %d', j2c.shape[0])
t1 = log.timer('2c2e', *t0)
low = scipy.linalg.cholesky(j2c, lower=True)
j2c = None
t1 = log.timer('Cholesky 2c2e', *t1)
j3c_ll = aux_e2(mol, auxmol, intor='cint3c2e_spinor', aosym=aosym)
j3c_ss = aux_e2(mol, auxmol, intor='cint3c2e_spsp1_spinor', aosym=aosym)
t1 = log.timer('3c2e', *t1)
cderi_ll = scipy.linalg.solve_triangular(low, j3c_ll.T, lower=True,
overwrite_b=True)
cderi_ss = scipy.linalg.solve_triangular(low, j3c_ss.T, lower=True,
overwrite_b=True)
# solve_triangular return cderi in Fortran order
cderi = (pyscf.lib.transpose(cderi_ll.T),
pyscf.lib.transpose(cderi_ss.T))
log.timer('cholesky_eri', *t0)
return cderi
if __name__ == '__main__':
from pyscf import scf
mol = pyscf.gto.Mole()
mol.build(
verbose = 0,
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
cderi = cholesky_eri(mol, verbose=5)
n2c = mol.nao_2c()
c2 = .5 / mol.light_speed
def fjk(mol, dm, *args, **kwargs):
# dm is 4C density matrix
cderi_ll = cderi[0].reshape(-1,n2c,n2c)
cderi_ss = cderi[1].reshape(-1,n2c,n2c)
vj = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
vk = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
rho =(numpy.dot(cderi[0], dm[:n2c,:n2c].T.reshape(-1))
+ numpy.dot(cderi[1], dm[n2c:,n2c:].T.reshape(-1)*c2**2))
vj[:n2c,:n2c] = numpy.dot(rho, cderi[0]).reshape(n2c,n2c)
vj[n2c:,n2c:] = numpy.dot(rho, cderi[1]).reshape(n2c,n2c) * c2**2
v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,:n2c])
vk[:n2c,:n2c] = numpy.einsum('pik,pkj->ij', v1, cderi_ll)
v1 = numpy.einsum('pij,jk->pik', cderi_ss, dm[n2c:,n2c:])
vk[n2c:,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**4
v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,n2c:])
vk[:n2c,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**2
vk[n2c:,:n2c] = vk[:n2c,n2c:].T.conj()
return vj, vk
mf = scf.DHF(mol)
mf.get_jk = fjk
mf.direct_scf = False
ehf1 = mf.scf()
print(ehf1, -76.08073868516945)
cderi = cderi[0].reshape(-1,n2c,n2c)
print(numpy.allclose(cderi, cderi.transpose(0,2,1).conj()))
| [
"[email protected]"
] | |
0022ad2cde11b4459237ac8330bc909f4317b4fd | 9cf97aa5fafe0ba5e06d72a19b50a7b326857dcf | /create_model_input.py | 7e02025b5139eebef743c40f5db58fca2dfd87f8 | [] | no_license | Shawn-nau/Time-series-prediction | a027b22f250e3dcd859f1d92a41a4e979a1a0526 | 044d34846d04a19898c3c8b874c7e982d545ab40 | refs/heads/master | 2020-09-11T13:18:34.457153 | 2019-03-30T15:00:57 | 2019-03-30T15:00:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | import logging
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
class Input_builder(object):
def __init__(self):
pass
def __call__(self, model,x,y=None,train_window=20,train_window_2=None):
if model=='weibull':
return self.create_weibull_input(x,y,train_window)
elif model=='svm' or model=='lstm':
return self.create_RNN_input(x,train_window)
elif model=='seq2seq':
return self.create_seq2seq_basic_input(x,train_window,train_window_2)
elif str(model)=='arima':
return x.iloc[:,-1].values
elif str(model)=='xgb':
return self.create_xgb_input(x)
def create_weibull_input(self,x,y,train_windows=20):
index_end=len(y)-1
y=list(y)
for yy in y[::-1]:
if yy!=y[-1]:
index_end=y.index(yy)
break
index_begin=index_end-train_windows if (index_end-train_windows>0) else 1
x,y=x[index_begin:index_end],y[index_begin:index_end]
logging.info("Weibull train data {}".format(len(x)))
return np.array(x),np.array(y)
def create_RNN_input(self,x_train,train_window):
#data=self.examples.iloc[:,-1].values
x,y=[],[]
for i in range(len(x_train)-train_window-1):
x.append(x_train[i:i+train_window])
y.append(x_train[i+train_window])
x=np.array(x)
x= x.reshape(x.shape[0],x.shape[1],1)
y=np.array(y)
y=y.reshape(y.shape[0],1)
return x,y
def create_seq2seq_basic_input(self,data,input_seq_length,output_seq_length):
#data=self.examples.iloc[:,-1].values
x,y=[],[]
for i in range(len(data)-input_seq_length-output_seq_length-1):
x.append([data[i:(i+input_seq_length)]])
y.append([data[(i+input_seq_length):(i+input_seq_length+output_seq_length)]])
x = np.array(x)
x2 = x.reshape(x.shape[0],-1, x.shape[1])
y= np.array(y)
y2 = y.reshape(y.shape[0],-1,y.shape[1])
return x2,y2
def create_seq2seq_input(self):
pass
def create_arima_input(self,examples):
data = examples.iloc[:,-1].values
return data
def create_xgb_input(self,examples):
# create date or time related feature as inputs
examples['year']=examples.iloc[:,0].apply(lambda x: int(str(x)[0:4]))
examples['week']=examples.iloc[:,0].apply(lambda x: int(str(x)[4:]))
examples.drop(columns=['Repair week'],inplace=True)
#examples = pd.get_dummies(examples, columns=['year']) # month
return examples.values
def _read_csv(self,data_dir):
examples=pd.read_csv(data_dir)
return examples
def _normalize(self,data):
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(data)
return dataset
class Input_pipe(object):
def __init__(self):
pass
def get_train_features(self):
pass
def get_dev_features(self):
pass
def get_test_features(self):
pass
def create_examples2features(self):
pass
| [
"[email protected]"
] | |
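A hypothetical usage sketch for the builder above (not part of the original file); the module name is inferred from the path in the row metadata:

```python
# Hypothetical usage sketch; module name inferred from the file path above.
import numpy as np
from create_model_input import Input_builder

series = np.arange(100, dtype=float)
builder = Input_builder()
x, y = builder('lstm', series)   # sliding windows of length 20 with next-step targets
print(x.shape, y.shape)          # (79, 20, 1) (79, 1)
```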
ff10be4b7205ee829e3efe5d87de1af27b52f859 | 02bbac5a5e12b44919945ae7e95eb8d4c5bde28d | /hyperion/metrics/dcf.py | d6dd9c58b03ec60f96d509f00b84566fa255949f | [
"Apache-2.0"
] | permissive | whkanggg/hyperion | 5f594cb97512080cf0523abdc6407a8bc6db4562 | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | refs/heads/master | 2020-08-09T14:18:04.689788 | 2019-07-25T18:39:01 | 2019-07-25T18:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,869 | py | """
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
from .roc import compute_rocch, rocch2eer
def compute_dcf(p_miss, p_fa, prior, normalize=True):
"""Computes detection cost function
DCF = prior*p_miss + (1-prior)*p_fa
Args:
p_miss: Vector of miss probabilities.
p_fa: Vector of false alarm probabilities.
prior: Target prior or vector of target priors.
normalize: if true, return normalized DCF, else unnormalized.
Returns:
Matrix of DCF for each pair of (p_miss, p_fa) and each value of prior.
[len(prior) x len(p_miss)]
"""
prior = np.asarray(prior)
if prior.ndim == 1:
prior = prior[:,None]
dcf = prior * p_miss + (1-prior) * p_fa
if normalize:
dcf /= np.minimum(prior, 1-prior)
return dcf
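# Worked example (illustrative comment, not in the original source):
# prior=0.01, p_miss=0.1, p_fa=0.02 gives DCF = 0.01*0.1 + 0.99*0.02 = 0.0208;
# dividing by min(prior, 1-prior) = 0.01 gives a normalized DCF of 2.08.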
def compute_min_dcf(tar, non, prior, normalize=True):
"""Computes minimum DCF
min_DCF = min_t prior*p_miss(t) + (1-prior)*p_fa(t)
where t is the decission threshold.
Args:
tar: Target scores.
non: Non-target scores.
prior: Target prior or vector of target priors.
normalize: if true, return normalized DCF, else unnormalized.
Returns:
Vector Minimum DCF for each prior.
Vector of P_miss corresponding to each min DCF.
Vector of P_fa corresponding to each min DCF.
"""
p_miss, p_fa = compute_rocch(tar, non)
dcf = compute_dcf(p_miss, p_fa, prior, normalize)
idx_min_dcf = np.argmin(dcf, axis=-1)
if dcf.ndim==1:
min_dcf = dcf[idx_min_dcf]
p_miss = p_miss[idx_min_dcf]
p_fa = p_fa[idx_min_dcf]
else:
i1 = np.arange(dcf.shape[0])
min_dcf = dcf[i1,idx_min_dcf]
p_miss = p_miss[idx_min_dcf]
p_fa = p_fa[idx_min_dcf]
return min_dcf, p_miss, p_fa
def compute_act_dcf(tar, non, prior, normalize=True):
"""Computes actual DCF by making decisions assuming that scores
are calibrated to act as log-likelihood ratios.
Args:
tar: Target scores.
non: Non-target scores.
prior: Target prior or vector of target priors.
normalize: if true, return normalized DCF, else unnormalized.
Returns:
Vector actual DCF for each prior.
Vector of P_miss corresponding to each act DCF.
Vector of P_fa corresponding to each act DCF.
"""
prior = np.asarray(prior)
if prior.ndim == 1:
assert np.all(prior == np.sort(prior, kind='mergesort')), 'priors must be in ascending order'
else:
prior = prior[None]
num_priors = len(prior)
ntar = len(tar)
nnon = len(non)
#thresholds
t = - np.log(prior) + np.log(1-prior)
ttar = np.concatenate((t, tar))
ii = np.argsort(ttar, kind='mergesort')
r = np.zeros((num_priors + ntar), dtype='int32')
r[ii] = np.arange(1, num_priors + ntar + 1)
r = r[:num_priors]
n_miss = r - np.arange(num_priors, 0, -1)
tnon = np.concatenate((t, non))
ii = np.argsort(tnon, kind='mergesort')
r = np.zeros((num_priors + nnon), dtype='int32')
r[ii] = np.arange(1, num_priors + nnon + 1)
r = r[:num_priors]
n_fa = nnon - r + np.arange(num_priors, 0, -1)
# n_miss2 = np.zeros((num_priors,), dtype='int32')
# n_fa2 = np.zeros((num_priors,), dtype='int32')
# for i in xrange(len(t)):
# n_miss2[i] = np.sum(tar<t[i])
# n_fa2[i] = np.sum(non>t[i])
# assert np.all(n_miss2 == n_miss)
# assert np.all(n_fa2 == n_fa)
# print(n_miss)
# print(n_fa)
p_miss = n_miss/ntar
p_fa = n_fa/nnon
act_dcf = prior * p_miss + (1-prior)*p_fa
if normalize:
act_dcf /= np.minimum(prior, 1-prior)
if len(act_dcf) == 1:
act_dcf = act_dcf[0]
return act_dcf, p_miss, p_fa
def fast_eval_dcf_eer(tar, non, prior, normalize_dcf=True):
"""Computes actual DCF, minimum DCF, EER and PRBE all togther
Args:
tar: Target scores.
non: Non-target scores.
prior: Target prior or vector of target priors.
normalize_dcf: if true, return normalized DCF, else unnormalized.
Returns:
Vector Minimum DCF for each prior.
Vector Actual DCF for each prior.
EER value
PREBP value
"""
p_miss, p_fa = compute_rocch(tar, non)
eer = rocch2eer(p_miss, p_fa)
N_miss = p_miss * len(tar)
N_fa = p_fa * len(non)
prbep = rocch2eer(N_miss, N_fa)
dcf = compute_dcf(p_miss, p_fa, prior, normalize_dcf)
min_dcf = np.min(dcf, axis=-1)
act_dcf, _, _ = compute_act_dcf(tar, non, prior, normalize_dcf)
return min_dcf, act_dcf, eer, prbep
| [
"[email protected]"
] | |
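A hypothetical usage sketch for the metrics module above (not part of the original file); the import path is inferred from the path in the row metadata:

```python
# Hypothetical usage sketch; import path inferred from the file path above.
import numpy as np
from hyperion.metrics.dcf import fast_eval_dcf_eer

rng = np.random.RandomState(0)
tar = rng.randn(1000) + 2.0      # target-trial scores (LLR-like)
non = rng.randn(10000)           # non-target-trial scores
priors = np.array([0.01, 0.05])  # must be in ascending order

min_dcf, act_dcf, eer, prbep = fast_eval_dcf_eer(tar, non, priors)
print("minDCF:", min_dcf, "actDCF:", act_dcf, "EER:", eer, "PRBEP:", prbep)
```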
1ac91b0249727f18d895e26bd82e26c8503f0e06 | 075390d2642d56861a742e2f1dcf6e5a774d1ec8 | /wechatArticies/demo.py | 7407b0377dafd13f5c64cb3b565f8eee8729aecd | [] | no_license | ybsdegit/proxypool | 903ed9ae77950e1840d93cb7fd2b38ddadc9e749 | 0a9354e4e3fdbb4b4d58e6e4881afc5afa8587f4 | refs/heads/master | 2020-04-30T17:23:34.894806 | 2019-03-21T16:50:35 | 2019-03-21T16:50:35 | 176,978,061 | 0 | 0 | null | 2019-03-21T15:52:46 | 2019-03-21T15:52:46 | null | UTF-8 | Python | false | false | 2,594 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/22 0:08
# @Author : Paulson
# @File : demo.py
# @Software: PyCharm
# @define : function
import json
import requests
from requests.exceptions import ConnectionError
PROXY_POOL_URL = 'http://localhost:5000/one' # one proxy
PROXIES_POOL_URL = 'http://127.0.0.1:5000/all' # all proxies
def get_proxy():
try:
response = requests.get(PROXY_POOL_URL)
if response.status_code == 200:
return response.json()
except ConnectionError:
return None
def test():
proxy = get_proxy()['proxy']
url = 'https://weixin.sogou.com/weixin?query=python&type=2&page=1'
headers = {
'Cookie': 'SUV=1547344793853119; SMYUV=1547344793854887; UM_distinctid=16844efd1034ab-0a1be8536d48a-b781636-1fa400-16844efd104204; CXID=5637462A2349CEE62D6F76AA18FF09AD; ad=HClhtZllll2tAipElllllVha6b7lllllnLLg0Zllll9lllllRZlll5@@@@@@@@@@; SUID=8F976D3B4B238B0A5C8B91BB0002B72F; IPLOC=CN1100; pgv_pvi=8364128256; pgv_si=s1505230848; ABTEST=0|1553084608|v1; SNUID=7B01FEA89397161722F5FFD993353858; weixinIndexVisited=1; sct=1; JSESSIONID=aaaD0UP5hNkqa4RQ9U-Lw; ppinf=5|1553086365|1554295965|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZTo1OTolRTUlODUlODMlRTUlQUUlOUQlRTYlQTMlQUUlRUYlQkMlODhQYXVsc29uJTIwV2llciVFRiVCQyU4OXxjcnQ6MTA6MTU1MzA4NjM2NXxyZWZuaWNrOjU5OiVFNSU4NSU4MyVFNSVBRSU5RCVFNiVBMyVBRSVFRiVCQyU4OFBhdWxzb24lMjBXaWVyJUVGJUJDJTg5fHVzZXJpZDo0NDpvOXQybHVCeW1jeXBjYXBVSjg4U2l6MUx6YXZ3QHdlaXhpbi5zb2h1LmNvbXw; pprdig=vmo-4_vS31dWkit52GXYNfr5d7VspV-gcfbJhb-dTfOkb9T7DxpGujrgoTJ_5ZgtIguTDlwcftF86zhWKjgIYgfjvl9qyZrh4yjhMSXYDRH0NWe4rGoBxRuY2siHHgaybghgxQo-s6Er2couIiHGJ50NNvwzbmfxPVHeurh3LbQ; sgid=14-39756811-AVySN513EIhUdTRZh2ibWXQk; ppmdig=1553086365000000133d76ef551230b4d4481bc7eb3066d5',
'Host': 'weixin.sogou.com',
'Referer': 'https://weixin.sogou.com/weixin?query=python&_sug_type_=&sut=1457&lkt=1%2C1553084639056%2C1553084639056&s_from=input&_sug_=y&type=2&sst0=1553084639158&page=11&ie=utf8&w=01019900&dr=1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
}
proxies = {
'https': 'https://' + proxy
}
try:
response = requests.get(url,allow_redirects=False,headers=headers,proxies=proxies,timeout=5)
print(proxies)
print(response.status_code)
except ConnectionError as e:
print('Error Occurred',e.args)
if __name__ == '__main__':
while True:
test()
# s=json.loads(proxy)
# print(proxy['proxy'])
| [
"[email protected]"
] | |
df1b94b8ff8b9f70e3b53c78cbdbd988c19b38a9 | e210c28eeed9d38eb78c14b3a6388eca1e0e85d8 | /tests/unit_test/app_common/statistics/stats_def_test.py | b96578b1c7ca54b12726685ee84f4b9e06a3b7e9 | [
"Apache-2.0"
] | permissive | NVIDIA/NVFlare | 5a2d2e4c85a3fd0948e25f1ba510449727529a15 | 1433290c203bd23f34c29e11795ce592bc067888 | refs/heads/main | 2023-08-03T09:21:32.779763 | 2023-07-05T21:17:16 | 2023-07-05T21:17:16 | 388,876,833 | 442 | 140 | Apache-2.0 | 2023-09-14T19:12:35 | 2021-07-23T17:26:12 | Python | UTF-8 | Python | false | false | 2,173 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import pandas as pd
from nvflare.app_common.abstract.statistics_spec import Bin, DataType, Histogram, HistogramType
from nvflare.app_common.statistics.numpy_utils import dtype_to_data_type
from nvflare.app_common.utils.json_utils import ObjectEncoder
class TestStatsDef:
def test_dtype_to_data_type(self):
train_data = [
["tom", 10, 15.5],
["nick", 15, 10.2],
["juli", 14],
["tom2", 10, 13.0],
["nick1", 25],
["juli1", 24, 10.5],
]
train = pd.DataFrame(train_data, columns=["Name", "Age", "Edu"])
assert DataType.STRING == dtype_to_data_type(train["Name"].dtype)
assert DataType.INT == dtype_to_data_type(train["Age"].dtype)
assert DataType.FLOAT == dtype_to_data_type(train["Edu"].dtype)
def test_feature_histogram_to_json(self):
even = [1, 3, 5, 7, 9]
odd = [2, 4, 6, 8, 10]
buckets = zip(even, odd)
bins = [Bin(low_value=b[0], high_value=b[1], sample_count=random.randint(10, 100)) for b in buckets]
hist = Histogram(HistogramType.STANDARD, bins)
statistics = {"histogram": {"site-1": {"train": {"feat": hist}}}}
x = json.dumps(statistics, cls=ObjectEncoder)
assert x.__eq__(
{
"histogram": {
"site-1": {
"train": {"feat": [0, [[1, 2, 83], [3, 4, 79], [5, 6, 69], [7, 8, 72], [9, 10, 20]], "null"]}
}
}
}
)
| [
"[email protected]"
] | |
0acf2de8988b83f552ee0e68ad6596e21dbee688 | e17b0ad0ebeb361e5565eb3d12e717f296a7b878 | /campanha/serializers.py | 59d2a9c5d8533925b7660699f482c3e15c887c8b | [] | no_license | easy-rpg/SheetAPI | 94ea732083c3a7a82577e59e3a882a878772d6eb | 5542197f8388eed761a15a79c6ccca4fd481ccba | refs/heads/master | 2022-12-11T17:01:16.130002 | 2018-07-05T00:26:48 | 2018-07-05T00:26:48 | 131,898,341 | 1 | 0 | null | 2022-11-22T02:30:09 | 2018-05-02T19:44:34 | Python | UTF-8 | Python | false | false | 603 | py | from rest_framework.serializers import ModelSerializer, CharField, StringRelatedField
from .models import Campanha, Arco
class ArcoSerializer(ModelSerializer):
campanha_nome = CharField(source='campanha.nome', read_only=True)
personagens = StringRelatedField(many=True, read_only=True)
class Meta:
model = Arco
fields = '__all__'
class CampanhaSerializer(ModelSerializer):
arcos = ArcoSerializer(many=True, read_only=True)
mestre_nome = CharField(source='mestre.username', read_only=True)
class Meta:
model = Campanha
fields = '__all__'
| [
"[email protected]"
] | |
8822e51cbaa2e4c42d764c8168d1caab8609a540 | efc6c38070f4587346c88ae2444a8b47bb51a635 | /backend/nameless_wave_19563/wsgi.py | 08b98350d05b1e315aaad1417e4a82387add737d | [] | no_license | andremcb/nameless-wave-19563 | ef259d2819855bb7b65f2c1c777a0d7fbf33df49 | cdfe66614bea363b8dbd25ab3232183971759041 | refs/heads/master | 2023-03-12T04:39:05.580066 | 2021-03-03T22:01:29 | 2021-03-03T22:01:29 | 344,275,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for nameless_wave_19563 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nameless_wave_19563.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
8915a08a3bb083150ff2fcbcd30f46be371d3afe | 421b0ae45f495110daec64ed98c31af525585c2c | /BasicPrograms/PrintPartten.py | 21336ad33f20ce8a61acd7e9063cb741f6ca0304 | [] | no_license | Pradeepsuthar/pythonCode | a2c87fb64c79edd11be54c2015f9413ddce246c4 | 14e2b397f69b3fbebde5b3af98898c4ff750c28c | refs/heads/master | 2021-02-18T05:07:40.402466 | 2020-03-05T13:14:15 | 2020-03-05T13:14:15 | 245,163,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # # # # #
# # # # #
# # # # #
# # # # #
for i in range(4): # for each row
for j in range(4): # for each column in the row
print("# ",end="")
print("")
print("\n")
#
# #
# # #
# # # #
for i in range(4): # for each row
for j in range(i+1): # for each column in the row
print("# ",end="")
print("")
print("\n")
# # # #
# # #
# #
#
for i in range(4): # for each row
for j in range(4-i): # for each column in the row
print("# ",end="")
print("")
| [
"[email protected]"
] | |
87bb7f7ef350864d08ee12e01c5a02668a812e6e | 2fc11a0aaf47cbaa64fb1d3aa304c51424a96324 | /test/basic_test.py | 1a8c1072e21942884e38dbec0556b33a7a1ac19c | [] | no_license | isabella232/dex-cli | 2cd73758980d0661c083cdf8aebcb8d73f07c297 | 652101177afdc76ab2f378e9a9cc5cc1b7a6aaa8 | refs/heads/master | 2022-12-30T18:42:50.279225 | 2020-10-21T08:45:53 | 2020-10-21T08:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # Example of test: Just for Integrating travis PR
# TODO: Add real tests https://github.com/gnosis/dex-cli/issues/25
def inc(x):
return x + 1
def test_answer():
assert inc(4) == 5
| [
"[email protected]"
] | |
76f406522001c4ab4dc3b879a3abdad7333ea711 | 8651c2c84e4b70ef6977d9364043605c354e1489 | /Ch8/02_pets.py | c92ecceef9a49b651aaee9681a2e0440e0395b43 | [] | no_license | sliverz6/Python_Crash_Course | c222cf1ff9dbe6518ee36a3db7f376c2e3b2a317 | 44cea09ab066e82feba97fee1e74e61fc7e1e565 | refs/heads/main | 2023-02-25T02:57:53.585677 | 2021-01-30T14:27:49 | 2021-01-30T14:27:49 | 333,345,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | def describe_pet(pet_name, animal_type="dog"):
"""애완동물에 관한 정보를 출력합니다."""
print("\nI have a " + animal_type + ".")
print("My " + animal_type + "'s name is " + pet_name.title() + ".")
describe_pet("harry") # 위치 매개변수
describe_pet(pet_name="harry", animal_type="hamster") # 키워드 매개변수
| [
"[email protected]"
] | |
331eaa11de4c8d4744427b517f6adbfc7b3e5a25 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /0.24/_downloads/ecd77f376b369abaa61bcf309ffb8563/interpolate_bad_channels.py | 1c7c1f1d7a168c1c71f51760d3aba752b53d2d47 | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 1,452 | py | """
.. _ex-interpolate-bad-channels:
=============================================
Interpolate bad channels for MEG/EEG channels
=============================================
This example shows how to interpolate bad MEG/EEG channels
- Using spherical splines from :footcite:`PerrinEtAl1989` for EEG data.
- Using field interpolation for MEG and EEG data.
In this example, the bad channels will still be marked as bad.
Only the data in those channels is replaced.
"""
# Authors: Denis A. Engemann <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 2
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname, condition='Left Auditory',
baseline=(None, 0))
# plot with bads
evoked.plot(exclude=[], picks=('grad', 'eeg'))
# %%
# Compute interpolation (also works with Raw and Epochs objects)
evoked_interp = evoked.copy().interpolate_bads(reset_bads=False)
evoked_interp.plot(exclude=[], picks=('grad', 'eeg'))
# %%
# You can also use minimum-norm for EEG as well as MEG
evoked_interp_mne = evoked.copy().interpolate_bads(
reset_bads=False, method=dict(eeg='MNE'), verbose=True)
evoked_interp_mne.plot(exclude=[], picks=('grad', 'eeg'))
# %%
# References
# ----------
# .. footbibliography::
| [
"[email protected]"
] | |
f310678a9fa600d8ab56e1100b469f3b7d2b850c | 6b233b45ac4ae18711a7f8a7730eebcf7e4e80ed | /dlms_control.py | 4db110d59c15672ed39fe3e81697db22ab8c0a10 | [] | no_license | petervanya/PTFEsim | 251b7501a48ab05245c778be0f39b9bacd821348 | 509ef87df647f5c1231efbbc0d0a84add1da28d6 | refs/heads/master | 2021-01-21T04:51:05.644202 | 2016-07-20T16:32:34 | 2016-07-20T16:32:34 | 46,088,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | #!/usr/bin/env python
"""Usage:
dlms_control.py [--L <L> --dt <dt> --steps <n> --thermo <th> --halo <h>]
Generate DL_MESO control file.
Options:
--L <L> Box length [default: 40.0]
--dt <dt> Timestep [default: 0.05]
--steps <n> Number of steps [default: 10000]
--thermo <th> Print every [default: 100]
--halo <h> Boundary halo, like neighbor [default: 2.5]
[email protected], 06/06/16
"""
from docopt import docopt
import sys
args = docopt(__doc__)
L = float(args["--L"])
dt = float(args["--dt"])
N = int(args["--steps"])
thermo = int(args["--thermo"])
halo = float(args["--halo"])
s = "pokus\n\n"
s += "volume " + str(L**3) + "\n"
s += "temperature 1.0\n"
s += "cutoff 1.0\n"
s += "boundary halo " + str(halo) + "\n\n"
s += "timestep " + str(dt) + "\n"
s += "steps " + str(N) + "\n"
s += "equilibration steps 0\n"
s += "scale temperature every 10\n"
s += "trajectory 0 100\n"
s += "stats every 100\n"
s += "stack size 100\n"
s += "print every " + str(thermo) + "\n\n"
s += "job time 1000000.0\n"
s += "close time 1.0\n\n"
s += "ensemble nvt dpdvv\n\n"
s += "finish\n"
print("Box size: %.1f | Timestep: %.3f | Num steps: %i" % (L, dt, N))
open("CONTROL", "w").write(s)
print("CONTROL file saved.")
| [
"[email protected]"
] | |
b6640b3f4567202c7d8f584c09ed67a6e7001c9d | cd76b483bdd0a3676d67c524c8923be2f744dcac | /pytorch3d/renderer/mesh/textures.py | abfc0a5474fc81649104829924bec2b62e1e377f | [
"BSD-3-Clause"
] | permissive | ikonushok/pytorch3d | c55ed6ced0d82d8d399879a9f8d06a36c1721165 | 36b451a49bdc481fb32707323c5bca53c34ac369 | refs/heads/master | 2023-05-10T14:24:48.561011 | 2021-06-04T01:29:42 | 2021-06-04T01:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,386 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from pytorch3d.ops import interpolate_face_attributes
from pytorch3d.structures.utils import list_to_packed, list_to_padded, padded_to_list
from torch.nn.functional import interpolate
from .utils import PackedRectangle, Rectangle, pack_unique_rectangles
# This file contains classes and helper functions for texturing.
# There are three types of textures: TexturesVertex, TexturesAtlas
# and TexturesUV which inherit from a base textures class TexturesBase.
#
# Each texture class has a method 'sample_textures' to sample a
# value given barycentric coordinates.
#
# All the textures accept either list or padded inputs. The values
# are stored as either per face values (TexturesAtlas, TexturesUV),
# or per face vertex features (TexturesVertex).
def _list_to_padded_wrapper(
x: List[torch.Tensor],
pad_size: Union[list, tuple, None] = None,
pad_value: float = 0.0,
) -> torch.Tensor:
r"""
This is a wrapper function for
pytorch3d.structures.utils.list_to_padded function which only accepts
3-dimensional inputs.
For this use case, the input x is of shape (F, 3, ...) where only F
is different for each element in the list
Transforms a list of N tensors each of shape (Mi, ...) into a single tensor
of shape (N, pad_size, ...), or (N, max(Mi), ...)
if pad_size is None.
Args:
x: list of Tensors
pad_size: int specifying the size of the first dimension
of the padded tensor
pad_value: float value to be used to fill the padded tensor
Returns:
x_padded: tensor consisting of padded input tensors
"""
N = len(x)
dims = x[0].ndim
reshape_dims = x[0].shape[1:]
D = torch.prod(torch.tensor(reshape_dims)).item()
x_reshaped = []
for y in x:
if y.ndim != dims and y.shape[1:] != reshape_dims:
msg = (
"list_to_padded requires tensors to have the same number of dimensions"
)
raise ValueError(msg)
x_reshaped.append(y.reshape(-1, D))
x_padded = list_to_padded(x_reshaped, pad_size=pad_size, pad_value=pad_value)
return x_padded.reshape((N, -1) + reshape_dims)
def _padded_to_list_wrapper(
x: torch.Tensor, split_size: Union[list, tuple, None] = None
) -> List[torch.Tensor]:
r"""
This is a wrapper function for pytorch3d.structures.utils.padded_to_list
which only accepts 3-dimensional inputs.
For this use case, the input x is of shape (N, F, ...) where F
is the number of faces which is different for each tensor in the batch.
This function transforms a padded tensor of shape (N, M, ...) into a
list of N tensors of shape (Mi, ...) where (Mi) is specified in
split_size(i), or of shape (M,) if split_size is None.
Args:
x: padded Tensor
split_size: list of ints defining the number of items for each tensor
in the output list.
Returns:
x_list: a list of tensors
"""
N, M = x.shape[:2]
reshape_dims = x.shape[2:]
D = torch.prod(torch.tensor(reshape_dims)).item()
x_reshaped = x.reshape(N, M, D)
x_list = padded_to_list(x_reshaped, split_size=split_size)
x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list]
return x_list
def _pad_texture_maps(
images: Union[Tuple[torch.Tensor], List[torch.Tensor]], align_corners: bool
) -> torch.Tensor:
"""
Pad all texture images so they have the same height and width.
Args:
images: list of N tensors of shape (H_i, W_i, 3)
align_corners: used for interpolation
Returns:
tex_maps: Tensor of shape (N, max_H, max_W, 3)
"""
tex_maps = []
max_H = 0
max_W = 0
for im in images:
h, w, _3 = im.shape
if h > max_H:
max_H = h
if w > max_W:
max_W = w
tex_maps.append(im)
max_shape = (max_H, max_W)
for i, image in enumerate(tex_maps):
if image.shape[:2] != max_shape:
image_BCHW = image.permute(2, 0, 1)[None]
new_image_BCHW = interpolate(
image_BCHW, size=max_shape, mode="bilinear", align_corners=align_corners
)
tex_maps[i] = new_image_BCHW[0].permute(1, 2, 0)
tex_maps = torch.stack(tex_maps, dim=0) # (num_tex_maps, max_H, max_W, 3)
return tex_maps
# A base class for defining a batch of textures
# with helper methods.
# This is also useful to have so that inside `Meshes`
# we can allow the input textures to be any texture
# type which is an instance of the base class.
class TexturesBase:
def isempty(self):
if self._N is not None and self.valid is not None:
return self._N == 0 or self.valid.eq(False).all()
return False
def to(self, device):
for k in dir(self):
v = getattr(self, k)
if isinstance(v, (list, tuple)) and all(
torch.is_tensor(elem) for elem in v
):
v = [elem.to(device) for elem in v]
setattr(self, k, v)
if torch.is_tensor(v) and v.device != device:
setattr(self, k, v.to(device))
self.device = device
return self
def _extend(self, N: int, props: List[str]) -> Dict[str, Union[torch.Tensor, List]]:
"""
Create a dict with the specified properties
repeated N times per batch element.
Args:
N: number of new copies of each texture
in the batch.
props: a List of strings which refer to either
class attributes or class methods which
return tensors or lists.
Returns:
Dict with the same keys as props. The values are the
extended properties.
"""
if not isinstance(N, int):
raise ValueError("N must be an integer.")
if N <= 0:
raise ValueError("N must be > 0.")
new_props = {}
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
if isinstance(t, list):
if not all(isinstance(elem, (int, float)) for elem in t):
raise ValueError("Extend only supports lists of scalars")
t = [[ti] * N for ti in t]
new_props[p] = list(itertools.chain(*t))
elif torch.is_tensor(t):
new_props[p] = t.repeat_interleave(N, dim=0)
return new_props
def _getitem(self, index: Union[int, slice], props: List[str]):
"""
Helper function for __getitem__
"""
new_props = {}
if isinstance(index, (int, slice)):
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
new_props[p] = t[index]
elif isinstance(index, list):
index = torch.tensor(index)
if isinstance(index, torch.Tensor):
if index.dtype == torch.bool:
index = index.nonzero()
index = index.squeeze(1) if index.numel() > 0 else index
index = index.tolist()
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
new_props[p] = [t[i] for i in index]
return new_props
def sample_textures(self):
"""
Different texture classes sample textures in different ways
e.g. for vertex textures, the values at each vertex
are interpolated across the face using the barycentric
coordinates.
Each texture class should implement a sample_textures
method to take the `fragments` from rasterization.
Using `fragments.pix_to_face` and `fragments.bary_coords`
this function should return the sampled texture values for
each pixel in the output image.
"""
raise NotImplementedError()
def faces_verts_textures_packed(self):
"""
Returns the texture for each vertex for each face in the mesh.
For N meshes, this function returns sum(Fi)x3xC where Fi is the
number of faces in the i-th mesh and C is the dimensionality of
the feature (C = 3 for RGB textures).
You can use the utils function in structures.utils to convert the
packed representation to a list or padded.
"""
raise NotImplementedError()
def clone(self):
"""
Each texture class should implement a method
to clone all necessary internal tensors.
"""
raise NotImplementedError()
def detach(self):
"""
Each texture class should implement a method
to detach all necessary internal tensors.
"""
raise NotImplementedError()
def __getitem__(self, index):
"""
Each texture class should implement a method
to get the texture properties for the
specified elements in the batch.
The TexturesBase._getitem(i) method
can be used as a helper function to retrieve the
class attributes for item i. Then, a new
instance of the child class can be created with
the attributes.
"""
raise NotImplementedError()
def Textures(
maps: Union[List, torch.Tensor, None] = None,
faces_uvs: Optional[torch.Tensor] = None,
verts_uvs: Optional[torch.Tensor] = None,
verts_rgb: Optional[torch.Tensor] = None,
) -> TexturesBase:
"""
Textures class has been DEPRECATED.
Preserving Textures as a function for backwards compatibility.
Args:
maps: texture map per mesh. This can either be a list of maps
[(H, W, 3)] or a padded tensor of shape (N, H, W, 3).
faces_uvs: (N, F, 3) tensor giving the index into verts_uvs for each
vertex in the face. Padding value is assumed to be -1.
verts_uvs: (N, V, 2) tensor giving the uv coordinate per vertex.
verts_rgb: (N, V, 3) tensor giving the rgb color per vertex. Padding
value is assumed to be -1.
Returns:
a Textures class which is an instance of TexturesBase e.g. TexturesUV,
TexturesAtlas, TexturesVertex
"""
warnings.warn(
"""Textures class is deprecated,
use TexturesUV, TexturesAtlas, TexturesVertex instead.
Textures class will be removed in future releases.""",
PendingDeprecationWarning,
)
if all(x is not None for x in [faces_uvs, verts_uvs, maps]):
# pyre-fixme[6]: Expected `Union[List[torch.Tensor], torch.Tensor]` for 1st
# param but got `Union[None, List[typing.Any], torch.Tensor]`.
return TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
elif verts_rgb is not None:
return TexturesVertex(verts_features=verts_rgb)
else:
raise ValueError(
"Textures either requires all three of (faces uvs, verts uvs, maps) or verts rgb"
)
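# Illustrative usage (comment only, not in the original source): instead of the
# deprecated Textures() factory, construct the concrete classes directly, e.g.
#   tex_rgb = TexturesVertex(verts_features=verts_rgb)  # verts_rgb: (N, V, 3)
#   tex_uv  = TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)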
class TexturesAtlas(TexturesBase):
def __init__(self, atlas: Union[torch.Tensor, List, None]):
"""
A texture representation where each face has a square texture map.
This is based on the implementation from SoftRasterizer [1].
Args:
atlas: (N, F, R, R, D) tensor giving the per face texture map.
The atlas can be created during obj loading with the
pytorch3d.io.load_obj function - in the input arguments
set `create_texture_atlas=True`. The atlas will be
returned in aux.texture_atlas.
The padded and list representations of the textures are stored
and the packed representations is computed on the fly and
not cached.
[1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
3D Reasoning', ICCV 2019
See also https://github.com/ShichenLiu/SoftRas/issues/21
"""
if isinstance(atlas, (list, tuple)):
correct_format = all(
(
torch.is_tensor(elem)
and elem.ndim == 4
and elem.shape[1] == elem.shape[2]
and elem.shape[1] == atlas[0].shape[1]
)
for elem in atlas
)
if not correct_format:
msg = (
"Expected atlas to be a list of tensors of shape (F, R, R, D) "
"with the same value of R."
)
raise ValueError(msg)
self._atlas_list = atlas
self._atlas_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(atlas)
self._num_faces_per_mesh = [len(a) for a in atlas]
if self._N > 0:
self.device = atlas[0].device
elif torch.is_tensor(atlas):
# pyre-fixme[16]: `Optional` has no attribute `ndim`.
if atlas.ndim != 5:
msg = "Expected atlas to be of shape (N, F, R, R, D); got %r"
raise ValueError(msg % repr(atlas.ndim))
self._atlas_padded = atlas
self._atlas_list = None
# pyre-fixme[16]: `Optional` has no attribute `device`.
self.device = atlas.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
# pyre-fixme[6]: Expected `Sized` for 1st param but got
# `Optional[torch.Tensor]`.
self._N = len(atlas)
# pyre-fixme[16]: `Optional` has no attribute `shape`.
max_F = atlas.shape[1]
self._num_faces_per_mesh = [max_F] * self._N
else:
raise ValueError("Expected atlas to be a tensor or list")
# The num_faces_per_mesh, N and valid
# are reset inside the Meshes object when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(atlas=self.atlas_padded().clone())
if self._atlas_list is not None:
tex._atlas_list = [atlas.clone() for atlas in self._atlas_list]
num_faces = (
self._num_faces_per_mesh.clone()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex.valid = self.valid.clone()
tex._num_faces_per_mesh = num_faces
return tex
def detach(self):
tex = self.__class__(atlas=self.atlas_padded().detach())
if self._atlas_list is not None:
tex._atlas_list = [atlas.detach() for atlas in self._atlas_list]
num_faces = (
self._num_faces_per_mesh.detach()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex.valid = self.valid.detach()
tex._num_faces_per_mesh = num_faces
return tex
def __getitem__(self, index):
props = ["atlas_list", "_num_faces_per_mesh"]
new_props = self._getitem(index, props=props)
atlas = new_props["atlas_list"]
if isinstance(atlas, list):
# multiple batch elements
new_tex = self.__class__(atlas=atlas)
elif torch.is_tensor(atlas):
# single element
new_tex = self.__class__(atlas=[atlas])
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def atlas_padded(self) -> torch.Tensor:
if self._atlas_padded is None:
if self.isempty():
self._atlas_padded = torch.zeros(
(self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device
)
else:
self._atlas_padded = _list_to_padded_wrapper(
self._atlas_list, pad_value=0.0
)
return self._atlas_padded
def atlas_list(self) -> List[torch.Tensor]:
if self._atlas_list is None:
if self.isempty():
self._atlas_padded = [
torch.empty((0, 0, 0, 3), dtype=torch.float32, device=self.device)
] * self._N
self._atlas_list = _padded_to_list_wrapper(
self._atlas_padded, split_size=self._num_faces_per_mesh
)
return self._atlas_list
def atlas_packed(self) -> torch.Tensor:
if self.isempty():
return torch.zeros(
(self._N, 0, 0, 3), dtype=torch.float32, device=self.device
)
atlas_list = self.atlas_list()
return list_to_packed(atlas_list)[0]
def extend(self, N: int) -> "TexturesAtlas":
new_props = self._extend(N, ["atlas_padded", "_num_faces_per_mesh"])
new_tex = self.__class__(atlas=new_props["atlas_padded"])
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
"""
This is similar to a nearest neighbor sampling and involves a
discretization step. The barycentric coordinates from
rasterization are used to find the nearest grid cell in the texture
atlas and the RGB is returned as the color.
This means that this step is differentiable with respect to the RGB
values of the texture atlas but not differentiable with respect to the
barycentric coordinates.
TODO: Add a different sampling mode which interpolates the barycentric
coordinates to sample the texture and will be differentiable w.r.t
the barycentric coordinates.
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
texels: (N, H, W, K, 3)
"""
N, H, W, K = fragments.pix_to_face.shape
atlas_packed = self.atlas_packed()
R = atlas_packed.shape[1]
bary = fragments.bary_coords
pix_to_face = fragments.pix_to_face
bary_w01 = bary[..., :2]
# pyre-fixme[16]: `bool` has no attribute `__getitem__`.
mask = (pix_to_face < 0)[..., None]
bary_w01 = torch.where(mask, torch.zeros_like(bary_w01), bary_w01)
# If barycentric coordinates are > 1.0 (in the case of
# blur_radius > 0.0), wxy might be > R. We need to clamp this
# index to R-1 to index into the texture atlas.
w_xy = (bary_w01 * R).to(torch.int64).clamp(max=R - 1) # (N, H, W, K, 2)
below_diag = (
bary_w01.sum(dim=-1) * R - w_xy.float().sum(dim=-1)
) <= 1.0 # (N, H, W, K)
w_x, w_y = w_xy.unbind(-1)
w_x = torch.where(below_diag, w_x, (R - 1 - w_x))
w_y = torch.where(below_diag, w_y, (R - 1 - w_y))
texels = atlas_packed[pix_to_face, w_y, w_x]
texels = texels * (pix_to_face >= 0)[..., None].float()
return texels
def faces_verts_textures_packed(self) -> torch.Tensor:
"""
Samples texture from each vertex for each face in the mesh.
For N meshes with {Fi} number of faces, it returns a
tensor of shape sum(Fi)x3xD (D = 3 for RGB).
You can use the utils function in structures.utils to convert the
packed representation to a list or padded.
"""
atlas_packed = self.atlas_packed()
# assume each face consists of (v0, v1, v2).
# to sample from the atlas we only need the first two barycentric coordinates.
# for details on how this texture sample works refer to the sample_textures function.
t0 = atlas_packed[:, 0, -1] # corresponding to v0 with bary = (1, 0)
t1 = atlas_packed[:, -1, 0] # corresponding to v1 with bary = (0, 1)
t2 = atlas_packed[:, 0, 0] # corresponding to v2 with bary = (0, 0)
return torch.stack((t0, t1, t2), dim=1)
def join_batch(self, textures: List["TexturesAtlas"]) -> "TexturesAtlas":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesAtlas object with the combined textures.
Args:
textures: List of TexturesAtlas objects
Returns:
new_tex: TexturesAtlas object with the combined
textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesAtlas) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesAtlas.")
atlas_list = []
atlas_list += self.atlas_list()
num_faces_per_mesh = self._num_faces_per_mesh
for tex in textures:
atlas_list += tex.atlas_list()
num_faces_per_mesh += tex._num_faces_per_mesh
new_tex = self.__class__(atlas=atlas_list)
new_tex._num_faces_per_mesh = num_faces_per_mesh
return new_tex
def join_scene(self) -> "TexturesAtlas":
"""
Return a new TexturesAtlas amalgamating the batch.
"""
return self.__class__(atlas=[torch.cat(self.atlas_list())])
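# Illustrative usage (comment only, not in the original source), following the class
# docstring above: the atlas returned by pytorch3d.io.load_obj can be wrapped directly, e.g.
#   verts, faces, aux = load_obj(obj_path, load_textures=True, create_texture_atlas=True)
#   tex = TexturesAtlas(atlas=[aux.texture_atlas])
#   mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=tex)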
class TexturesUV(TexturesBase):
def __init__(
self,
maps: Union[torch.Tensor, List[torch.Tensor]],
faces_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
verts_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
padding_mode: str = "border",
align_corners: bool = True,
):
"""
Textures are represented as a per mesh texture map and uv coordinates for each
vertex in each face. NOTE: this class only supports one texture map per mesh.
Args:
maps: texture map per mesh. This can either be a list of maps
[(H, W, 3)] or a padded tensor of shape (N, H, W, 3)
faces_uvs: (N, F, 3) LongTensor giving the index into verts_uvs
for each face
verts_uvs: (N, V, 2) tensor giving the uv coordinates per vertex
(a FloatTensor with values between 0 and 1).
align_corners: If true, the extreme values 0 and 1 for verts_uvs
indicate the centers of the edge pixels in the maps.
padding_mode: padding mode for outside grid values
("zeros", "border" or "reflection").
The align_corners and padding_mode arguments correspond to the arguments
of the `grid_sample` torch function. There is an informative illustration of
the two align_corners options at
https://discuss.pytorch.org/t/22663/9 .
An example of how the indexing into the maps, with align_corners=True,
works is as follows.
If maps[i] has shape [1001, 101] and the value of verts_uvs[i][j]
is [0.4, 0.3], then a value of j in faces_uvs[i] means a vertex
whose color is given by maps[i][700, 40]. padding_mode affects what
happens if a value in verts_uvs is less than 0 or greater than 1.
Note that increasing a value in verts_uvs[..., 0] increases an index
in maps, whereas increasing a value in verts_uvs[..., 1] _decreases_
an _earlier_ index in maps.
If align_corners=False, an example would be as follows.
If maps[i] has shape [1000, 100] and the value of verts_uvs[i][j]
is [0.405, 0.2995], then a value of j in faces_uvs[i] means a vertex
whose color is given by maps[i][700, 40].
When align_corners=False, padding_mode even matters for values in
verts_uvs slightly above 0 or slightly below 1. In this case, the
padding_mode matters if the first value is outside the interval
[0.0005, 0.9995] or if the second is outside the interval
[0.005, 0.995].
"""
self.padding_mode = padding_mode
self.align_corners = align_corners
if isinstance(faces_uvs, (list, tuple)):
for fv in faces_uvs:
if fv.ndim != 2 or fv.shape[-1] != 3:
msg = "Expected faces_uvs to be of shape (F, 3); got %r"
raise ValueError(msg % repr(fv.shape))
self._faces_uvs_list = faces_uvs
self._faces_uvs_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(faces_uvs)
self._num_faces_per_mesh = [len(fv) for fv in faces_uvs]
if self._N > 0:
self.device = faces_uvs[0].device
elif torch.is_tensor(faces_uvs):
if faces_uvs.ndim != 3 or faces_uvs.shape[-1] != 3:
msg = "Expected faces_uvs to be of shape (N, F, 3); got %r"
raise ValueError(msg % repr(faces_uvs.shape))
self._faces_uvs_padded = faces_uvs
self._faces_uvs_list = None
self.device = faces_uvs.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(faces_uvs)
max_F = faces_uvs.shape[1]
self._num_faces_per_mesh = [max_F] * self._N
else:
raise ValueError("Expected faces_uvs to be a tensor or list")
if isinstance(verts_uvs, (list, tuple)):
for fv in verts_uvs:
if fv.ndim != 2 or fv.shape[-1] != 2:
msg = "Expected verts_uvs to be of shape (V, 2); got %r"
raise ValueError(msg % repr(fv.shape))
self._verts_uvs_list = verts_uvs
self._verts_uvs_padded = None
if len(verts_uvs) != self._N:
raise ValueError(
"verts_uvs and faces_uvs must have the same batch dimension"
)
if not all(v.device == self.device for v in verts_uvs):
raise ValueError("verts_uvs and faces_uvs must be on the same device")
elif torch.is_tensor(verts_uvs):
if (
verts_uvs.ndim != 3
or verts_uvs.shape[-1] != 2
or verts_uvs.shape[0] != self._N
):
msg = "Expected verts_uvs to be of shape (N, V, 2); got %r"
raise ValueError(msg % repr(verts_uvs.shape))
self._verts_uvs_padded = verts_uvs
self._verts_uvs_list = None
if verts_uvs.device != self.device:
raise ValueError("verts_uvs and faces_uvs must be on the same device")
else:
raise ValueError("Expected verts_uvs to be a tensor or list")
if torch.is_tensor(maps):
# pyre-fixme[16]: `List` has no attribute `ndim`.
# pyre-fixme[16]: `List` has no attribute `shape`.
if maps.ndim != 4 or maps.shape[0] != self._N:
msg = "Expected maps to be of shape (N, H, W, 3); got %r"
raise ValueError(msg % repr(maps.shape))
self._maps_padded = maps
self._maps_list = None
elif isinstance(maps, (list, tuple)):
if len(maps) != self._N:
raise ValueError("Expected one texture map per mesh in the batch.")
self._maps_list = maps
if self._N > 0:
maps = _pad_texture_maps(maps, align_corners=self.align_corners)
else:
maps = torch.empty(
(self._N, 0, 0, 3), dtype=torch.float32, device=self.device
)
self._maps_padded = maps
else:
raise ValueError("Expected maps to be a tensor or list.")
if self._maps_padded.device != self.device:
raise ValueError("maps must be on the same device as verts/faces uvs.")
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(
self.maps_padded().clone(),
self.faces_uvs_padded().clone(),
self.verts_uvs_padded().clone(),
)
if self._maps_list is not None:
tex._maps_list = [m.clone() for m in self._maps_list]
if self._verts_uvs_list is not None:
tex._verts_uvs_list = [v.clone() for v in self._verts_uvs_list]
if self._faces_uvs_list is not None:
tex._faces_uvs_list = [f.clone() for f in self._faces_uvs_list]
num_faces = (
self._num_faces_per_mesh.clone()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex._num_faces_per_mesh = num_faces
tex.valid = self.valid.clone()
return tex
def detach(self):
tex = self.__class__(
self.maps_padded().detach(),
self.faces_uvs_padded().detach(),
self.verts_uvs_padded().detach(),
)
if self._maps_list is not None:
tex._maps_list = [m.detach() for m in self._maps_list]
if self._verts_uvs_list is not None:
tex._verts_uvs_list = [v.detach() for v in self._verts_uvs_list]
if self._faces_uvs_list is not None:
tex._faces_uvs_list = [f.detach() for f in self._faces_uvs_list]
num_faces = (
self._num_faces_per_mesh.detach()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex._num_faces_per_mesh = num_faces
tex.valid = self.valid.detach()
return tex
def __getitem__(self, index):
props = ["verts_uvs_list", "faces_uvs_list", "maps_list", "_num_faces_per_mesh"]
new_props = self._getitem(index, props)
faces_uvs = new_props["faces_uvs_list"]
verts_uvs = new_props["verts_uvs_list"]
maps = new_props["maps_list"]
# if index has multiple values then faces/verts/maps may be a list of tensors
if all(isinstance(f, (list, tuple)) for f in [faces_uvs, verts_uvs, maps]):
new_tex = self.__class__(
faces_uvs=faces_uvs,
verts_uvs=verts_uvs,
maps=maps,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
elif all(torch.is_tensor(f) for f in [faces_uvs, verts_uvs, maps]):
new_tex = self.__class__(
faces_uvs=[faces_uvs],
verts_uvs=[verts_uvs],
maps=[maps],
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def faces_uvs_padded(self) -> torch.Tensor:
if self._faces_uvs_padded is None:
if self.isempty():
self._faces_uvs_padded = torch.zeros(
(self._N, 0, 3), dtype=torch.float32, device=self.device
)
else:
self._faces_uvs_padded = list_to_padded(
self._faces_uvs_list, pad_value=0.0
)
return self._faces_uvs_padded
def faces_uvs_list(self) -> List[torch.Tensor]:
if self._faces_uvs_list is None:
if self.isempty():
self._faces_uvs_list = [
torch.empty((0, 3), dtype=torch.float32, device=self.device)
] * self._N
else:
self._faces_uvs_list = padded_to_list(
self._faces_uvs_padded, split_size=self._num_faces_per_mesh
)
return self._faces_uvs_list
def verts_uvs_padded(self) -> torch.Tensor:
if self._verts_uvs_padded is None:
if self.isempty():
self._verts_uvs_padded = torch.zeros(
(self._N, 0, 2), dtype=torch.float32, device=self.device
)
else:
self._verts_uvs_padded = list_to_padded(
self._verts_uvs_list, pad_value=0.0
)
return self._verts_uvs_padded
def verts_uvs_list(self) -> List[torch.Tensor]:
if self._verts_uvs_list is None:
if self.isempty():
self._verts_uvs_list = [
torch.empty((0, 2), dtype=torch.float32, device=self.device)
] * self._N
else:
# The number of vertices in the mesh and in verts_uvs can differ
# e.g. if a vertex is shared between 3 faces, it can
# have up to 3 different uv coordinates.
self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0))
return self._verts_uvs_list
# Currently only the padded maps are used.
def maps_padded(self) -> torch.Tensor:
return self._maps_padded
def maps_list(self) -> List[torch.Tensor]:
if self._maps_list is not None:
return self._maps_list
return self._maps_padded.unbind(0)
def extend(self, N: int) -> "TexturesUV":
new_props = self._extend(
N,
[
"maps_padded",
"verts_uvs_padded",
"faces_uvs_padded",
"_num_faces_per_mesh",
],
)
new_tex = self.__class__(
maps=new_props["maps_padded"],
faces_uvs=new_props["faces_uvs_padded"],
verts_uvs=new_props["verts_uvs_padded"],
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
"""
Interpolate a 2D texture map using uv vertex texture coordinates for each
face in the mesh. First interpolate the vertex uvs using barycentric coordinates
for each pixel in the rasterized output. Then interpolate the texture map
using the uv coordinate for each pixel.
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
texels: tensor of shape (N, H, W, K, C) giving the interpolated
texture for each pixel in the rasterized image.
"""
if self.isempty():
faces_verts_uvs = torch.zeros(
(self._N, 3, 2), dtype=torch.float32, device=self.device
)
else:
packing_list = [
i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list())
]
faces_verts_uvs = torch.cat(packing_list)
texture_maps = self.maps_padded()
# pixel_uvs: (N, H, W, K, 2)
pixel_uvs = interpolate_face_attributes(
fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs
)
N, H_out, W_out, K = fragments.pix_to_face.shape
N, H_in, W_in, C = texture_maps.shape # 3 for RGB
# pixel_uvs: (N, H, W, K, 2) -> (N, K, H, W, 2) -> (NK, H, W, 2)
pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(N * K, H_out, W_out, 2)
# textures.map:
# (N, H, W, C) -> (N, C, H, W) -> (1, N, C, H, W)
# -> expand (K, N, C, H, W) -> reshape (N*K, C, H, W)
texture_maps = (
texture_maps.permute(0, 3, 1, 2)[None, ...]
.expand(K, -1, -1, -1, -1)
.transpose(0, 1)
.reshape(N * K, C, H_in, W_in)
)
# Textures: (N*K, C, H, W), pixel_uvs: (N*K, H, W, 2)
# Now need to format the pixel uvs and the texture map correctly!
# From pytorch docs, grid_sample takes `grid` and `input`:
# grid specifies the sampling pixel locations normalized by
        #   the input spatial dimensions. It should have most
        #   values in the range of [-1, 1]. Values x = -1, y = -1
        #   correspond to the top-left pixel of input, and x = 1, y = 1 to the
        #   bottom-right pixel of input.
pixel_uvs = pixel_uvs * 2.0 - 1.0
texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map
if texture_maps.device != pixel_uvs.device:
texture_maps = texture_maps.to(pixel_uvs.device)
texels = F.grid_sample(
texture_maps,
pixel_uvs,
align_corners=self.align_corners,
padding_mode=self.padding_mode,
)
# texels now has shape (NK, C, H_out, W_out)
texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2)
return texels
def faces_verts_textures_packed(self) -> torch.Tensor:
"""
Samples texture from each vertex and for each face in the mesh.
For N meshes with {Fi} number of faces, it returns a
tensor of shape sum(Fi)x3xC (C = 3 for RGB).
You can use the utils function in structures.utils to convert the
packed representation to a list or padded.
"""
if self.isempty():
return torch.zeros(
(0, 3, self.maps_padded().shape[-1]),
dtype=torch.float32,
device=self.device,
)
else:
packing_list = [
i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list())
]
faces_verts_uvs = _list_to_padded_wrapper(
packing_list, pad_value=0.0
) # Nxmax(Fi)x3x2
texture_maps = self.maps_padded() # NxHxWxC
texture_maps = texture_maps.permute(0, 3, 1, 2) # NxCxHxW
faces_verts_uvs = faces_verts_uvs * 2.0 - 1.0
texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map
textures = F.grid_sample(
texture_maps,
faces_verts_uvs,
align_corners=self.align_corners,
padding_mode=self.padding_mode,
) # NxCxmax(Fi)x3
textures = textures.permute(0, 2, 3, 1) # Nxmax(Fi)x3xC
textures = _padded_to_list_wrapper(
textures, split_size=self._num_faces_per_mesh
) # list of N {Fix3xC} tensors
return list_to_packed(textures)[0]
def join_batch(self, textures: List["TexturesUV"]) -> "TexturesUV":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesUV object with the combined textures.
Args:
textures: List of TexturesUV objects
Returns:
new_tex: TexturesUV object with the combined
textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesUV) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesUV.")
padding_modes_same = all(
tex.padding_mode == self.padding_mode for tex in textures
)
if not padding_modes_same:
raise ValueError("All textures must have the same padding_mode.")
align_corners_same = all(
tex.align_corners == self.align_corners for tex in textures
)
if not align_corners_same:
raise ValueError("All textures must have the same align_corners value.")
verts_uvs_list = []
faces_uvs_list = []
maps_list = []
faces_uvs_list += self.faces_uvs_list()
verts_uvs_list += self.verts_uvs_list()
maps_list += self.maps_list()
num_faces_per_mesh = self._num_faces_per_mesh
for tex in textures:
verts_uvs_list += tex.verts_uvs_list()
faces_uvs_list += tex.faces_uvs_list()
num_faces_per_mesh += tex._num_faces_per_mesh
maps_list += tex.maps_list()
new_tex = self.__class__(
maps=maps_list,
verts_uvs=verts_uvs_list,
faces_uvs=faces_uvs_list,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
new_tex._num_faces_per_mesh = num_faces_per_mesh
return new_tex
def _place_map_into_single_map(
self, single_map: torch.Tensor, map_: torch.Tensor, location: PackedRectangle
) -> None:
"""
Copy map into a larger tensor single_map at the destination specified by location.
If align_corners is False, we add the needed border around the destination.
Used by join_scene.
Args:
single_map: (total_H, total_W, 3)
map_: (H, W, 3) source data
location: where to place map
"""
do_flip = location.flipped
source = map_.transpose(0, 1) if do_flip else map_
border_width = 0 if self.align_corners else 1
lower_u = location.x + border_width
lower_v = location.y + border_width
upper_u = lower_u + source.shape[0]
upper_v = lower_v + source.shape[1]
single_map[lower_u:upper_u, lower_v:upper_v] = source
if self.padding_mode != "zeros" and not self.align_corners:
single_map[lower_u - 1, lower_v:upper_v] = single_map[
lower_u, lower_v:upper_v
]
single_map[upper_u, lower_v:upper_v] = single_map[
upper_u - 1, lower_v:upper_v
]
single_map[lower_u:upper_u, lower_v - 1] = single_map[
lower_u:upper_u, lower_v
]
single_map[lower_u:upper_u, upper_v] = single_map[
lower_u:upper_u, upper_v - 1
]
single_map[lower_u - 1, lower_v - 1] = single_map[lower_u, lower_v]
single_map[lower_u - 1, upper_v] = single_map[lower_u, upper_v - 1]
single_map[upper_u, lower_v - 1] = single_map[upper_u - 1, lower_v]
single_map[upper_u, upper_v] = single_map[upper_u - 1, upper_v - 1]
def join_scene(self) -> "TexturesUV":
"""
Return a new TexturesUV amalgamating the batch.
We calculate a large single map which contains the original maps,
and find verts_uvs to point into it. This will not replicate
behavior of padding for verts_uvs values outside [0,1].
If align_corners=False, we need to add an artificial border around
every map.
We use the function `pack_unique_rectangles` to provide a layout for
the single map. This means that if self was created with a list of maps,
and to() has not been called, and there were two maps which were exactly
the same tensor object, then they will become the same data in the unified map.
_place_map_into_single_map is used to copy the maps into the single map.
The merging of verts_uvs and faces_uvs is handled locally in this function.
"""
maps = self.maps_list()
heights_and_widths = []
extra_border = 0 if self.align_corners else 2
for map_ in maps:
heights_and_widths.append(
Rectangle(
map_.shape[0] + extra_border, map_.shape[1] + extra_border, id(map_)
)
)
merging_plan = pack_unique_rectangles(heights_and_widths)
# pyre-fixme[16]: `Tensor` has no attribute `new_zeros`.
single_map = maps[0].new_zeros((*merging_plan.total_size, 3))
verts_uvs = self.verts_uvs_list()
verts_uvs_merged = []
for map_, loc, uvs in zip(maps, merging_plan.locations, verts_uvs):
new_uvs = uvs.clone()
if loc.is_first:
self._place_map_into_single_map(single_map, map_, loc)
do_flip = loc.flipped
x_shape = map_.shape[1] if do_flip else map_.shape[0]
y_shape = map_.shape[0] if do_flip else map_.shape[1]
if do_flip:
# Here we have flipped / transposed the map.
# In uvs, the y values are decreasing from 1 to 0 and the x
# values increase from 0 to 1. We subtract all values from 1
# as the x's become y's and the y's become x's.
new_uvs = 1.0 - new_uvs[:, [1, 0]]
if TYPE_CHECKING:
new_uvs = torch.Tensor(new_uvs)
# If align_corners is True, then an index of x (where x is in
# the range 0 .. map_.shape[]-1) in one of the input maps
# was hit by a u of x/(map_.shape[]-1).
# That x is located at the index loc[] + x in the single_map, and
# to hit that we need u to equal (loc[] + x) / (total_size[]-1)
# so the old u should be mapped to
# { u*(map_.shape[]-1) + loc[] } / (total_size[]-1)
# If align_corners is False, then an index of x (where x is in
# the range 1 .. map_.shape[]-2) in one of the input maps
# was hit by a u of (x+0.5)/(map_.shape[]).
# That x is located at the index loc[] + 1 + x in the single_map,
# (where the 1 is for the border)
# and to hit that we need u to equal (loc[] + 1 + x + 0.5) / (total_size[])
# so the old u should be mapped to
# { loc[] + 1 + u*map_.shape[]-0.5 + 0.5 } / (total_size[])
# = { loc[] + 1 + u*map_.shape[] } / (total_size[])
# We change the y's in new_uvs for the scaling of height,
# and the x's for the scaling of width.
# That is why the 1's and 0's are mismatched in these lines.
one_if_align = 1 if self.align_corners else 0
one_if_not_align = 1 - one_if_align
denom_x = merging_plan.total_size[0] - one_if_align
scale_x = x_shape - one_if_align
denom_y = merging_plan.total_size[1] - one_if_align
scale_y = y_shape - one_if_align
new_uvs[:, 1] *= scale_x / denom_x
new_uvs[:, 1] += (loc.x + one_if_not_align) / denom_x
new_uvs[:, 0] *= scale_y / denom_y
new_uvs[:, 0] += (loc.y + one_if_not_align) / denom_y
verts_uvs_merged.append(new_uvs)
faces_uvs_merged = []
offset = 0
for faces_uvs_, verts_uvs_ in zip(self.faces_uvs_list(), verts_uvs):
faces_uvs_merged.append(offset + faces_uvs_)
offset += verts_uvs_.shape[0]
return self.__class__(
maps=[single_map],
verts_uvs=[torch.cat(verts_uvs_merged)],
faces_uvs=[torch.cat(faces_uvs_merged)],
align_corners=self.align_corners,
padding_mode=self.padding_mode,
)
def centers_for_image(self, index):
"""
Return the locations in the texture map which correspond to the given
verts_uvs, for one of the meshes. This is potentially useful for
visualizing the data. See the texturesuv_image_matplotlib and
texturesuv_image_PIL functions.
Args:
index: batch index of the mesh whose centers to return.
Returns:
centers: coordinates of points in the texture image
- a FloatTensor of shape (V,2)
"""
if self._N != 1:
raise ValueError(
"This function only supports plotting textures for one mesh."
)
texture_image = self.maps_padded()
verts_uvs = self.verts_uvs_list()[index][None]
_, H, W, _3 = texture_image.shape
coord1 = torch.arange(W).expand(H, W)
coord2 = torch.arange(H)[:, None].expand(H, W)
coords = torch.stack([coord1, coord2])[None]
with torch.no_grad():
# Get xy cartesian coordinates based on the uv coordinates
centers = F.grid_sample(
torch.flip(coords.to(texture_image), [2]),
# Convert from [0, 1] -> [-1, 1] range expected by grid sample
verts_uvs[:, None] * 2.0 - 1,
align_corners=self.align_corners,
padding_mode=self.padding_mode,
).cpu()
centers = centers[0, :, 0].T
return centers
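# ---------------------------------------------------------------------------
# Editorial usage sketch for TexturesUV (not part of the original module).
# The tensor shapes and values below are invented for illustration; the calls
# themselves (the constructor, extend and join_scene) are the methods above.
def _texturesuv_usage_example():  # pragma: no cover - documentation only
    maps = torch.rand(1, 8, 8, 3)                                     # one 8x8 RGB texture map
    faces_uvs = torch.tensor([[[0, 1, 2]]])                           # one face indexing into verts_uvs
    verts_uvs = torch.tensor([[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]])  # uv coordinate per vertex
    tex = TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
    batch = tex.extend(2)        # replicate the texture for a batch of 2 meshes
    joined = batch.join_scene()  # merge the batch back into a single texture map
    return tex, batch, joined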
class TexturesVertex(TexturesBase):
def __init__(
self,
verts_features: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
):
"""
Batched texture representation where each vertex in a mesh
has a D dimensional feature vector.
Args:
verts_features: list of (Vi, D) or (N, V, D) tensor giving a feature
vector with arbitrary dimensions for each vertex.
"""
if isinstance(verts_features, (tuple, list)):
correct_shape = all(
(torch.is_tensor(v) and v.ndim == 2) for v in verts_features
)
if not correct_shape:
raise ValueError(
"Expected verts_features to be a list of tensors of shape (V, D)."
)
self._verts_features_list = verts_features
self._verts_features_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(verts_features)
self._num_verts_per_mesh = [len(fv) for fv in verts_features]
if self._N > 0:
self.device = verts_features[0].device
elif torch.is_tensor(verts_features):
if verts_features.ndim != 3:
msg = "Expected verts_features to be of shape (N, V, D); got %r"
raise ValueError(msg % repr(verts_features.shape))
self._verts_features_padded = verts_features
self._verts_features_list = None
self.device = verts_features.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(verts_features)
max_F = verts_features.shape[1]
self._num_verts_per_mesh = [max_F] * self._N
else:
raise ValueError("verts_features must be a tensor or list of tensors")
# This is set inside the Meshes object when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(self.verts_features_padded().clone())
if self._verts_features_list is not None:
tex._verts_features_list = [f.clone() for f in self._verts_features_list]
tex._num_verts_per_mesh = self._num_verts_per_mesh.copy()
tex.valid = self.valid.clone()
return tex
def detach(self):
tex = self.__class__(self.verts_features_padded().detach())
if self._verts_features_list is not None:
tex._verts_features_list = [f.detach() for f in self._verts_features_list]
tex._num_verts_per_mesh = self._num_verts_per_mesh.copy()
tex.valid = self.valid.detach()
return tex
def __getitem__(self, index):
props = ["verts_features_list", "_num_verts_per_mesh"]
new_props = self._getitem(index, props)
verts_features = new_props["verts_features_list"]
if isinstance(verts_features, list):
new_tex = self.__class__(verts_features=verts_features)
elif torch.is_tensor(verts_features):
new_tex = self.__class__(verts_features=[verts_features])
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
return new_tex
def verts_features_padded(self) -> torch.Tensor:
if self._verts_features_padded is None:
if self.isempty():
self._verts_features_padded = torch.zeros(
(self._N, 0, 3, 0), dtype=torch.float32, device=self.device
)
else:
self._verts_features_padded = list_to_padded(
self._verts_features_list, pad_value=0.0
)
return self._verts_features_padded
def verts_features_list(self) -> List[torch.Tensor]:
if self._verts_features_list is None:
if self.isempty():
self._verts_features_list = [
torch.empty((0, 3), dtype=torch.float32, device=self.device)
] * self._N
else:
self._verts_features_list = padded_to_list(
self._verts_features_padded, split_size=self._num_verts_per_mesh
)
return self._verts_features_list
def verts_features_packed(self) -> torch.Tensor:
if self.isempty():
return torch.zeros((self._N, 3, 0), dtype=torch.float32, device=self.device)
verts_features_list = self.verts_features_list()
return list_to_packed(verts_features_list)[0]
def extend(self, N: int) -> "TexturesVertex":
new_props = self._extend(N, ["verts_features_padded", "_num_verts_per_mesh"])
new_tex = self.__class__(verts_features=new_props["verts_features_padded"])
new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
return new_tex
def sample_textures(self, fragments, faces_packed=None) -> torch.Tensor:
"""
Determine the color for each rasterized face. Interpolate the colors for
vertices which form the face using the barycentric coordinates.
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
            texels: A texture value per pixel of shape (N, H, W, K, C).
            There will be one C-dimensional value for each element in
fragments.pix_to_face.
"""
verts_features_packed = self.verts_features_packed()
faces_verts_features = verts_features_packed[faces_packed]
texels = interpolate_face_attributes(
fragments.pix_to_face, fragments.bary_coords, faces_verts_features
)
return texels
def faces_verts_textures_packed(self, faces_packed=None) -> torch.Tensor:
"""
Samples texture from each vertex and for each face in the mesh.
For N meshes with {Fi} number of faces, it returns a
tensor of shape sum(Fi)x3xC (C = 3 for RGB).
You can use the utils function in structures.utils to convert the
packed representation to a list or padded.
"""
verts_features_packed = self.verts_features_packed()
faces_verts_features = verts_features_packed[faces_packed]
return faces_verts_features
def join_batch(self, textures: List["TexturesVertex"]) -> "TexturesVertex":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesVertex object with the combined textures.
Args:
textures: List of TexturesVertex objects
Returns:
new_tex: TexturesVertex object with the combined
textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesVertex) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesVertex.")
verts_features_list = []
verts_features_list += self.verts_features_list()
num_verts_per_mesh = self._num_verts_per_mesh.copy()
for tex in textures:
verts_features_list += tex.verts_features_list()
num_verts_per_mesh += tex._num_verts_per_mesh
new_tex = self.__class__(verts_features=verts_features_list)
new_tex._num_verts_per_mesh = num_verts_per_mesh
return new_tex
def join_scene(self) -> "TexturesVertex":
"""
Return a new TexturesVertex amalgamating the batch.
"""
return self.__class__(verts_features=[torch.cat(self.verts_features_list())])
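# ---------------------------------------------------------------------------
# Editorial usage sketch for TexturesVertex (not part of the original module).
# The per-vertex feature tensor is invented; in practice it usually holds one
# RGB colour per mesh vertex.
def _texturesvertex_usage_example():  # pragma: no cover - documentation only
    verts_features = torch.rand(1, 4, 3)  # batch of 1 mesh, 4 vertices, RGB features
    tex = TexturesVertex(verts_features=verts_features)
    batch = tex.extend(3)                 # replicate into a batch of 3 meshes
    merged = batch.join_scene()           # concatenate back into a single mesh texture
    return tex, batch, merged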
| [
"[email protected]"
] | |
bdbc62414e39c5378751c220020b0e1074e5603e | 560136cbc70809a66d7fd653fadcc5f6ac2f7b8d | /buy_info.py | cb3e350fcee2935aba754ef4481f5686867ed763 | [] | no_license | Python51888/Tickets12306 | 4b3c7381bbf163de4b148e6c718977f633323197 | 25db032a835f7617410e080143668a11663573a8 | refs/heads/master | 2020-06-15T04:21:33.352932 | 2018-09-25T07:50:50 | 2018-09-25T07:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,672 | py | import tkinter as tk
import re
import tkinter.messagebox  # tk.messagebox.* is used below; the submodule must be imported explicitly
test = 0
def confirm_snp(t_file):
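    """
    Show a Tk confirmation window for one ticket purchase.

    Editorial docstring; the field layout is inferred from how ``t_file`` is
    unpacked below: (date, train number, departure station, departure time,
    arrival station, arrival time, passenger dict, seat-availability list,
    price list). Returns the selected passenger record (a list) on confirm,
    1 if the user goes back, or 0 if the window is simply closed.
    """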
time = t_file[0]
checi = t_file[1]
start_station = t_file[2]
start_time = t_file[3]
start_time = start_time[:2] + ':' + start_time[2:]
stop_station = t_file[4]
stop_time = t_file[5]
stop_time = stop_time[:2] + ':' + stop_time[2:]
zuowei = t_file[7]
user = dict(t_file[6])
prices = t_file[8]
checixinxi = [checi, start_station, start_time, stop_station, stop_time]
root = tk.Tk()
# root.geometry('830x350+500+200')
root.title('购票信息')
root.resizable(width=False, height=False)
    # Train information section
# l1 = tk.Label(root, text='列车信息(余票信息仅供参考)')
# l1.pack(anchor='nw', ipady=20)
ff = tk.LabelFrame(root, text='列车信息(余票信息仅供参考)')
ff.pack()
la1 = tk.Label(ff, text='-------车次信息---------------------------------')
la1.pack(anchor='w', padx=100, pady=10)
# can1 = tk.Canvas(ff,bg = 'blue')
# can1.pack()
    # Train information display
f = tk.Frame(ff)
f.pack(anchor='w', padx=100, pady=10)
l2 = tk.Label(f, text=time + ' ')
l2.pack(side=tk.LEFT)
l3 = tk.Label(f, text=checi + ' ')
l3.pack(side=tk.LEFT)
l4 = tk.Label(f, text=start_station)
l4.pack(side=tk.LEFT)
l5 = tk.Label(f, text=start_time + ' --> ')
l5.pack(side=tk.LEFT)
l6 = tk.Label(f, text=stop_station)
l6.pack(side=tk.LEFT)
l7 = tk.Label(f, text=stop_time)
l7.pack(side=tk.LEFT)
la2 = tk.Label(ff, text='-------票价信息---------------------------------')
la2.pack(anchor='w', padx=100, pady=10)
    # Seat information
f2 = tk.Frame(ff)
f2.pack(anchor='w', padx=100, pady=10)
# "YZ_num": "1", # 硬座
# "RZ_num": "2", # 软座
# "YW_num": "3", # 硬卧
# "RW_num": "4", # 软卧
# "GR_num": "6", # 高级软卧
# "TZ_num": "P", # 特等座
# "WZ_num": "WZ", # 无座
# "ZE_num": "O", # 二等座
# "ZY_num": "M", # 一等座
# "SWZ_num": "9", # 商务座
# # zuo_wei = {"YZ_num'": '1',"RZ_num'":'2',"YW_num'":'3',
# "RW_num'":'4',"GR_num'":'6',"TZ_num'":'P',"WZ_num'":'WZ',"ZE_num'":'O',"ZY_num'":'M',"SWZ_num'":'9'}
zuo_weidict = {"YZ_num'": "硬座", "RZ_num'": "软座", "YW_num'": "硬卧", "RW_num'": "软卧",
"GR_num'": "高级软卧", "TZ_num'": "特等座", "WZ_num'": "无座", "ZE_num'": "二等座",
"ZY_num'": "一等座", "SWZ_num'": "商务座"}
v = tk.IntVar(root)
la3 = tk.Label(ff, text='-------乘客信息---------------------------------')
la3.pack(anchor='w', padx=100, pady=10)
for i in range(len(zuowei)):
s = zuowei[i - 1].split(':')
p = prices[i - 1].split(':')
p1 = p[0].split('_')
s1 = s[0].split('_')
regex = re.search(r"'0*(\d+)(\d)'$", p[1])
price1 = regex.group(1) + '.' + regex.group(2)
if s[0] in zuo_weidict:
n = zuo_weidict[s[0]]
rb = tk.Radiobutton(f2, text=n + '(¥' + price1 + ')' + ' ' + '剩余:' + eval(s[1]) + '张', value=i, variable=v)
rb.pack(side=tk.LEFT)
    # Passenger information
f3 = tk.Frame(ff)
f3.pack(anchor='w', padx=100)
user1 = list(user.values())
v2 = tk.IntVar(root)
for x in range(len(user)):
userinfo = user1[x - 1]
rb1 = tk.Radiobutton(f3, text='姓名:' + userinfo[0] + ' 性别:' + userinfo[1]
+ ' 身份证:' + userinfo[2] + ' 票种:' + userinfo[3] + ' 电话:' + userinfo[4],
variable=v2, value=x)
rb1.pack(anchor='nw', ipady=7)
    # Submit buttons
f4 = tk.Frame(ff)
f4.pack(anchor='w', pady=20, padx=150)
btnback = tk.Button(f4, width=15, text='返回', command=lambda: back(root))
btnback.pack(side=tk.LEFT, padx=50)
btn = tk.Button(f4, width=15, text='提交',
command=lambda: onbtn(zuowei[v.get() - 1], user1[v2.get() - 1], checixinxi, root))
btn.pack(side=tk.LEFT, padx=50)
# root.maxsize(830, 350)
# root.minsize(850, 350)
root.mainloop()
return test
def onbtn(a, b, c, root):
global test
    # Map the user's selections back to seat codes and passenger data
zuo_wei = {"YZ_num'": '1', "RZ_num'": '2', "YW_num'": '3',
"RW_num'": '4', "GR_num'": '6', "TZ_num'": 'P', "WZ_num'": 'WZ', "ZE_num'": 'O', "ZY_num'": 'M',
"SWZ_num'": '9'}
zuo_weidict = {"YZ_num'": "硬座", "RZ_num'": "软座", "YW_num'": "硬卧", "RW_num'": "软卧",
"GR_num'": "高级软卧", "TZ_num'": "特等座", "WZ_num'": "无座", "ZE_num'": "二等座",
"ZY_num'": "一等座", "SWZ_num'": "商务座"}
ticket = a.split(':')
b.insert(0, zuo_wei[ticket[0]])
zuoweixinxi = zuo_weidict[ticket[0]]
yonghuxinxi = '车次:' + c[0] + ' ' + c[1] + ' ' + c[2] + '---' + c[3] + ' ' + c[4] \
+ '\n姓名:' + b[1] + ' ' + '性别:' + b[2] + ' ' + '\n身份证:' + b[3] + ' ' + '票种:' + b[4] + ' ' + '\n电话:' + \
b[5] + ' ' + '\n选座信息:' + zuoweixinxi
msg = tk.messagebox.askokcancel(title='请确认购票信息', message=yonghuxinxi)
if msg == True:
test = b
msg1 = tk.messagebox.showinfo('成功', '购票成功,请尽快登录官网付款')
root.destroy()
def back(root):
global test
test = 1
root.destroy()
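if __name__ == '__main__':
    # Editorial example with made-up data, only to illustrate the structure
    # expected by confirm_snp(); real values come from the 12306 query step.
    _demo_t_file = (
        '2020-05-01', 'G101', 'BeiJing', '0900', 'ShangHai', '1420',
        {'0': ['ZhangSan', 'M', '110101199001011234', 'Adult', '13800000000']},
        ["ZE_num':'12'"],            # seat availability strings (hypothetical format)
        ["ZE_num_Price:'0051765'"],  # price strings (hypothetical format)
    )
    # confirm_snp(_demo_t_file)  # uncomment to open the confirmation window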
| [
"[email protected]"
] | |
b64d844d001553a64547a137b9166e561f22ae46 | 77101d8433d33ebe948c21757047457067136c7c | /Code/81-90/87.py | 9d31c515f0e2db48e99a86593a833c62b4eceef5 | [] | no_license | ITlearning/CodeUP_Python | 0665291c4731fde8b7f2f86e0519c4205240bf8f | 0066e4a25764e8097b08137691e4ad75ece2d519 | refs/heads/main | 2023-04-30T07:30:28.439123 | 2021-05-20T05:39:04 | 2021-05-20T05:39:04 | 347,662,657 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | a = int(input())
for i in range(1,a+1) :
if i % 3 == 0 :
continue
print(i, end=' ')
| [
"[email protected]"
] | |
9de8911cbd66e83d3e396e4ca7e788c763423aff | 7822e658e88f3f948732e6e3e588ca4b2eb5662a | /diapos/programas/caso-asistencia-estrella.py | e678137cde84bc1b8ad7ea849799be70fe97ac53 | [] | no_license | carlos2020Lp/progra-utfsm | 632b910e96c17b9f9bb3d28329e70de8aff64570 | a0231d62837c54d4eb8bbf00bb1b84484efc1af2 | refs/heads/master | 2021-05-28T06:00:35.711630 | 2015-02-05T02:19:18 | 2015-02-05T02:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | >>> alumno_estrella(asistencia)
'Fulanita'
| [
"[email protected]"
] | |
698adef33400954612ddb390d4e2b4be321adb6a | 9788df18d5adaa469a0cb51f47309cd7401201e5 | /alisdk/top/api/rest/SimbaNonsearchDemographicsUpdateRequest.py | a8f2e81f002dde34594772cd1fb1c4a9b8124c77 | [] | no_license | sevennothing/aliyunTestPrj | cf690ce4765497f1b16359b82ef64f1ef992713c | 1b3e883d32c759e03fe5053c50e9a973f59bbffc | refs/heads/master | 2021-01-17T03:15:59.082544 | 2015-03-11T14:16:58 | 2015-03-11T14:16:58 | 32,001,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | '''
Created by auto_sdk on 2014-11-20 12:53:43
'''
from top.api.base import RestApi
class SimbaNonsearchDemographicsUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.campaign_id = None
self.demographic_id_price_json = None
self.nick = None
def getapiname(self):
return 'taobao.simba.nonsearch.demographics.update'
| [
"[email protected]"
] | |
bc486f952345fcf08f137b8312608b15be52db9c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03146/s029247670.py | 51dd873af5ba4128376ae5aa863e5f55ee218fdc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | import sys
s = int(input())
a = []
a.append(s)
i = 1
while True:
n = 0
if a[i-1] %2 ==0:
n = a[i-1]/2
else:
n = a[i-1]*3 +1
i +=1
if n in a:
print(i)
sys.exit()
a.append(n)
| [
"[email protected]"
] | |
29fcaaf93f932a8ac6846c0055accf37a56c5ef7 | 61c9e13bac533432a54d62ce9c063f99aa7acf04 | /akshare/economic/macro_bank.py | 959dad9cfdaa56a57f2c76657f290419635f6cf8 | [
"MIT"
] | permissive | guangxinli/akshare | 2c91aab074b16ede7d426279999e6b53e8ed16ec | e27666f94051749e3a2d8c4b669b43f03e16d7cb | refs/heads/master | 2022-04-25T00:29:40.314978 | 2020-04-21T13:22:08 | 2020-04-21T13:22:08 | 257,750,911 | 1 | 0 | MIT | 2020-04-22T00:41:59 | 2020-04-22T00:41:58 | null | UTF-8 | Python | false | false | 31,394 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2020/1/9 22:52
contact: [email protected]
desc: Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates
https://datacenter.jin10.com/economic
Federal Reserve interest rate decision report
European Central Bank (ECB) decision report
Reserve Bank of New Zealand (RBNZ) decision report
People's Bank of China (PBoC) decision report
Swiss National Bank (SNB) decision report
Bank of England (BoE) decision report
Reserve Bank of Australia (RBA) decision report
Bank of Japan (BoJ) decision report
Central Bank of Russia decision report
Reserve Bank of India (RBI) decision report
Central Bank of Brazil decision report
"""
import json
import time
import pandas as pd
import requests
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Fed interest rate decision
def macro_bank_usa_interest_rate():
    """
    Federal Reserve interest rate decision report, data available from 1982-09-27 onwards
    https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
    :return: Fed interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "24",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
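# Illustrative return value (dates/values are hypothetical, shown only to
# document the output format shared by all functions in this module):
# >>> macro_bank_usa_interest_rate().tail(2)
# 2019-10-31    1.75
# 2019-12-12    1.75
# Name: usa_interest_rate, dtype: float64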
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - ECB interest rate decision
def macro_bank_euro_interest_rate():
    """
    European Central Bank interest rate decision report, data available from 1999-01-01 onwards
    https://datacenter.jin10.com/reportType/dc_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v=1578581663
    :return: ECB interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["欧元区利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "21",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "euro_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - RBNZ interest rate decision
def macro_bank_newzealand_interest_rate():
    """
    Reserve Bank of New Zealand interest rate decision report, data available from 1999-04-01 onwards
    https://datacenter.jin10.com/reportType/dc_newzealand_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v=1578582075
    :return: RBNZ interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["新西兰利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "23",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "newzealand_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - PBoC interest rate decision
def macro_bank_china_interest_rate():
    """
    People's Bank of China interest rate report, data available from 1991-05-01 onwards
    https://datacenter.jin10.com/reportType/dc_china_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v=1578582163
    :return: PBoC interest rate report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["中国人民银行利率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "91",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "china_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - SNB interest rate decision
def macro_bank_switzerland_interest_rate():
    """
    Swiss National Bank interest rate decision report, data available from 2008-03-13 onwards
    https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
    :return: SNB interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "25",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "switzerland_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - BoE interest rate decision
def macro_bank_english_interest_rate():
    """
    Bank of England interest rate decision report, data available from 1970-01-01 onwards
    https://datacenter.jin10.com/reportType/dc_english_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_english_interest_rate_decision_all.js?v=1578582331
    :return: BoE interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_english_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["英国利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "26",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "english_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - RBA interest rate decision
def macro_bank_australia_interest_rate():
    """
    Reserve Bank of Australia interest rate decision report, data available from 1980-02-01 onwards
    https://datacenter.jin10.com/reportType/dc_australia_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_australia_interest_rate_decision_all.js?v=1578582414
    :return: RBA interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_australia_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["澳大利亚利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "27",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "australia_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - BoJ interest rate decision
def macro_bank_japan_interest_rate():
    """
    Bank of Japan interest rate decision report, data available from 2008-02-14 onwards
    https://datacenter.jin10.com/reportType/dc_japan_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_japan_interest_rate_decision_all.js?v=1578582485
    :return: BoJ interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_japan_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["日本利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "22",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "japan_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Bank of Russia interest rate decision
def macro_bank_russia_interest_rate():
    """
    Bank of Russia interest rate decision report, data available from 2003-06-01 onwards
    https://datacenter.jin10.com/reportType/dc_russia_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_russia_interest_rate_decision_all.js?v=1578582572
    :return: Bank of Russia interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_russia_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["俄罗斯利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "64",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "russia_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - RBI interest rate decision
def macro_bank_india_interest_rate():
    """
    Reserve Bank of India interest rate decision report, data available from 2000-08-01 onwards
    https://datacenter.jin10.com/reportType/dc_india_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_india_interest_rate_decision_all.js?v=1578582645
    :return: RBI interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_india_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["印度利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "68",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "india_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - BCB interest rate decision
def macro_bank_brazil_interest_rate():
    """
    Central Bank of Brazil interest rate decision report, data available from 2008-02-01 onwards
    https://datacenter.jin10.com/reportType/dc_brazil_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_brazil_interest_rate_decision_all.js?v=1578582718
    :return: Central Bank of Brazil interest rate decision report - latest value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_brazil_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["巴西利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "55",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "brazil_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
if __name__ == "__main__":
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - Fed interest rate decision report
    macro_bank_usa_interest_rate_df = macro_bank_usa_interest_rate()
    print(macro_bank_usa_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - ECB interest rate decision report
    macro_bank_euro_interest_rate_df = macro_bank_euro_interest_rate()
    print(macro_bank_euro_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - RBNZ interest rate decision report
    macro_bank_newzealand_interest_rate_df = macro_bank_newzealand_interest_rate()
    print(macro_bank_newzealand_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - PBoC interest rate decision report
    macro_bank_china_interest_rate_df = macro_bank_china_interest_rate()
    print(macro_bank_china_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - SNB interest rate decision report
    macro_bank_switzerland_interest_rate_df = macro_bank_switzerland_interest_rate()
    print(macro_bank_switzerland_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - BoE interest rate decision report
    macro_bank_english_interest_rate_df = macro_bank_english_interest_rate()
    print(macro_bank_english_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - RBA interest rate decision report
    macro_bank_australia_interest_rate_df = macro_bank_australia_interest_rate()
    print(macro_bank_australia_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - BoJ interest rate decision report
    macro_bank_japan_interest_rate_df = macro_bank_japan_interest_rate()
    print(macro_bank_japan_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - Bank of Russia interest rate decision report
    macro_bank_russia_interest_rate_df = macro_bank_russia_interest_rate()
    print(macro_bank_russia_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - RBI interest rate decision report
    macro_bank_india_interest_rate_df = macro_bank_india_interest_rate()
    print(macro_bank_india_interest_rate_df)
    # Jin10 Data Center - Economic Indicators - Central Bank Rates - BCB interest rate decision report
    macro_bank_brazil_interest_rate_df = macro_bank_brazil_interest_rate()
    print(macro_bank_brazil_interest_rate_df)
| [
"[email protected]"
] | |
3035286654d17c751f091358d055f45857303648 | b136cbf689dfd1171679b1d7741ba910f2ed2161 | /flask_appbuilder/messages.py | 465d23c1e5a6b4d9d53374333a4046e1e9253990 | [
"BSD-3-Clause"
] | permissive | dbongo/Flask-AppBuilder | 7b34b582f10eef2877b010128ea3d7bfa6f23907 | 2de58428507afec0595fa762e977f539448878d5 | refs/heads/master | 2020-12-25T22:06:48.882882 | 2013-12-16T23:39:27 | 2013-12-16T23:39:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | from flask.ext.babel import lazy_gettext as _
"""
This Module is not used.
Just use it to automate Babel extraction
"""
auto_translations_import = [
_("Security"),
_("List Users"),
_("Base Permissions"),
_("Views/Menus"),
_("Permission on Views/Menus"),
_("Search"),
_("Back"),
_("Save"),
_("This field is required."),
_("Not a valid date value"),
_("No records found")
]
| [
"[email protected]"
] | |
85068524922ac829dc1894bca46e44fbc1dde60b | 64bcadfc9cab7013412a3dafed4624d70d2a5215 | /pySDC/implementations/controller_classes/allinclusive_multigrid_nonMPI.py | 7efac3411d82dc05fc0f870b28ff4de4537f76cc | [
"BSD-2-Clause"
] | permissive | schreiberx/pySDC | 9e7783ac782074f2246da766f440661e73b929b7 | 9d4fda2d9d7f5070a7a237e821140e11b288d477 | refs/heads/master | 2020-03-14T22:49:34.565440 | 2018-04-25T06:10:39 | 2018-04-25T06:10:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,176 | py | import itertools
import copy as cp
import numpy as np
import dill
from pySDC.core.Controller import controller
from pySDC.core import Step as stepclass
from pySDC.core.Errors import ControllerError, CommunicationError
class allinclusive_multigrid_nonMPI(controller):
"""
PFASST controller, running serialized version of PFASST in blocks (MG-style)
"""
def __init__(self, num_procs, controller_params, description):
"""
Initialization routine for PFASST controller
Args:
num_procs: number of parallel time steps (still serial, though), can be 1
controller_params: parameter set for the controller and the steps
description: all the parameters to set up the rest (levels, problems, transfer, ...)
"""
# call parent's initialization routine
super(allinclusive_multigrid_nonMPI, self).__init__(controller_params)
self.MS = [stepclass.step(description)]
for p in range(num_procs - 1):
self.MS.append(dill.copy(self.MS[0]))
if self.params.dump_setup:
self.dump_setup(step=self.MS[0], controller_params=controller_params, description=description)
if num_procs > 1 and len(self.MS[0].levels) > 1:
for S in self.MS:
for L in S.levels:
if not L.sweep.coll.right_is_node:
raise ControllerError("For PFASST to work, we assume uend^k = u_M^k")
if all(len(S.levels) == len(self.MS[0].levels) for S in self.MS):
self.nlevels = len(self.MS[0].levels)
else:
raise ControllerError('all steps need to have the same number of levels')
if self.nlevels == 0:
raise ControllerError('need at least one level')
self.nsweeps = []
for nl in range(self.nlevels):
if all(S.levels[nl].params.nsweeps == self.MS[0].levels[nl].params.nsweeps for S in self.MS):
self.nsweeps.append(self.MS[0].levels[nl].params.nsweeps)
if self.nlevels > 1 and self.nsweeps[-1] > 1:
raise ControllerError('this controller cannot do multiple sweeps on coarsest level')
def run(self, u0, t0, Tend):
"""
Main driver for running the serial version of SDC, MSSDC, MLSDC and PFASST (virtual parallelism)
Args:
u0: initial values
t0: starting time
Tend: ending time
Returns:
end values on the finest level
stats object containing statistics for each step, each level and each iteration
"""
# some initializations and reset of statistics
uend = None
num_procs = len(self.MS)
self.hooks.reset_stats()
# initial ordering of the steps: 0,1,...,Np-1
slots = [p for p in range(num_procs)]
# initialize time variables of each step
time = [t0 + sum(self.MS[j].dt for j in range(p)) for p in slots]
# determine which steps are still active (time < Tend)
active = [time[p] < Tend - 10 * np.finfo(float).eps for p in slots]
# compress slots according to active steps, i.e. remove all steps which have times above Tend
active_slots = list(itertools.compress(slots, active))
# initialize block of steps with u0
self.restart_block(active_slots, time, u0)
# call pre-run hook
for S in self.MS:
self.hooks.pre_run(step=S, level_number=0)
# main loop: as long as at least one step is still active (time < Tend), do something
while any(active):
MS_active = []
for p in active_slots:
MS_active.append(self.MS[p])
while not all([MS_active[p].status.done for p in range(len(MS_active))]):
MS_active = self.pfasst(MS_active)
for p in range(len(MS_active)):
self.MS[active_slots[p]] = MS_active[p]
# uend is uend of the last active step in the list
uend = self.MS[active_slots[-1]].levels[0].uend
for p in active_slots:
time[p] += num_procs * self.MS[p].dt
# determine new set of active steps and compress slots accordingly
active = [time[p] < Tend - 10 * np.finfo(float).eps for p in slots]
active_slots = list(itertools.compress(slots, active))
# restart active steps (reset all values and pass uend to u0)
self.restart_block(active_slots, time, uend)
# call post-run hook
for S in self.MS:
self.hooks.post_run(step=S, level_number=0)
return uend, self.hooks.return_stats()
def restart_block(self, active_slots, time, u0):
"""
Helper routine to reset/restart block of (active) steps
Args:
active_slots: list of active steps
time: list of new times
u0: initial value to distribute across the steps
"""
# loop over active slots (not directly, since we need the previous entry as well)
for j in range(len(active_slots)):
# get slot number
p = active_slots[j]
# store current slot number for diagnostics
self.MS[p].status.slot = p
# store link to previous step
self.MS[p].prev = self.MS[active_slots[j - 1]]
# resets step
self.MS[p].reset_step()
# determine whether I am the first and/or last in line
self.MS[p].status.first = active_slots.index(p) == 0
self.MS[p].status.last = active_slots.index(p) == len(active_slots) - 1
# initialize step with u0
self.MS[p].init_step(u0)
# reset some values
self.MS[p].status.done = False
self.MS[p].status.iter = 0
self.MS[p].status.stage = 'SPREAD'
for l in self.MS[p].levels:
l.tag = None
l.status.sweep = 1
for p in active_slots:
for lvl in self.MS[p].levels:
lvl.status.time = time[p]
@staticmethod
def recv(target, source, tag=None):
"""
Receive function
Args:
target: level which will receive the values
source: level which initiated the send
tag: identifier to check if this message is really for me
"""
if tag is not None and source.tag != tag:
raise CommunicationError('source and target tag are not the same, got %s and %s' % (source.tag, tag))
# simply do a deepcopy of the values uend to become the new u0 at the target
target.u[0] = target.prob.dtype_u(source.uend)
# re-evaluate f on left interval boundary
target.f[0] = target.prob.eval_f(target.u[0], target.time)
@staticmethod
def send(source, tag):
"""
Send function
Args:
source: level which has the new values
tag: identifier for this message
"""
# sending here means computing uend ("one-sided communication")
source.sweep.compute_end_point()
source.tag = cp.deepcopy(tag)
def predictor(self, MS):
"""
Predictor function, extracted from the stepwise implementation (will also be used by matrix sweepers)
Args:
MS: all active steps
Returns:
all active steps
"""
# loop over all steps
for S in MS:
# restrict to coarsest level
for l in range(1, len(S.levels)):
S.transfer(source=S.levels[l - 1], target=S.levels[l])
# loop over all steps
for q in range(len(MS)):
# loop over last steps: [1,2,3,4], [2,3,4], [3,4], [4]
for p in range(q, len(MS)):
S = MS[p]
# do the sweep with new values
S.levels[-1].sweep.update_nodes()
# send updated values on coarsest level
self.logger.debug('Process %2i provides data on level %2i with tag %s -- PREDICT'
% (S.status.slot, len(S.levels) - 1, 0))
self.send(S.levels[-1], tag=(len(S.levels), 0, S.status.slot))
# loop over last steps: [2,3,4], [3,4], [4]
for p in range(q + 1, len(MS)):
S = MS[p]
# receive values sent during previous sweep
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s -- PREDICT' %
(S.status.slot, S.prev.status.slot, len(S.levels) - 1, 0))
self.recv(S.levels[-1], S.prev.levels[-1], tag=(len(S.levels), 0, S.prev.status.slot))
# loop over all steps
for S in MS:
# interpolate back to finest level
for l in range(len(S.levels) - 1, 0, -1):
S.transfer(source=S.levels[l], target=S.levels[l - 1])
return MS
def pfasst(self, MS):
"""
Main function including the stages of SDC, MLSDC and PFASST (the "controller")
For the workflow of this controller, check out one of our PFASST talks
Args:
MS: all active steps
Returns:
all active steps
"""
# if all stages are the same, continue, otherwise abort
if all(S.status.stage for S in MS):
stage = MS[0].status.stage
else:
raise ControllerError('not all stages are equal')
self.logger.debug(stage)
if stage == 'SPREAD':
# (potentially) serial spreading phase
for S in MS:
# first stage: spread values
self.hooks.pre_step(step=S, level_number=0)
# call predictor from sweeper
S.levels[0].sweep.predict()
# update stage
if len(S.levels) > 1 and self.params.predict: # MLSDC or PFASST with predict
S.status.stage = 'PREDICT'
else:
S.status.stage = 'IT_CHECK'
return MS
elif stage == 'PREDICT':
# call predictor (serial)
MS = self.predictor(MS)
for S in MS:
# update stage
S.status.stage = 'IT_CHECK'
return MS
elif stage == 'IT_CHECK':
# check whether to stop iterating (parallel)
for S in MS:
# send updated values forward
if self.params.fine_comm and not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, 0, S.status.iter))
self.send(S.levels[0], tag=(0, S.status.iter, S.status.slot))
# # receive values
if self.params.fine_comm and not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, 0, S.status.iter))
self.recv(S.levels[0], S.prev.levels[0], tag=(0, S.status.iter, S.prev.status.slot))
S.levels[0].sweep.compute_residual()
S.status.done = self.check_convergence(S)
if S.status.iter > 0:
self.hooks.post_iteration(step=S, level_number=0)
# if not everyone is ready yet, keep doing stuff
if not all(S.status.done for S in MS):
for S in MS:
S.status.done = False
# increment iteration count here (and only here)
S.status.iter += 1
self.hooks.pre_iteration(step=S, level_number=0)
if len(S.levels) > 1: # MLSDC or PFASST
S.status.stage = 'IT_UP'
else: # SDC
S.status.stage = 'IT_FINE'
else:
# if everyone is ready, end
for S in MS:
S.levels[0].sweep.compute_end_point()
self.hooks.post_step(step=S, level_number=0)
S.status.stage = 'DONE'
return MS
elif stage == 'IT_FINE':
# do fine sweep for all steps (virtually parallel)
for S in MS:
S.levels[0].status.sweep = 0
for k in range(self.nsweeps[0]):
for S in MS:
S.levels[0].status.sweep += 1
for S in MS:
# standard sweep workflow: update nodes, compute residual, log progress
self.hooks.pre_sweep(step=S, level_number=0)
S.levels[0].sweep.update_nodes()
for S in MS:
# send updated values forward
if self.params.fine_comm and not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, 0, S.status.iter))
self.send(S.levels[0], tag=(0, S.status.iter, S.status.slot))
# # receive values
if self.params.fine_comm and not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, 0, S.status.iter))
self.recv(S.levels[0], S.prev.levels[0], tag=(0, S.status.iter, S.prev.status.slot))
S.levels[0].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=0)
for S in MS:
# update stage
S.status.stage = 'IT_CHECK'
return MS
elif stage == 'IT_UP':
# go up the hierarchy from finest to coarsest level (parallel)
for S in MS:
S.transfer(source=S.levels[0], target=S.levels[1])
for l in range(1, self.nlevels - 1):
# sweep on middle levels (not on finest, not on coarsest, though)
for k in range(self.nsweeps[l]):
for S in MS:
self.hooks.pre_sweep(step=S, level_number=l)
S.levels[l].sweep.update_nodes()
# send updated values forward
if self.params.fine_comm and not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, l, S.status.iter))
self.send(S.levels[l], tag=(l, S.status.iter, S.status.slot))
# # receive values
if self.params.fine_comm and not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, l, S.status.iter))
self.recv(S.levels[l], S.prev.levels[l], tag=(l, S.status.iter, S.prev.status.slot))
S.levels[l].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=l)
for S in MS:
# transfer further up the hierarchy
S.transfer(source=S.levels[l], target=S.levels[l + 1])
for S in MS:
# update stage
S.status.stage = 'IT_COARSE'
return MS
elif stage == 'IT_COARSE':
# sweeps on coarsest level (serial/blocking)
for S in MS:
# receive from previous step (if not first)
if not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, len(S.levels) - 1, S.status.iter))
self.recv(S.levels[-1], S.prev.levels[-1], tag=(len(S.levels), S.status.iter, S.prev.status.slot))
# do the sweep
self.hooks.pre_sweep(step=S, level_number=len(S.levels) - 1)
S.levels[-1].sweep.update_nodes()
S.levels[-1].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=len(S.levels) - 1)
# send to succ step
if not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, len(S.levels) - 1, S.status.iter))
self.send(S.levels[-1], tag=(len(S.levels), S.status.iter, S.status.slot))
# update stage
if len(S.levels) > 1: # MLSDC or PFASST
S.status.stage = 'IT_DOWN'
else: # MSSDC
S.status.stage = 'IT_CHECK'
return MS
elif stage == 'IT_DOWN':
# prolong corrections down to finest level (parallel)
for l in range(self.nlevels - 1, 0, -1):
for S in MS:
# prolong values
S.transfer(source=S.levels[l], target=S.levels[l - 1])
# send updated values forward
if self.params.fine_comm and not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, l - 1, S.status.iter))
self.send(S.levels[l - 1], tag=(l - 1, S.status.iter, S.status.slot))
# # receive values
if self.params.fine_comm and not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, l - 1, S.status.iter))
self.recv(S.levels[l - 1], S.prev.levels[l - 1], tag=(l - 1, S.status.iter,
S.prev.status.slot))
S.levels[l - 1].sweep.compute_residual()
# on middle levels: do communication and sweep as usual
if l - 1 > 0:
for k in range(self.nsweeps[l - 1]):
for S in MS:
self.hooks.pre_sweep(step=S, level_number=l - 1)
S.levels[l - 1].sweep.update_nodes()
# send updated values forward
if self.params.fine_comm and not S.status.last:
self.logger.debug('Process %2i provides data on level %2i with tag %s'
% (S.status.slot, l - 1, S.status.iter))
self.send(S.levels[l - 1], tag=(l - 1, S.status.iter, S.status.slot))
# # receive values
if self.params.fine_comm and not S.status.first:
self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
(S.status.slot, S.prev.status.slot, l - 1, S.status.iter))
self.recv(S.levels[l - 1], S.prev.levels[l - 1], tag=(l - 1, S.status.iter,
S.prev.status.slot))
S.levels[l - 1].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=l - 1)
# on finest level, first check for convergence (where we will communicate, too)
for S in MS:
# update stage
S.status.stage = 'IT_FINE'
return MS
else:
raise ControllerError('Unknown stage, got %s' % stage)
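# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hypothetical driver showing how this controller is typically wired up:
# build a description dict, construct the controller, then call run(). Only the
# constructor and run() signatures come from the class above; the problem/sweeper
# classes and every parameter value below are assumptions for illustration.
#
# description = dict()
# description['problem_class'] = MyProblemClass   # assumed problem class
# description['sweeper_class'] = MySweeperClass   # assumed sweeper class
# description['level_params'] = dict(dt=0.1)      # assumed time step settings
# description['step_params'] = dict(maxiter=20)   # assumed iteration cap
#
# controller = allinclusive_multigrid_nonMPI(num_procs=4,
#                                            controller_params=dict(logger_level=30),
#                                            description=description)
# uinit = ...                                     # initial condition, problem-specific
# uend, stats = controller.run(u0=uinit, t0=0.0, Tend=1.0)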
| [
"[email protected]"
] | |
fc55a5be31881904c162e9a36f5926be2272163b | 930ef8a8ec0338e497be3a9475af1b5244f01dc1 | /drl_net.py | 2ae907fa51e49a14821b1db1b815e50dc6c805d8 | [] | no_license | xiaogaogaoxiao/DQN_user_grouping | 837c48c051f32d848f135bebcea3410aeba68ca7 | e694dcebacb74b1c0530adc892398616b15d0fc1 | refs/heads/main | 2023-04-17T07:46:08.182794 | 2021-04-30T15:14:42 | 2021-04-30T15:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from MecOpt import MecEnv
import math
import random
print(torch.__version__)
EPS_START = 0.8
EPS_END = 0.01
EPS_DECAY = 2000
steps_done = 0
class QNet(nn.Module):
def __init__(self, n_inputs, n_outputs):
hidden1 = 3 * n_outputs
hidden2 = 2 * n_outputs
super(QNet, self).__init__()
self.fc1 = nn.Linear(n_inputs, hidden1)
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc2.weight.data.normal_(0, 0.1)
self.fc3 = nn.Linear(hidden2, n_outputs)
self.fc3.weight.data.normal_(0, 0.1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = torch.sigmoid(self.fc2(x))
x = torch.tanh(self.fc3(x))
return x
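# Shape note (illustrative): QNet(n_inputs=4, n_outputs=8) builds fully connected layers
# 4 -> 24 -> 16 -> 8 (the hidden sizes are 3*n_outputs and 2*n_outputs), so a (batch, 4)
# float tensor is mapped to a (batch, 8) tensor of tanh-squashed Q-value estimates.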
class dqn:
def __init__(self,
n_inputs=1,
n_outputs=1,
memory_size=1,
batch_size=32,
learning_rate=1e-3,
training_interval=10,
epsilon_greedy=0.9,
gamma=0.6,
):
self.memory_low = 1000
self.state_dim = n_inputs
self.action_dim = n_outputs
self.memory_size = memory_size
self.batch_size = batch_size
self.learning_rate = learning_rate
self.training_interval = training_interval
self.epsilon_greedy = epsilon_greedy
self.gamma = gamma
self.eval_net = QNet(self.state_dim, self.action_dim)
self.target_net = QNet(self.state_dim, self.action_dim)
self.learn_step_counter = 0
self.memory_counter = 0
self.memory = np.zeros((self.memory_size, self.state_dim * 2 + 2))
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=learning_rate)
self.criterion = nn.MSELoss()
def choose_action(self, s):
global steps_done
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
s = Variable(torch.unsqueeze(torch.Tensor(s), 0))
if sample > eps_threshold:
action = torch.max(self.eval_net(s), 1)[1].data[0]
return action
else:
return random.randrange(self.action_dim)
def store_memory(self, s, a, r, s_):
transition = np.hstack((s, [a, r], s_))
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
# target net parameter update
# sample experience
# data from mini batch
if self.memory_low <= self.memory_counter < self.memory_size:
sample_index = np.random.choice(self.memory_counter, self.batch_size)
elif self.memory_counter >= self.memory_size:
sample_index = np.random.choice(self.memory_size, self.batch_size)
else:
return
if self.learn_step_counter % self.training_interval == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample experience
# data from mini batch
b_memory = self.memory[sample_index, :]
b_s = Variable(torch.FloatTensor(b_memory[:, :self.state_dim]))
b_a = Variable(torch.LongTensor(b_memory[:, self.state_dim:self.state_dim + 1].astype(int)))
b_r = Variable(torch.FloatTensor(b_memory[:, self.state_dim + 1: self.state_dim + 2]))
b_s_ = Variable(torch.FloatTensor(b_memory[:, -self.state_dim:]))
self.eval_net.eval()
self.target_net.eval()
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)
q_next = self.target_net(b_s_).detach() # detach
q_target = b_r + self.gamma * q_next.max(1)[0].view(self.batch_size, 1) # shape (batch, 1)
loss = self.criterion(q_target, q_eval) # MSE loss
# update
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
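# --- Training-loop sketch (illustrative addition, not part of the original file) ---
# Shows how the dqn class above is meant to be driven. The environment object and its
# reset()/step() return values are assumptions; MecEnv (imported at the top) is only
# assumed to expose a similar interface, and all dimensions here are placeholders.
#
# env = MecEnv()                                   # assumed environment
# agent = dqn(n_inputs=10, n_outputs=4, memory_size=5000, batch_size=32)
# s = env.reset()                                  # assumed API
# for step in range(10000):
#     a = agent.choose_action(s)                   # epsilon-greedy action selection
#     s_, r, done = env.step(a)                    # assumed API
#     agent.store_memory(s, a, r, s_)              # fill the replay buffer
#     agent.learn()                                # only trains once enough transitions are stored
#     s = env.reset() if done else s_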
| [
"[email protected]"
] | |
643fd19f16b4df78eeb49c578ac040f68bb0cae2 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/kernel_tests/signal/dct_ops_test.py | 51206abed17e08efa63d4f1a13a2483bc0fb34ff | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 7,880 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DCT operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
fftpack = try_import("scipy.fftpack")
def _modify_input_for_dct(signals, n=None):
""" This is a supporting function for the numpy implementation
of DCT operations. If n < signal size, it returns the first n elements,
else it pads the signal with zeros.
"""
signal = np.array(signals)
if n is None or n == signal.shape[-1]:
signal_mod = signal
elif n >= 1:
signal_len = signal.shape[-1]
if n <= signal_len:
signal_mod = signal[..., 0:n]
else:
output_shape = list(signal.shape)
output_shape[-1] = n
signal_mod = np.zeros(output_shape)
signal_mod[..., 0:signal.shape[-1]] = signal
if n:
assert signal_mod.shape[-1] == n
return signal_mod
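# Worked example (illustrative): for a 1-D input, _modify_input_for_dct([1., 2., 3.], n=5)
# pads to [1., 2., 3., 0., 0.], n=2 truncates to [1., 2.], and n=None returns the signal
# unchanged.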
def _np_dct1(signals, n=None, norm=None):
"""Computes the DCT-I manually with NumPy."""
# X_k = (x_0 + (-1)**k * x_{N-1} +
# 2 * sum_{n=0}^{N-2} x_n * cos(\frac{pi}{N-1} * n * k) k=0,...,N-1
del norm
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size - 1) * k / (dct_size - 1))
dct[..., k] = 2 * np.sum(
signals_mod[..., 1:-1] * phi, axis=-1) + (
signals_mod[..., 0] + (-1)**k * signals_mod[..., -1])
return dct
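# Worked example (illustrative): for a length-2 signal [a, b] the inner sum above is empty,
# so DCT-I reduces to X_0 = a + b and X_1 = a - b, which is a quick sanity check against
# the TensorFlow output compared in the tests below.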
def _np_dct2(signals, n=None, norm=None):
"""Computes the DCT-II manually with NumPy."""
# X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k) k=0,...,N-1
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
dct[..., k] = np.sum(signals_mod * phi, axis=-1)
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
if norm == "ortho":
# The orthonormal scaling includes a factor of 0.5 which we combine with
# the overall scaling of 2.0 to cancel.
dct[..., 0] *= np.sqrt(1.0 / dct_size)
dct[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
dct *= 2.0
return dct
def _np_dct3(signals, n=None, norm=None):
"""Computes the DCT-III manually with NumPy."""
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
signals_mod = np.array(signals_mod) # make a copy so we can modify
if norm == "ortho":
signals_mod[..., 0] *= np.sqrt(4.0 / dct_size)
signals_mod[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
signals_mod *= 2.0
dct = np.zeros_like(signals_mod)
# X_k = 0.5 * x_0 +
# sum_{n=1}^{N-1} x_n * cos(\frac{pi}{N} * n * (k + 0.5)) k=0,...,N-1
half_x0 = 0.5 * signals_mod[..., 0]
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size) * (k + 0.5) / dct_size)
dct[..., k] = half_x0 + np.sum(signals_mod[..., 1:] * phi, axis=-1)
return dct
NP_DCT = {1: _np_dct1, 2: _np_dct2, 3: _np_dct3}
NP_IDCT = {1: _np_dct1, 2: _np_dct3, 3: _np_dct2}
class DCTOpsTest(parameterized.TestCase, test.TestCase):
def _compare(self, signals, n, norm, dct_type, atol=5e-4, rtol=5e-4):
"""Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
np_dct = NP_DCT[dct_type](signals, n=n, norm=norm)
tf_dct = dct_ops.dct(signals, n=n, type=dct_type, norm=norm).eval()
self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
np_idct = NP_IDCT[dct_type](signals, n=None, norm=norm)
tf_idct = dct_ops.idct(signals, type=dct_type, norm=norm).eval()
self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
if fftpack:
scipy_dct = fftpack.dct(signals, n=n, type=dct_type, norm=norm)
self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
# Verify inverse(forward(s)) == s, up to a normalization factor.
# Since `n` is not implemented for IDCT operation, re-calculating tf_dct without n.
tf_dct = dct_ops.dct(signals, type=dct_type, norm=norm).eval()
tf_idct_dct = dct_ops.idct(
tf_dct, type=dct_type, norm=norm).eval()
tf_dct_idct = dct_ops.dct(
tf_idct, type=dct_type, norm=norm).eval()
if norm is None:
if dct_type == 1:
tf_idct_dct *= 0.5 / (signals.shape[-1] - 1)
tf_dct_idct *= 0.5 / (signals.shape[-1] - 1)
else:
tf_idct_dct *= 0.5 / signals.shape[-1]
tf_dct_idct *= 0.5 / signals.shape[-1]
self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)
@parameterized.parameters([
[[2]], [[3]], [[10]], [[2, 20]], [[2, 3, 25]]])
@test_util.run_deprecated_v1
def test_random(self, shape):
"""Test randomly generated batches of data."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signals = np.random.rand(*shape).astype(np.float32)
n = np.random.randint(1, 2 * signals.shape[-1])
n = np.random.choice([None, n])
# Normalization not implemented for orthonormal.
self._compare(signals, n, norm=None, dct_type=1)
for norm in (None, "ortho"):
self._compare(signals, n=n, norm=norm, dct_type=2)
self._compare(signals, n=n, norm=norm, dct_type=3)
def test_error(self):
signals = np.random.rand(10)
# Unsupported type.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=5)
# Invalid n.
with self.assertRaises(ValueError):
dct_ops.dct(signals, n=-2)
# DCT-I normalization not implemented.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=1, norm="ortho")
# DCT-I requires at least two inputs.
with self.assertRaises(ValueError):
dct_ops.dct(np.random.rand(1), type=1)
# Unknown normalization.
with self.assertRaises(ValueError):
dct_ops.dct(signals, norm="bad")
with self.assertRaises(NotImplementedError):
dct_ops.dct(signals, axis=0)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
cfd943a80e044add71dc7c4249a4404a20ce5e87 | cb848d0c80abb04c080155d1502d22391423c4e8 | /build_isolated/sick_ldmrs_driver/catkin_generated/pkg.develspace.context.pc.py | def09cbfec31e6271e0038e6e4a28f39cdfcd982 | [] | no_license | MTU-Autobot/catkin_ws | d8bc9b0de46befc53282b9b7e6d338a7ff7e3a0c | cf104fe048c6101f50be1b87e181d80a4be3e770 | refs/heads/master | 2020-03-13T23:14:56.276075 | 2018-04-27T18:28:01 | 2018-04-27T18:28:01 | 131,331,599 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver/include;/home/ubuntu/catkin_ws/src/sick_ldmrs_laser/sick_ldmrs_driver/include;/usr/include".split(';') if "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver/include;/home/ubuntu/catkin_ws/src/sick_ldmrs_laser/sick_ldmrs_driver/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;diagnostic_updater;dynamic_reconfigure;pcl_conversions;sick_ldmrs_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-l:/usr/lib/aarch64-linux-gnu/libboost_system.so".split(';') if "-l:/usr/lib/aarch64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "sick_ldmrs_driver"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/devel_isolated/sick_ldmrs_driver"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
0690db07264c5795d1457e10640984b025aa63e7 | 155bf47fa1b33a31576f6b8b90aaa74cd41e352a | /lianjia-spider/test/date_test.py | bdb011ebe1d659ecb20cac9fcfe8c34d272f7d4a | [] | no_license | ares5221/Python-Crawler-Projects | af4ec40a26f4f69ef285a0edf0428192a594d4cd | 45b496000631f0f3b887501d9d67f3e24f5e6186 | refs/heads/master | 2021-07-03T07:11:25.474055 | 2020-09-08T08:17:17 | 2020-09-08T08:17:17 | 145,980,513 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import unittest
from lib.utility.date import *
class DateTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_time_string(self):
self.assertEqual(len(get_time_string()), 14)
def test_date_string(self):
self.assertEqual(len(get_date_string()), 8)
def test_year_string(self):
self.assertEqual(len(get_year_month_string()), 6)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
609229cf48ea3f2d2ea42efbf2d6709292827d98 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/ALTIGA-GENERAL-STATS-MIB.py | c3f350b52f7400da40c42fb0517a6ca6a440ccd2 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 4,919 | py | #
# PySNMP MIB module ALTIGA-GENERAL-STATS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALTIGA-GENERAL-STATS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:05:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
alGeneralMibModule, = mibBuilder.importSymbols("ALTIGA-GLOBAL-REG", "alGeneralMibModule")
alGeneralGroup, alStatsGeneral = mibBuilder.importSymbols("ALTIGA-MIB", "alGeneralGroup", "alStatsGeneral")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Counter32, iso, Integer32, Counter64, Gauge32, Unsigned32, ModuleIdentity, IpAddress, Bits, NotificationType, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "iso", "Integer32", "Counter64", "Gauge32", "Unsigned32", "ModuleIdentity", "IpAddress", "Bits", "NotificationType", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
altigaGeneralStatsMibModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30, 2))
altigaGeneralStatsMibModule.setRevisions(('2002-09-11 13:00', '2002-07-10 00:00',))
if mibBuilder.loadTexts: altigaGeneralStatsMibModule.setLastUpdated('200209111300Z')
if mibBuilder.loadTexts: altigaGeneralStatsMibModule.setOrganization('Cisco Systems, Inc.')
alStatsGeneralGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1))
alGeneralTime = MibScalar((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alGeneralTime.setStatus('current')
alGeneralGaugeCpuUtil = MibScalar((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: alGeneralGaugeCpuUtil.setStatus('current')
alGeneralGaugeActiveSessions = MibScalar((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: alGeneralGaugeActiveSessions.setStatus('current')
alGeneralGaugeThroughput = MibScalar((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1, 4), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: alGeneralGaugeThroughput.setStatus('current')
alGeneralTimeZone = MibScalar((1, 3, 6, 1, 4, 1, 3076, 2, 1, 2, 25, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alGeneralTimeZone.setStatus('current')
altigaGeneralStatsMibConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30, 2, 1))
altigaGeneralStatsMibCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30, 2, 1, 1))
altigaGeneralStatsMibCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30, 2, 1, 1, 1)).setObjects(("ALTIGA-GENERAL-STATS-MIB", "altigaGeneralStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
altigaGeneralStatsMibCompliance = altigaGeneralStatsMibCompliance.setStatus('current')
altigaGeneralStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3076, 2, 1, 1, 1, 25, 2)).setObjects(("ALTIGA-GENERAL-STATS-MIB", "alGeneralTime"), ("ALTIGA-GENERAL-STATS-MIB", "alGeneralGaugeCpuUtil"), ("ALTIGA-GENERAL-STATS-MIB", "alGeneralGaugeActiveSessions"), ("ALTIGA-GENERAL-STATS-MIB", "alGeneralGaugeThroughput"), ("ALTIGA-GENERAL-STATS-MIB", "alGeneralTimeZone"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
altigaGeneralStatsGroup = altigaGeneralStatsGroup.setStatus('current')
mibBuilder.exportSymbols("ALTIGA-GENERAL-STATS-MIB", alGeneralTimeZone=alGeneralTimeZone, altigaGeneralStatsGroup=altigaGeneralStatsGroup, alStatsGeneralGlobal=alStatsGeneralGlobal, alGeneralGaugeActiveSessions=alGeneralGaugeActiveSessions, alGeneralGaugeThroughput=alGeneralGaugeThroughput, altigaGeneralStatsMibModule=altigaGeneralStatsMibModule, altigaGeneralStatsMibConformance=altigaGeneralStatsMibConformance, alGeneralGaugeCpuUtil=alGeneralGaugeCpuUtil, altigaGeneralStatsMibCompliances=altigaGeneralStatsMibCompliances, altigaGeneralStatsMibCompliance=altigaGeneralStatsMibCompliance, PYSNMP_MODULE_ID=altigaGeneralStatsMibModule, alGeneralTime=alGeneralTime)
| [
"[email protected]"
] | |
1f7e2e64977bf40382acf2fc8b836b554e487eb3 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_16573.py | 921fc4365ab95e5dd3c06d397032235af117c2e5 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,842 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((518.552, 629.65, 574.573), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((505.012, 570.576, 547.731), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((501.147, 499.211, 506.167), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((626.574, 556.372, 531.081), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((448.028, 337.515, 409.475), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((510.303, 594.831, 545.061), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((510.64, 596.869, 544.861), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((490.47, 599.218, 525.22), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((473.965, 602.545, 502.571), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((481.78, 611.486, 476.997), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((501.673, 626.876, 463.966), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((516.455, 650.39, 471.291), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((502.922, 608.574, 568.927), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((523.23, 696.873, 375.944), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((455.251, 509.78, 340.901), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((455.251, 509.78, 340.901), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((468.957, 518.96, 363.679), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((472.979, 518.321, 391.487), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((479.056, 528.906, 416.808), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((477.501, 539.29, 443.131), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((472.849, 550.561, 468.848), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((476.865, 561.772, 495.01), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((539.957, 603.04, 248.392), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((422.412, 529.415, 747.31), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((462.708, 525.179, 502.959), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((462.708, 525.179, 502.959), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((490.601, 530.497, 497.558), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((519.432, 525.731, 500.402), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((535.327, 503.856, 510.345), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((567.852, 599.712, 583.659), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((505.878, 403.164, 442.652), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((532.558, 573.259, 553.322), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((532.966, 573.314, 553.381), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((550.504, 565.207, 532.397), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((551.617, 569.751, 504.257), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((534.831, 586.458, 488.461), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((514.669, 606.854, 491.924), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((500.54, 631.605, 497.225), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((485.792, 651.153, 482.357), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((437.984, 596.805, 527.398), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((535.785, 704.79, 433.897), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((445.351, 559.719, 553.907), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((472.367, 549.859, 549.918), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((526.2, 529.426, 539.588), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((577.156, 509.203, 526.697), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((592.253, 541.985, 598.089), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((632.387, 458.157, 455.836), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((538.548, 599.818, 519.562), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((521.475, 576.912, 518.611), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((508.234, 551.196, 520.098), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((494.284, 527.697, 530.856), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((494.721, 499.814, 539.922), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((492.604, 470.497, 540.317), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((494.272, 547.923, 557.013), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((491.083, 388.156, 526.611), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
02bdf2ff0b549bdfb9f180710387a1f670c585c1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/466/usersdata/283/111275/submittedfiles/Av2_Parte2.py | 37c67dbe12d8064c666d9cb7468d46f05bb3de9c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # -*- coding: utf-8 -*-
a=[]
b=[]
c=[]
n=int(input('Enter the number of elements: '))
while n<=0:
print('Invalid number!')
n=int(input('Enter the number of elements: '))
for i in range(0,n,1):
a.append(int(input('Enter an element for a: ')))
for j in range(0,n,1):
b.append(int(input('Enter an element for b: ')))
for k in range(0,n,1):
c.append(int(input('Enter an element for c: ')))
g=[]
o=[]
for l in range(0,n,1):
if (l+1) == n:
break
if a[l]<a[l+1]:
g.append(a[l])
g.append(a[len(a)-1])
print(g)
| [
"[email protected]"
] | |
4546480635d8c354c4fef52bcf2e215e44eef81b | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /055_modules/001_modules/examples/Python 3 Most Nessesary/12. Listing 12.1. Checking the module startup method.py | 53ea34d3d155e5bb12d3c7369668efe347fb7bd5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 143 | py | if __name__ == "__main__":
print("This is the main program")
else:
print("Imported module") | [
"[email protected]"
] | |
3b436ade09c46670b26faecdb2da74694f10439e | 058c258ecb9d456dce6dc9ff41d9d2c9e5a5c489 | /view/plat/Content.py | c46abb112ef988737d99b17d3bb343e70441c33e | [] | no_license | lukoou3/Toolbox | 7f64f49ab5b24e8ff3a7334a794a1ef8be520dc0 | d23c1531adc4b03c8df043e05daa6dec4f3afaa9 | refs/heads/master | 2020-07-26T22:55:00.141722 | 2020-03-20T03:35:37 | 2020-03-20T03:35:37 | 208,787,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | from PyQt5.QtWidgets import QTabWidget
from view.content.DbTablesWidget import DbTablesWidget
from view.content.FileRenameWidget import FileRenameWidget
from view.content.JsonParseWidget import JsonParseWidget
from view.content.MarkdownWidget import MarkdownWidget
from view.content.SqlParseWidget import SqlParseWidget
from view.content.DbTableWidget import DbTableWidget
from view.content.StrMapReduceWidget import StrMapReduceWidget
from view.content.TransformWidget import TransformWidget
class Content(QTabWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.menuMap = {}
self.initUI()
def initUI(self):
"""http://www.jsons.cn/unicode/"""
self.setContentsMargins(0, 0, 0, 0)
self.tabBar().hide()
str_mapreduce_widget = StrMapReduceWidget()
self.menuMap["str_mapreduce_widget"] = str_mapreduce_widget
self.addTab(str_mapreduce_widget, "")
str_json_widget = JsonParseWidget()
self.menuMap["str_json_widget"] = str_json_widget
self.addTab(str_json_widget, "")
str_sql_widget = SqlParseWidget()
self.menuMap["str_sql_widget"] = str_sql_widget
self.addTab(str_sql_widget, "")
str_transform_widget = TransformWidget()
self.menuMap["str_transform_widget"] = str_transform_widget
self.addTab(str_transform_widget, "")
str_markdown_widget = MarkdownWidget()
self.menuMap["str_markdown_widget"] = str_markdown_widget
self.addTab(str_markdown_widget, "")
file_rename_widget = FileRenameWidget()
self.menuMap["file_rename_widget"] = file_rename_widget
self.addTab(file_rename_widget, "")
db_tables_widget = DbTablesWidget()
self.menuMap["db_tables_widget"] = db_tables_widget
self.addTab(db_tables_widget, "")
# db_table_widget = DbTableWidget()
# self.menuMap["db_table_widget"] = db_table_widget
# self.addTab(db_table_widget, "")
self.setCurrentIndex(0)
def setCurrentWidgetByMenu(self, menu):
widget = self.menuMap.get(menu.get("contentWidget", "str_mapreduce_widget"))
self.setCurrentWidget(widget)
loadData = getattr(widget, "loadData", None)
if callable(loadData):
loadData() | [
"[email protected]"
] | |
313a00b61f3722dff02dbad8119a1b9e42205264 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02715/s675628627.py | 35d438a031aa3325f3538e0e2d2ff10f00f4b32d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | N, K = map(int, input().split())
mod = 10 ** 9 + 7
G = [1] * (K + 1) #そのindexを最大公約数にもつ数列の数
ans = 0
for k in range(K, 0, -1):
x = K // k
t = int(pow(x, N, mod))
for j in range(x - 1):
t -= G[(j + 2) * k]
G[k] = t
ans += t * k
ans %= mod
print(ans)
| [
"[email protected]"
] | |
3ecae40e32b5b7054eba8fd90a4dc60f9c611a72 | 9a358fbd62eaed4ef96c7a0c607322e11aa7d3bf | /api/com_dayoung_api/cop/act/model/actor_ai.py | c54bc66f60fa2e0f084ebbe04e5208998db8dea6 | [] | no_license | ysk1026/project_dayoungi | 2b8a8cb569f1687024a00e7f3a3af6501aa67fb1 | cecb3a42496164b84ece1912932fe58de8537e46 | refs/heads/master | 2023-01-20T14:38:17.898499 | 2020-11-28T08:25:06 | 2020-11-28T08:25:06 | 311,549,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,101 | py | from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
# pip install sklearn
# conda install python-graphviz
import pydotplus # pip install pydotplus
from IPython.core.display import Image
from IPython.display import display
# pip install Ipython
# conda install -c anaconda ipython
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn import metrics
from six import StringIO
import os, sys
# PATH = r'C:/Program Files/Graphviz 2.44.1/bin'
# os.environ["PATH"] += os.pathsep+ PATH
class ActorAi:
def __init__(self):
...
def train_actors(self):
df = self.bring_dfo() # shape: (340, 10)
df = df[df['state'] == 1] # keep only actors that are currently visible (state == 1)
# df = df.head()
# print(df)
# age name real_name religion agency spouse children debut_year gender state
# 0 50 이병헌 no real name 불교 BH엔터테인먼트 이민정 이준후(아들) 1991 m 1
# 1 39 전지현 왕지현(王智賢) no religion 문화창고 최준혁 2남 1997 f 1
# 2 38 손예진 손언진 no religion 엠에스팀엔터테인먼트 no spouse no child 1999 f 1
# 3 28 안소희 no real name 불교 BH엔터테인먼트 no spouse no child 2004 f 1
# 4 39 강동원 no real name 무신론[1] YG 엔터테인먼트 no spouse no child 2003 m 1
# print(df.columns.values.tolist())
# ['age', 'name', 'real_name', 'religion', 'agency', 'spouse', 'children','debut_year', 'gender', 'state']
# 9 columns in total, but only 8 questions need to be asked
# state is already known to be 1 from the start
# 1st Question: is the actor male?
# 2nd Question: does the actor have children?
# 3rd Question: does the actor have a spouse?
# 4th Question: agency-related ->
# 5th Question: religion-related ->
# 6th Question: does the actor work under their real name?
# 7th Question: what is the actor's age?
# 8th Question: what is the actor's debut year?
# x = df['age', 'real_name', 'religion', 'agency', 'spouse', 'children','debut_year', 'gender', 'state']
# print(x)
print("-----------------------------------")
y_train = df.filter(["name"]) # the target output to predict
X_train = df.filter(['act_id','age', 'real_name', 'religion', 'agency', 'spouse', 'children','debut_year', 'gender', 'state'])
print("**************************************")
print(y_train)
print(X_train)
y_test = y_train
# we are not predicting unseen data, so the prediction input uses the same values as the train set
# expected to match 100%
print("-----------------------------------------------------------------------")
for set_max_depth in range(1,15):
set_random_state = 0
clf = tree.DecisionTreeClassifier(criterion = 'entropy', max_depth=set_max_depth, random_state=set_random_state)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_train)
print("Accuracy :", metrics.accuracy_score(y_test, y_pred))
print("random state: ", set_random_state)
print("Max Depth: ", set_max_depth)
print("-----------------------------------------------------------------------")
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.write_png("max_depth{}.png".format(set_max_depth))) # generate the png file
# ---------------------------------------------------------------------------------------------
# When Actor ID is dropped:
# with 9 columns in total, a max_depth of 9 was expected to get close to 100%.
# Accuracy : 0.9766763848396501
# random state: 0
# Max Depth: 9
# I expected 100% accuracy, but it did not reach it.
# number of actors = 343
# 343 * 0.9766763848396501 = 335
# 343 - 335 = 8, so 8 actors have overlapping (indistinguishable) feature data!
# When Actor ID is NOT dropped:
# Accuracy : 1.0
# random state: 0
# Max Depth: 9
# ----------------------------------------------------------------------
# However, the user does not know the Actor ID, so it carries no meaning for the guessing game.
# The dataset actually used therefore drops Actor ID.
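# --- Illustrative inference sketch (addition, not part of the original code) ---
# Once fitted, the tree can play the "guess the actor" step by predicting from one row
# of answers. Reusing a row of X_train keeps the sketch independent of how the answers
# are encoded; building a fresh answer vector from user input is left as an assumption.
# answers = X_train.iloc[[0]]                     # one known row, standing in for collected answers
# print("My guess is:", clf.predict(answers)[0])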
def bring_dfo(self):
df = pd.read_csv("./data/actors2.csv")
# print(df.shape) # (340, 13); two of the 13 columns, actor_id and photo_url, are not needed, so both are dropped
# the index is not needed either, so it is removed as well
# print(df.columns)
# Index(['Unnamed: 0', 'photo_url', 'age', 'act_id', 'name', 'real_name',
# 'religion', 'agency', 'spouse', 'children', 'debut_year', 'gender',
# 'state'], dtype='object')
df = df.drop('photo_url',1) # 0 means to drop rows, 1 means drop columns
df = df.drop('act_id',1)
# print(df.shape) # (340, 10)
return df
if __name__ == "__main__":
ai = ActorAi()
# df = pd.read_csv("./data/actors2.csv")
# df = df.drop('photo_url',1) # 0 means to drop rows, 1 means drop columns
# df = df.drop('act_id',1)
# df = df[df['state'] == 1]
# print(df)
ai.train_actors() | [
"[email protected]"
] | |
c7f06138cb8e969387fdcd3d5ab3508c3ed9bf9d | 297b5e4e39fe9d5add2face0e246cd5317caa005 | /tests/settings.py | 544d69eb06f49d49e7af36cc41efd90486f0828c | [
"MIT"
] | permissive | alexdlaird/django-maintenance-mode | b2073971062839c5ee8c9fe5a65f99206a250a83 | b71d7d2d0c9a7af3f81e23c40b2010b9413aba79 | refs/heads/master | 2021-08-29T20:18:55.602310 | 2017-10-18T13:52:20 | 2017-10-18T13:52:20 | 114,284,763 | 0 | 0 | null | 2017-12-14T18:45:03 | 2017-12-14T18:45:03 | null | UTF-8 | Python | false | false | 1,486 | py | # -*- coding: utf-8 -*-
import django
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'django-maintenance-mode'
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'maintenance_mode',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'maintenance_mode.middleware.MaintenanceModeMiddleware',
]
ROOT_URLCONF = 'tests.urls'
if django.VERSION < (1, 8):
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'maintenance_mode.context_processors.maintenance_mode',
)
else:
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'maintenance_mode.context_processors.maintenance_mode',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| [
"[email protected]"
] | |
961aa82417237ecf10d6d5d56faa8015967b798a | 1dfba6d8c60a534d6bdeb985697fba913da5fe9b | /src/mcedit2/rendering/loadablechunks.py | 39ce8cd7239bb07b73d8222a3f8fb9d0aecc2e1f | [
"BSD-3-Clause"
] | permissive | shipbiulder101/mcedit2 | 2d88a6933bac3010f5bedcdd65d542587841a19f | 44179472b7834c803da243a82d731f9ef555764d | refs/heads/master | 2021-01-12T21:52:56.581572 | 2015-10-20T21:30:34 | 2015-10-20T21:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,704 | py | """
Scene and render nodes that draw a translucent checkerboard overlay marking the chunks present in a dimension (the "loadable chunks" markers).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import logging
import numpy
from OpenGL import GL
from mcedit2.rendering.scenegraph import scenenode, rendernode
from mcedit2.util.glutils import Texture, gl
from mcedit2.rendering.depths import DepthOffset
log = logging.getLogger(__name__)
log.info("Making checkerboard texture...")
color0 = (0xff, 0xff, 0xff, 0x22)
color1 = (0xff, 0xff, 0xff, 0x44)
floorTexImage = numpy.array([color0, color1, color1, color0], dtype='uint8')
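# Four RGBA texels in two translucent shades of white; Texture() below reads this
# array as a 2x2 checkerboard that is tiled across the loadable-chunk floor.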
class LoadableChunksRenderNode(rendernode.RenderNode):
floorTexture = None
def compile(self):
if self.floorTexture is None:
self.floorTexture = Texture(image=floorTexImage, width=2, height=2,
minFilter=GL.GL_NEAREST,
magFilter=GL.GL_NEAREST,
)
self.floorTexture.load()
super(LoadableChunksRenderNode, self).compile()
def drawSelf(self):
with gl.glPushAttrib(GL.GL_FOG_BIT | GL.GL_ENABLE_BIT):
GL.glDisable(GL.GL_FOG)
GL.glEnable(GL.GL_BLEND)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPolygonOffset(DepthOffset.ChunkMarkers, DepthOffset.ChunkMarkers)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glColor(1.0, 1.0, 1.0, 1.0)
self.floorTexture.bind()
for vertexArray in self.sceneNode.createVertexArrays():
GL.glVertexPointer(3, GL.GL_FLOAT, 0, vertexArray.ravel())
# chunkPositions *= 8
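                # Dividing the block coordinates by 32 makes the 2x2 checker texture
                # repeat every 32 blocks, i.e. one checker cell per 16-block chunk.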
GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, (vertexArray[..., (0, 2)] / 32).ravel())
GL.glDrawArrays(GL.GL_QUADS, 0, len(vertexArray) * 4)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
class LoadableChunksNode(scenenode.Node):
skipLargeLevels = False
RenderNodeClass = LoadableChunksRenderNode
def __init__(self, dimension):
super(LoadableChunksNode, self).__init__()
self.dimension = dimension
# if self.skipLargeLevels: # and hasattr(self.dimension.worldEditor, 'worldFolder'):
# try:
# p = self.dimension.worldEditor.adapter.selectedRevision.getFolderPath('region')
# if len(os.listdir(p)) > 50: # 50 * 1024 chunks
# return
#
# except AttributeError:
# log.exception("Don't know how to count region files in %s", self.dimension)
# raise
def createVertexArrays(self):
if self.dimension.chunkCount:
chunkSet = set(self.dimension.chunkPositions())
sizedChunks = chunkMarkers(chunkSet)
def arrays():
for size, chunks in sizedChunks.iteritems():
if not len(chunks):
continue
chunks = numpy.array(chunks, dtype='float32')
chunkPositions = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
chunkPositions[:, :, (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
chunkPositions[:, :, (0, 2)] *= size
chunkPositions[:, :, (0, 2)] += chunks[:, numpy.newaxis, :]
chunkPositions *= 16
yield chunkPositions
return list(arrays())
def chunkMarkers(chunkSet):
""" Returns a mapping { size: [position, ...] } for different powers of 2
as size.
"""
sizedChunks = defaultdict(list)
size = 1
def all4(cx, cz):
cx &= ~size
cz &= ~size
return [(cx, cz), (cx + size, cz), (cx + size, cz + size), (cx, cz + size)]
# lastsize = 6
size = 1
while True:
nextsize = size << 1
chunkSet = set(chunkSet)
while len(chunkSet):
cx, cz = chunkSet.pop()
chunkSet.add((cx, cz))
o = all4(cx, cz)
others = set(o).intersection(chunkSet)
if len(others) == 4:
sizedChunks[nextsize].append(o[0])
for c in others:
chunkSet.discard(c)
else:
for c in others:
sizedChunks[size].append(c)
chunkSet.discard(c)
if len(sizedChunks[nextsize]):
chunkSet = set(sizedChunks[nextsize])
sizedChunks[nextsize] = []
size <<= 1
else:
break
return sizedChunks
| [
"[email protected]"
] | |
6eab8917304d95312065f0cf0e49b6057e96f5c3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/131/usersdata/232/37596/submittedfiles/al10.py | 4832cdecb96c52b8bdafae42640590571de174b0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | # -*- coding: utf-8 -*-
# DO NOT DELETE THE LINE ABOVE. START BELOW THIS LINE
n = int(input('Enter the number of terms to compute: '))
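# Wallis product (assumed intent): pi/2 = product over k >= 1 of (2k/(2k-1)) * (2k/(2k+1))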
pi = 1
for k in range(1, n+1):
    pi = pi*(2*k/(2*k-1))*(2*k/(2*k+1))
pi = pi*2
print('%.5f'%pi)
| [
"[email protected]"
] | |
05ad692df50100b660ac54b791457f586c290261 | a209ce9617d2e135954d1e713b66540c252e3ea6 | /myvenv/bin/easy_install-3.8 | 0f5e416a38cdba8d6b743a9eb2177223b5a34e2a | [] | no_license | margaux-byte/mon-nouveau-blog | cff654eb216cb31180348056a483b6f50c7b206c | c16ff0300377ec7a450181c8c61b12a3096560b9 | refs/heads/master | 2020-08-22T10:05:46.031358 | 2019-10-20T13:24:54 | 2019-10-20T13:24:54 | 216,371,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | 8 | #!/Users/carlamoltosylvander/Documents/djangogirls/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
5c72c3a1d0abf844c2e1fb52ff54d7df6d7b1685 | 4d98ac51b576e1d104cec50ecb510202b3f1fdaa | /pkg_config/__main__.py | f07c587299aaaed79dcbde454fb37c673d990455 | [] | no_license | cournape/pkg-config | 8b0ef687a4e0888d905d3eeb3fe56dd8e618a38c | ac7a6e61140b2cc588b514d02c62bdc401f41d73 | refs/heads/master | 2021-01-22T02:13:02.314974 | 2017-02-06T00:14:20 | 2017-02-06T00:14:20 | 81,031,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | from __future__ import print_function
import argparse
import sys
from pkg_config.errors import PCFileNotFound
from pkg_config._commands import find_pc_file, list_all
from pkg_config._models import PackageInfo
VERSION = "0.0.1"
SEARCH_DIRECTORIES = [
"/usr/local/lib/pkgconfig",
"/usr/local/share/pkgconfig",
"/usr/lib/pkgconfig",
"/usr/local/Homebrew/Library/Homebrew/os/mac/pkgconfig/10.11",
]
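# Hard-coded default directories searched for .pc files; the PKG_CONFIG_PATH
# environment variable is not consulted when building this list.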
def main(argv=None):
argv = argv or sys.argv[1:]
parser = argparse.ArgumentParser(
description=u"pkg-config reimplementation in python.")
parser.add_argument(
u"--cflags", help=u"output all pre-processor and compiler flags",
action="store_true"
)
parser.add_argument(
u"--libs", help=u"output all linker flags", action="store_true"
)
parser.add_argument(
u"--list-all", help=u"list all known packages", action="store_true"
)
parser.add_argument(u"--modversion", action="store_true")
parser.add_argument(
u"--print-requires-private", action="store_true",
)
parser.add_argument(
u"--version", help=u"Print version and exits", action="store_true"
)
parser.add_argument(u"pc_file", nargs="?")
namespace = parser.parse_args(argv)
if namespace.version:
print(VERSION)
sys.exit(0)
if namespace.list_all:
list_all(SEARCH_DIRECTORIES)
sys.exit(0)
if namespace.pc_file is None:
print(u"Must specify package names on the command line")
sys.exit(0)
try:
p = find_pc_file(SEARCH_DIRECTORIES, namespace.pc_file)
except PCFileNotFound:
print(
u"Package tls was not found in the pkg-config search path.\n"
"Perhaps you should add the directory containing `{0}.pc'\n"
"to the PKG_CONFIG_PATH environment variable\n"
"No package '{0}' found".format(namespace.pc_file)
)
sys.exit(1)
pkg_info = PackageInfo.from_path(p)
if namespace.cflags:
print(pkg_info.cflags)
sys.exit(0)
if namespace.libs:
print(pkg_info.libs)
sys.exit(0)
if namespace.modversion:
print(pkg_info.version)
sys.exit(0)
if namespace.print_requires_private:
print("\n".join(pkg_info.requires_private))
sys.exit(0)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1dc36b5a99eb162fef96d10ca19cd0b9a53582e1 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/aio/operations/_server_security_alert_policies_operations.py | 10f2a6ad01572a9d53d8f1a0b278ec84e27675ad | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 21,543 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._server_security_alert_policies_operations import (
build_create_or_update_request,
build_get_request,
build_list_by_server_request,
)
from .._vendor import MySQLManagementClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServerSecurityAlertPoliciesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.rdbms.mysql.aio.MySQLManagementClient`'s
:attr:`server_security_alert_policies` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self,
resource_group_name: str,
server_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyName],
**kwargs: Any
) -> _models.ServerSecurityAlertPolicy:
"""Get a server's security alert policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param security_alert_policy_name: The name of the security alert policy. "Default" Required.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mysql.models.SecurityAlertPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerSecurityAlertPolicy or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: Literal["2017-12-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerSecurityAlertPolicy]
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
security_alert_policy_name=security_alert_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyName],
parameters: Union[_models.ServerSecurityAlertPolicy, IO],
**kwargs: Any
) -> Optional[_models.ServerSecurityAlertPolicy]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: Literal["2017-12-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ServerSecurityAlertPolicy]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ServerSecurityAlertPolicy")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
server_name=server_name,
security_alert_policy_name=security_alert_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyName],
parameters: _models.ServerSecurityAlertPolicy,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ServerSecurityAlertPolicy]:
"""Creates or updates a threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param security_alert_policy_name: The name of the threat detection policy. "Default" Required.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mysql.models.SecurityAlertPolicyName
:param parameters: The server security alert policy. Required.
:type parameters: ~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerSecurityAlertPolicy or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyName],
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ServerSecurityAlertPolicy]:
"""Creates or updates a threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param security_alert_policy_name: The name of the threat detection policy. "Default" Required.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mysql.models.SecurityAlertPolicyName
:param parameters: The server security alert policy. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerSecurityAlertPolicy or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyName],
parameters: Union[_models.ServerSecurityAlertPolicy, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.ServerSecurityAlertPolicy]:
"""Creates or updates a threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param security_alert_policy_name: The name of the threat detection policy. "Default" Required.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mysql.models.SecurityAlertPolicyName
:param parameters: The server security alert policy. Is either a model type or a IO type.
Required.
:type parameters: ~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerSecurityAlertPolicy or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: Literal["2017-12-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerSecurityAlertPolicy]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
server_name=server_name,
security_alert_policy_name=security_alert_policy_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}"} # type: ignore
@distributed_trace
def list_by_server(
self, resource_group_name: str, server_name: str, **kwargs: Any
) -> AsyncIterable["_models.ServerSecurityAlertPolicy"]:
"""Get the server's threat detection policies.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServerSecurityAlertPolicy or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.mysql.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: Literal["2017-12-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerSecurityAlertPolicyListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_server.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ServerSecurityAlertPolicyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_server.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/securityAlertPolicies"} # type: ignore
| [
"[email protected]"
] | |
b069a9412f83db8f978c0847ed1620c7df76136a | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/aldebaran/qibuild/python/qisrc/snapshot.py | f59cbcb2a82c4aea7f2a376ac8bec88db32698d8 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 4,166 | py | ## Copyright (c) 2012-2016 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
"""Functions to generate and load snapshot."""
import collections
import json
from qisys import ui
import qisys.error
import qisrc.git
import qisrc.status
import qisrc.reset
import qisrc.sync
class Snapshot(object):
""" Just a container for a git worktree snapshot """
def __init__(self):
self.refs = collections.OrderedDict()
self.manifest = qisrc.sync.LocalManifest()
self.format_version = None
def dump(self, output_path, deprecated_format=True):
""" Dump the snapshot into a human readable file """
if deprecated_format:
self._dump_deprecated(output_path)
else:
self._dump_json(output_path)
def _dump_deprecated(self, output_path):
srcs = self.refs.keys()
with open(output_path, 'w') as fp:
for src in srcs:
fp.write(src + ":" + self.refs[src] + "\n")
def _dump_json(self, output_path):
with open(output_path, "w") as fp:
serializable_manifest = dict()
serializable_manifest["url"] = self.manifest.url
serializable_manifest["branch"] = self.manifest.branch
serializable_manifest["groups"] = self.manifest.groups
if self.manifest.ref:
serializable_manifest["ref"] = self.manifest.ref
to_dump = {
"format" : 2,
"manifest" : serializable_manifest,
"refs" : self.refs
}
json.dump(to_dump, fp, indent=2)
def load(self, source):
""" Load a snapshot from a file path or a file object """
# Try to open, else assume it's a file object
try:
fp = open(source, "r")
data = fp.read()
except TypeError:
data = source.read()
try:
# Load JSON into an OrderedDict
parsed = json.loads(data, object_pairs_hook=collections.OrderedDict)
self._load_json(parsed)
except ValueError:
self._load_deprecated(data)
try:
source.close()
except AttributeError:
pass
def _load_deprecated(self, source):
for line in source.splitlines():
try:
(src, sha1) = line.split(":")
except ValueError:
ui.error("could not parse", line)
continue
src = src.strip()
sha1 = sha1.strip()
self.refs[src] = sha1
def _load_json(self, parsed_json):
self.format_version = parsed_json["format"]
if self.format_version == 1:
manifest_json = parsed_json["manifests"]["default"]
elif self.format_version == 2:
manifest_json = parsed_json["manifest"]
else:
raise qisys.error.Error(
"unknown format: %s" % self.format_version)
self.refs = parsed_json["refs"]
for key, value in manifest_json.iteritems():
setattr(self.manifest, key, value)
def __eq__(self, other):
if not isinstance(other, Snapshot):
return False
return other.refs == self.refs and other.manifest == self.manifest
def __ne__(self, other):
return not self.__eq__(other)
def generate_snapshot(git_worktree, output_path, deprecated_format=True):
snapshot = git_worktree.snapshot()
ui.info(ui.green, "Snapshot generated in", ui.white, output_path)
return snapshot.dump(output_path, deprecated_format=deprecated_format)
def load_snapshot(git_worktree, input_path):
"""Load a snapshot file and reset projects."""
snapshot = Snapshot()
ui.info(ui.green, "Loading snapshot from", ui.white, input_path)
snapshot.load(input_path)
for (src, ref) in snapshot.refs.iteritems():
ui.info("Loading", src)
git_project = git_worktree.get_git_project(src, raises=False)
if git_project:
qisrc.reset.clever_reset_ref(git_project, ref)
| [
"[email protected]"
] | |
675c537063a61902fa38a06372e2646e5734afe6 | 5ddcd95c0bbf27573f60cffd43fbe872432bb8fe | /test/language/offsets/python/ParameterOffsetTest.py | b833caa24291a1f9d3c1c94b74a316d188e65caa | [
"BSD-3-Clause"
] | permissive | chenpeihua/zserio | def7ba52b27a20673561e9f0fa9a78b12627fcc1 | c021d6f943f25c2eb7d91712eb7bd5de13f9c8bc | refs/heads/master | 2021-05-18T11:33:07.688831 | 2020-06-21T13:25:50 | 2020-06-21T13:25:50 | 251,227,439 | 0 | 0 | BSD-3-Clause | 2020-06-21T13:25:51 | 2020-03-30T07:04:56 | null | UTF-8 | Python | false | false | 4,049 | py | import unittest
import zserio
from testutils import getZserioApi
class ParameterOffsetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "offsets.zs").parameter_offset
def testBitSizeOf(self):
createWrongOffset = False
school = self._createSchool(createWrongOffset)
self.assertEqual(self.SCHOOL_BIT_SIZE, school.bitSizeOf())
def testBitSizeOfWithPosition(self):
createWrongOffset = False
school = self._createSchool(createWrongOffset)
bitPosition = 2
self.assertEqual(self.SCHOOL_BIT_SIZE + 8 - bitPosition, school.bitSizeOf(bitPosition))
def testInitializeOffsets(self):
createWrongOffset = True
school = self._createSchool(createWrongOffset)
bitPosition = 0
self.assertEqual(self.SCHOOL_BIT_SIZE, school.initializeOffsets(bitPosition))
self._checkSchool(school)
def testInitializeOffsetsWithPosition(self):
createWrongOffset = True
school = self._createSchool(createWrongOffset)
bitPosition = 2
self.assertEqual(self.SCHOOL_BIT_SIZE + 8, school.initializeOffsets(bitPosition))
self._checkSchool(school, bitPosition)
def testRead(self):
writeWrongOffset = False
writer = zserio.BitStreamWriter()
self._writeSchoolToStream(writer, writeWrongOffset)
reader = zserio.BitStreamReader(writer.getByteArray())
school = self.api.School.fromReader(reader)
self._checkSchool(school)
def testReadWrongOffsets(self):
writeWrongOffset = True
writer = zserio.BitStreamWriter()
self._writeSchoolToStream(writer, writeWrongOffset)
reader = zserio.BitStreamReader(writer.getByteArray())
with self.assertRaises(zserio.PythonRuntimeException):
self.api.School.fromReader(reader)
def testWrite(self):
createWrongOffset = True
school = self._createSchool(createWrongOffset)
writer = zserio.BitStreamWriter()
school.write(writer)
self._checkSchool(school)
reader = zserio.BitStreamReader(writer.getByteArray())
readSchool = self.api.School.fromReader(reader)
self._checkSchool(readSchool)
self.assertTrue(school == readSchool)
def testWriteWithPosition(self):
createWrongOffset = True
school = self._createSchool(createWrongOffset)
writer = zserio.BitStreamWriter()
bitPosition = 2
writer.writeBits(0, bitPosition)
school.write(writer)
self._checkSchool(school, bitPosition)
def testWriteWrongOffset(self):
createWrongOffset = True
school = self._createSchool(createWrongOffset)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
school.write(writer, callInitializeOffsets=False)
def _writeSchoolToStream(self, writer, writeWrongOffset):
writer.writeBits(self.SCHOOL_ID, 16)
writer.writeBits(self.WRONG_ROOM_OFFSET if writeWrongOffset else self.ROOM_OFFSET, 32)
writer.writeBits(self.ROOM_ID, 16)
def _checkSchool(self, school, bitPosition=0):
self.assertEqual(self.SCHOOL_ID, school.getSchoolId())
expectedRoomOffset = (self.ROOM_OFFSET if (bitPosition == 0) else
self.ROOM_OFFSET + (bitPosition // 8) + 1)
self.assertEqual(expectedRoomOffset, school.getOffsetHolder().getRoomOffset())
self.assertEqual(self.ROOM_ID, school.getRoom().getRoomId())
def _createSchool(self, createWrongOffset):
roomOffset = self.WRONG_ROOM_OFFSET if createWrongOffset else self.ROOM_OFFSET
offsetHolder = self.api.OffsetHolder.fromFields(roomOffset)
room = self.api.Room.fromFields(offsetHolder, self.ROOM_ID)
return self.api.School.fromFields(self.SCHOOL_ID, offsetHolder, room)
SCHOOL_ID = 0x01
ROOM_ID = 0x11
WRONG_ROOM_OFFSET = 0
ROOM_OFFSET = 6
SCHOOL_BIT_SIZE = (6 + 2) * 8
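    # 16-bit school id + 32-bit room offset = 6 bytes (hence ROOM_OFFSET = 6), plus the 16-bit room id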
| [
"[email protected]"
] | |
380a75aa4193fe3f3d3ed67f82ed8337f7fde3fa | a9ca00b277b90b16ac1a423e9b43697663dc9522 | /plastex/plasTeX/Base/LaTeX/Arrays.py | ec9ca286beeb1c06f40cae1c24748ff6d9f9f7b7 | [
"MIT"
] | permissive | gcdr/plastex-oreilly | 5873f06be21a87d6315c5d94b6900fb0258042a2 | ddc1472f9b1f15c8c2347f0d04573ce9450c6f72 | refs/heads/master | 2021-06-09T20:43:13.765276 | 2016-12-21T16:00:09 | 2016-12-21T16:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,074 | py | #!/usr/bin/env python
"""
C.10.2 The array and tabular Environments
"""
import new, sys
from plasTeX import Macro, Environment, Command, DimenCommand
from plasTeX import sourceChildren, sourceArguments
class ColumnType(Macro):
columnAttributes = {}
columnTypes = {}
def __init__(self, *args, **kwargs):
Macro.__init__(self, *args, **kwargs)
self.style.update(self.columnAttributes)
@classmethod
def new(cls, name, attributes, args='', before=[], after=[], between=[]):
"""
Generate a new column type definition
Required Arguments:
name -- name of the column type
attributes -- dictionary of style attributes for this column
Keyword Arguments:
args -- argument description string
before -- tokens to insert before this column
after -- tokens to insert after this column
"""
newclass = new.classobj(name, (cls,),
{'columnAttributes':attributes, 'args':args,
'before':before, 'after':after, 'between':between})
cls.columnTypes[name] = newclass
def __repr__(self):
return '%s: %s' % (type(self).__name__, self.style)
ColumnType.new('r', {'text-align':'right'})
ColumnType.new('R', {'text-align':'right'})
ColumnType.new('c', {'text-align':'center'})
ColumnType.new('C', {'text-align':'center'})
ColumnType.new('l', {'text-align':'left'})
ColumnType.new('L', {'text-align':'left'})
ColumnType.new('J', {'text-align':'left'})
ColumnType.new('X', {'text-align':'left'})
ColumnType.new('p', {'text-align':'left'}, args='width:str')
ColumnType.new('d', {'text-align':'right'}, args='delim:str')
class Array(Environment):
"""
Base class for all array-like structures
"""
colspec = None
blockType = True
captionable = True
class caption(Command):
""" Table caption """
args = '* [ toc ] self'
labelable = True
counter = 'table'
blockType = True
def invoke(self, tex):
res = Command.invoke(self, tex)
self.title = self.captionName
return res
class CellDelimiter(Command):
""" Cell delimiter """
macroName = 'active::&'
def invoke(self, tex):
# Pop and push a new context for each cell, this keeps
# any formatting changes from the previous cell from
# leaking over into the next cell
self.ownerDocument.context.pop()
self.ownerDocument.context.push()
# Add a phantom cell to absorb the appropriate tokens
return [self, self.ownerDocument.createElement('ArrayCell')]
class EndRow(Command):
""" End of a row """
macroName = '\\'
args = '* [ space ]'
def invoke(self, tex):
# Pop and push a new context for each row, this keeps
# any formatting changes from the previous row from
# leaking over into the next row
self.ownerDocument.context.pop()
self.parse(tex)
self.ownerDocument.context.push()
# Add a phantom row and cell to absorb the appropriate tokens
return [self, self.ownerDocument.createElement('ArrayRow'),
self.ownerDocument.createElement('ArrayCell')]
class cr(EndRow):
macroName = None
args = ''
class tabularnewline(EndRow):
macroName = None
args = ''
class BorderCommand(Command):
"""
Base class for border commands
"""
BORDER_BEFORE = 0
BORDER_AFTER = 1
position = BORDER_BEFORE
def applyBorders(self, cells, location=None):
"""
Apply borders to the given cells
Required Arguments:
location -- place where the border should be applied.
This should be 'top', 'bottom', 'left', or 'right'
cells -- iterable containing cell instances to apply
the borders
"""
# Find out if the border should start and stop, or just
# span the whole table.
a = self.attributes
if a and a.has_key('span'):
try: start, end = a['span']
except TypeError: start = end = a['span']
else:
start = -sys.maxint
end = sys.maxint
# Determine the position of the border
if location is None:
location = self.locations[self.position]
colnum = 1
for cell in cells:
if colnum < start or colnum > end:
colnum += 1
continue
cell.style['border-%s-style' % location] = 'solid'
cell.style['border-%s-color' % location] = 'black'
cell.style['border-%s-width' % location] = '1px'
if cell.attributes:
colnum += cell.attributes.get('colspan', 1)
else:
colnum += 1
class hline(BorderCommand):
""" Full horizontal line """
locations = ('top','bottom')
class vline(BorderCommand):
""" Vertical line """
locations = ('left','right')
#
# booktabs commands
#
class cline(hline):
""" Partial horizontal line """
args = 'span:list(-):int'
class _rule(hline):
""" Full horizontal line """
args = '[ width:str ]'
class toprule(_rule):
pass
class midrule(_rule):
pass
class bottomrule(_rule):
pass
class cmidrule(cline):
args = '[ width:str ] ( trim:str ) span:list(-):int'
class morecmidrules(Command):
pass
class addlinespace(Command):
args = '[ width:str ]'
class specialrule(Command):
args = 'width:str above:str below:str'
# end booktabs
class ArrayRow(Macro):
""" Table row class """
endToken = None
def digest(self, tokens):
# Absorb tokens until the end of the row
self.endToken = self.digestUntil(tokens, Array.EndRow)
if self.endToken is not None:
tokens.next()
self.endToken.digest(tokens)
@property
def source(self):
"""
This source property is a little different than most.
Instead of printing just the source of the row, it prints
out the entire environment with just this row as its content.
This allows renderers to render images for arrays a row
at a time.
"""
name = self.parentNode.nodeName or 'array'
escape = '\\'
s = []
argSource = sourceArguments(self.parentNode)
if not argSource:
argSource = ' '
s.append('%sbegin{%s}%s' % (escape, name, argSource))
for cell in self:
s.append(sourceChildren(cell, par=not(self.parentNode.mathMode)))
if cell.endToken is not None:
s.append(cell.endToken.source)
if self.endToken is not None:
s.append(self.endToken.source)
s.append('%send{%s}' % (escape, name))
return ''.join(s)
def applyBorders(self, tocells=None, location=None):
"""
Apply borders to every cell in the row
Keyword Arguments:
row -- the row of cells to apply borders to. If none
is given, then use the current row
"""
if tocells is None:
tocells = self
for cell in self:
horiz, vert = cell.borders
# Horizontal borders go across all columns
for border in horiz:
border.applyBorders(tocells, location=location)
# Vertical borders only get applied to the same column
for applyto in tocells:
for border in vert:
border.applyBorders([applyto], location=location)
@property
def isBorderOnly(self):
""" Does this row exist only for applying borders? """
for cell in self:
if not cell.isBorderOnly:
return False
return True
class ArrayCell(Macro):
""" Table cell class """
endToken = None
isHeader = False
def digest(self, tokens):
self.endToken = self.digestUntil(tokens, (Array.CellDelimiter,
Array.EndRow))
if isinstance(self.endToken, Array.CellDelimiter):
tokens.next()
self.endToken.digest(tokens)
else:
self.endToken = None
# Check for multicols
self.hasmulticol = False
for item in self:
if item.attributes and item.attributes.has_key('colspan'):
self.attributes['colspan'] = item.attributes['colspan']
self.hasmulticol = True
if hasattr(item, 'colspec') and not isinstance(item, Array):
self.colspec = item.colspec
if hasattr(item, 'isHeader'):
self.isHeader = item.isHeader
# Cache the border information. This must be done before
# grouping paragraphs since a paragraph might swallow
# an hline/vline/cline command.
h,v = self.borders
# Throw out the border commands, we're done with them
# for i in range(len(self)-1, -1, -1):
# if isinstance(self[i], Array.BorderCommand):
# self.pop(i)
self.paragraphs()
@property
def borders(self):
"""
Return all of the border control macros
Returns:
list of border command instances
"""
# Use cached version if it exists
if hasattr(self, '@borders'):
return getattr(self, '@borders')
horiz, vert = [], []
# Locate the border control macros at the end of the cell
for i in range(len(self)-1, -1, -1):
item = self[i]
if item.isElementContentWhitespace:
continue
if isinstance(item, Array.hline):
item.position = Array.hline.BORDER_AFTER
horiz.append(item)
continue
elif isinstance(item, Array.vline):
item.position = Array.vline.BORDER_AFTER
vert.append(item)
continue
break
# Locate border control macros at the beginning of the cell
for item in self:
if item.isElementContentWhitespace:
continue
if isinstance(item, Array.hline):
item.position = Array.hline.BORDER_BEFORE
horiz.append(item)
continue
elif isinstance(item, Array.vline):
item.position = Array.vline.BORDER_BEFORE
vert.append(item)
continue
break
setattr(self, '@borders', (horiz, vert))
return horiz, vert
@property
def isBorderOnly(self):
""" Does this cell exist only for applying borders? """
for par in self:
for item in par:
if item.isElementContentWhitespace:
continue
elif isinstance(item, Array.BorderCommand):
continue
return False
return True
@property
def source(self):
# Don't put paragraphs into math mode arrays
if self.parentNode is None:
# no parentNode, assume mathMode==False
return sourceChildren(self, True)
return sourceChildren(self,
par=not(self.parentNode.parentNode.mathMode))
class multicolumn(Command):
""" Column spanning cell """
args = 'colspan:int colspec:nox self'
isHeader = False
def invoke(self, tex):
Command.invoke(self, tex)
self.colspec = Array.compileColspec(tex, self.attributes['colspec']).pop(0)
def digest(self, tokens):
Command.digest(self, tokens)
#self.paragraphs()
def invoke(self, tex):
if self.macroMode == Macro.MODE_END:
self.ownerDocument.context.pop(self) # End of table, row, and cell
return
Environment.invoke(self, tex)
#!!!
#
# Need to handle colspec processing here so that tokens that must
# be inserted before and after columns are known
#
#!!!
if self.attributes.has_key('colspec'):
self.colspec = Array.compileColspec(tex, self.attributes['colspec'])
self.ownerDocument.context.push() # Beginning of cell
# Add a phantom row and cell to absorb the appropriate tokens
return [self, self.ownerDocument.createElement('ArrayRow'),
self.ownerDocument.createElement('ArrayCell')]
def digest(self, tokens):
Environment.digest(self, tokens)
# Give subclasses a hook before going on
self.processRows()
self.applyBorders()
self.linkCells()
def processRows(self):
"""
Subcloss hook to process rows after digest
Tables are fairly complex structures, so subclassing them
in a useful way can be difficult. This method was added
simply to allow subclasses to have access to the content of a
table immediately after the digest method.
"""
pass
def linkCells(self):
"""
Add attributes to spanning cells to indicate their start and end points
This information is added mainly for DocBook's table model.
It does spans by indicating the starting and ending points within
the table rather than just saying how many columns are spanned.
"""
self.hasmulticol = False
# Link cells to colspec
if self.colspec:
for r, row in enumerate(self):
for c, cell in enumerate(row):
if cell.hasmulticol:
self.hasmulticol = True
colspan = cell.attributes.get('colspan', 0)
if colspan > 1:
try:
cell.colspecStart = self.colspec[c]
cell.colspecEnd = self.colspec[c+colspan-1]
cell.namest = 'c%d' % (c+1)
cell.nameend = 'c%d' % (c+colspan)
except IndexError:
if hasattr(cell, 'colspecStart'):
del cell.colspecStart
if hasattr(cell, 'colspecEnd'):
del cell.colspecEnd
# Determine the number of rows by counting cells
if len(self):
cols = []
for row in self:
numcols = 0
for cell in row:
numcols += cell.attributes.get('colspan', 1)
cols.append(numcols)
self.numCols = max(cols)
self.colNames = ['c%d' % (i+1) for i in range(self.numCols)]
def applyBorders(self):
"""
Apply borders from \\(h|c|v)line and colspecs
"""
lastrow = len(self) - 1
emptyrows = []
prev = None
for i, row in enumerate(self):
if not isinstance(row, Array.ArrayRow):
continue
# If the row is only here to apply borders, apply the
# borders to the adjacent row. Empty rows are deleted later.
if row.isBorderOnly:
if i == 0 and lastrow:
row.applyBorders(self[1], 'top')
elif prev is not None:
row.applyBorders(prev, 'bottom')
emptyrows.insert(0, i)
else:
row.applyBorders()
if self.colspec:
# Expand multicolumns so that they don't mess up
# the colspec attributes
cells = []
for cell in row:
span = 1
if cell.attributes:
span = cell.attributes.get('colspan', 1)
cells += [cell] * span
for spec, cell in zip(self.colspec, cells):
spec = getattr(cell, 'colspec', spec)
cell.style.update(spec.style)
prev = row
# Pop empty rows
for i in emptyrows:
self.pop(i)
@classmethod
def compileColspec(cls, tex, colspec):
"""
Compile colspec into an object
Required Arguments:
colspec -- an unexpanded token list that contains a LaTeX colspec
Returns:
list of `ColumnType` instances
"""
output = []
colspec = iter(colspec)
before = None
leftborder = None
tex.pushToken(Array)
tex.pushTokens(colspec)
for tok in tex.itertokens():
if tok is Array:
break
if tok.isElementContentWhitespace:
continue
if tok == '|':
if not output:
leftborder = True
else:
output[-1].style['border-right'] = '1px solid black'
continue
if tok == '>':
before = tex.readArgument()
continue
if tok == '<':
output[-1].after = tex.readArgument()
continue
if tok == '@':
if output:
output[-1].between = tex.readArgument()
continue
if tok == '*':
num = tex.readArgument(type=int, expanded=True)
spec = tex.readArgument()
for i in range(num):
tex.pushTokens(spec)
continue
output.append(ColumnType.columnTypes.get(tok, ColumnType)())
if tok.lower() in ['p','d']:
tex.readArgument()
if before:
output[-1].before = before
before = None
if leftborder:
output[0].style['border-left'] = '1px solid black'
return output
@property
def source(self):
"""
This source property is a little different than most.
Instead of calling the source property of the child nodes,
it walks through the rows and cells manually. It does
this because rows and cells have special source properties
as well that don't return the correct markup for inserting
into this source property.
"""
name = self.nodeName
escape = '\\'
# \begin environment
# If self.childNodes is not empty, print out the entire environment
if self.macroMode == Macro.MODE_BEGIN:
s = []
argSource = sourceArguments(self)
if not argSource:
argSource = ' '
s.append('%sbegin{%s}%s' % (escape, name, argSource))
if self.hasChildNodes():
for row in self:
for cell in row:
s.append(sourceChildren(cell, par=not(self.mathMode)))
if cell.endToken is not None:
s.append(cell.endToken.source)
if row.endToken is not None:
s.append(row.endToken.source)
s.append('%send{%s}' % (escape, name))
return ''.join(s)
# \end environment
if self.macroMode == Macro.MODE_END:
return '%send{%s}' % (escape, name)
class array(Array):
args = '[ pos:str ] colspec:nox'
mathMode = True
class nonumber(Command):
pass
class tabular(Array):
args = '[ pos:str ] colspec:nox'
class TabularStar(tabular):
macroName = 'tabular*'
args = 'width:dimen [ pos:str ] colspec:nox'
class tabularx(Array):
args = 'width:nox colspec:nox'
class tabulary(Array):
args = 'width:nox colspec:nox'
# Style Parameters
class arraycolsep(DimenCommand):
value = DimenCommand.new(0)
class tabcolsep(DimenCommand):
value = DimenCommand.new(0)
class arrayrulewidth(DimenCommand):
value = DimenCommand.new(0)
class doublerulesep(DimenCommand):
value = DimenCommand.new(0)
class arraystretch(Command):
unicode = '1'
| [
"[email protected]"
] | |
ca361226e992558e3c170b106de71efa1cc2421d | cb491f83882fea0627460f1de1e223309eb930c3 | /src/part_two/ex10.py | be95f30ad8ea6464077227c7198a09e3cf3ff2f4 | [] | no_license | impreza555/geekbrains-python-exercises | 4b1bef4a284ac1c6f4c9191644f31f2f99a90711 | 1e56b0820cc85f516c132d8c8aa0f8c3c60daafb | refs/heads/master | 2022-06-17T17:51:36.540907 | 2020-05-04T16:25:20 | 2020-05-09T00:08:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | """
There is a file example.txt with several lines written in it.
You need to count the number of lines and the number of words in each line.
Print the result in the format:
lines - X, words - Y
Example file:
```
first
second-line
third line
fourth line
```
"""
with open('example.txt') as f:
rows = f.readlines()
words = [row.split() for row in rows]
rows_count, words_count = len(rows), sum([len(word_list) for word_list in words])
print(f"строк - {rows_count}, слов - {words_count}")
| [
"[email protected]"
] | |
5ce8c78a24d4151458505b17c21bcfdc5fff63f7 | dd098f8a93f787e38676283679bb39a290ba28b4 | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/model/ipv6_format.py | ec484a1d8a41c468b910cc4568018f0f6d1782d7 | [
"Apache-2.0"
] | permissive | InfoSec812/openapi-generator | 727c0235d3bad9b85ac12068808f844287af6003 | e0c72702c3d5dae2a627a2926f0cddeedca61e32 | refs/heads/master | 2022-10-22T00:31:33.318867 | 2022-08-20T14:10:31 | 2022-08-20T14:10:31 | 152,479,633 | 1 | 0 | Apache-2.0 | 2023-09-04T23:34:09 | 2018-10-10T19:38:43 | Java | UTF-8 | Python | false | false | 628 | py | # coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import functools # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
Ipv6Format = schemas.AnyTypeSchema
| [
"[email protected]"
] | |
53fac1b43cd3394624481aba748efd21b8096893 | c0e819c144aa85b860c9da29d5b7a93d5fad1ee6 | /exercises/05_basic_scripts/test_task_5_1.py | 68d9cc60b0dfa25c038ef7e435b50cf410968caf | [] | no_license | haskhr/pyneng-examples-exercises-en | ecf9fa78e57409cbab3e94d3d7a952ac966c0477 | 52e804f2942afefd626ebbddd8f4ec8a2b467b69 | refs/heads/main | 2023-03-12T14:41:43.293908 | 2021-03-10T05:32:25 | 2021-03-10T05:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | from importlib import reload
import sys
import pytest
# Checking that the test is called via pytest ... and not python ...
from _pytest.assertion.rewrite import AssertionRewritingHook
if not isinstance(__loader__, AssertionRewritingHook):
print(f"Tests should be called using this expression:\npytest {__file__}\n\n")
def test_task_r2(capsys, monkeypatch):
"""
Task check for r2
"""
monkeypatch.setattr("builtins.input", lambda x=None: "r2")
import task_5_1
out, err = capsys.readouterr()
r2_dict = {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "4451",
"ios": "15.4",
"ip": "10.255.0.2",
}
assert (
out
), "Nothing is printed to stdout. It is necessary not only to get the correct result, but also to print it to the stdout using printprint"
assert (
str(r2_dict) in out.strip()
), "Wrong output is printed to stdout"
def test_task_sw1(capsys, monkeypatch):
"""
Task check for sw1
"""
monkeypatch.setattr("builtins.input", lambda x=None: "sw1")
if sys.modules.get("task_5_1"):
reload(sys.modules["task_5_1"])
import task_5_1
out, err = capsys.readouterr()
sw1_dict = {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "3850",
"ios": "3.6.XE",
"ip": "10.255.0.101",
"vlans": "10,20,30",
"routing": True,
}
assert (
out
), "Nothing is printed to stdout. It is necessary not only to get the correct result, but also to print it to the stdout using printprint"
assert (
str(sw1_dict) in out.strip()
), "Wrong output is printed to stdout"
| [
"[email protected]"
] | |
a55f535653ad76ffb57e459e3eb819f76a4d41bc | 962feeffee41625ff841f6590f97bb09cef9be4c | /torch_glow/tests/nodes/sigmoid_test.py | d7959a93086ff7d53a580260fb035b023882494c | [
"Apache-2.0"
] | permissive | SushantDaga/glow | 8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc | aab22c3e0421dadd29950c2ebfa88b86027cecf5 | refs/heads/master | 2022-11-03T08:39:33.958233 | 2020-06-19T17:03:14 | 2020-06-19T17:05:42 | 273,568,864 | 2 | 0 | Apache-2.0 | 2020-06-19T19:12:31 | 2020-06-19T19:12:30 | null | UTF-8 | Python | false | false | 781 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests.utils import jitVsGlow
import unittest
class TestSigmoid(unittest.TestCase):
def test_sigmoid_basic(self):
"""Basic test of the PyTorch sigmoid Node on Glow"""
def sigmoid_basic(a):
c = a + a
return c.sigmoid()
x = torch.randn(6)
jitVsGlow(sigmoid_basic, x, expected_fused_ops={"aten::sigmoid"})
def test_sigmoid_inplace(self):
"""Test of the inplace PyTorch sigmoid Node on Glow"""
def sigmoid_inplace(a):
c = a + a
return c.sigmoid_()
x = torch.randn(6)
jitVsGlow(sigmoid_inplace, x, expected_fused_ops={"aten::sigmoid_"})
| [
"[email protected]"
] | |
c1c5cf8d4cdec5bc603ee6a8b608d8826d56dc84 | 4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b | /Resueltos/Luciano_Chavarria/Python/WERTYU.py | 95ff18884e0265efec156419c034b72763c5a589 | [] | no_license | roca12/gpccodes | ab15eeedc0cadc0735651262887b44f1c2e65b93 | aa034a3014c6fb879ec5392c51f9714bdc5b50c2 | refs/heads/master | 2023-02-01T13:49:27.563662 | 2023-01-19T22:50:58 | 2023-01-19T22:50:58 | 270,723,328 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | while True:
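    # WERTYU: the text was typed with both hands shifted one key to the right on a
    # QWERTY keyboard, so every non-space character is replaced by the key to its
    # left in the row tuple `l`.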
try:
res = ''
l = ('`', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=',
'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', '[', ']','\\',
'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ';', "'",
'Z', 'X', 'C', 'V', 'B', 'N', 'M', ',', '.', '/')
string = input()
for i in range(len(string)):
if string[i] == ' ':
res += ' '
else:
res += l[l.index(string[i])-1]
print(res)
except EOFError:
break
| [
"[email protected]"
] | |
81ff25c56aa409ab69cb2482550934bbdb000ca9 | d0758e0ca004226cec8ad8b26c9565c98534a8b8 | /02-core/notebook2slides.py | 0cb0af330774502355decf098328bb702e6ddd7c | [] | no_license | pythoncanarias/eoi | 334d64a96afc76ac1fa10282378f291b6d8c94b3 | 349367254f85e3e4273cede067ca950913a1332c | refs/heads/master | 2023-07-06T08:00:11.366345 | 2023-06-30T15:19:33 | 2023-06-30T15:19:33 | 222,742,870 | 26 | 19 | null | 2023-06-25T16:03:46 | 2019-11-19T16:41:25 | Jupyter Notebook | UTF-8 | Python | false | false | 2,458 | py | #!/usr/bin/env python
'''
Inspired by https://goo.gl/SYWRbM and https://t.ly/8LAeY
Convert a jupyter notebook to slides (html) and apply some changes to default
settings (reveal.js, mathjax, ...)
Usage:
> nb.py <notebook.ipynb>
'''
import fileinput
import re
import shlex
import subprocess
import sys
from pathlib import Path
from prettyconf import config
# https://pygments.org/demo/#try
PYGMENTS_STYLE = config('PYGMENTS_STYLE', default='default')
# list of modifications to be made after generating the html slides
# each tuple has the form: (pattern, replacement) as regex
SETTINGS = [
(
r"(Reveal.addEventListener\('slidechanged', setScrollingSlide\);)",
# next slide with right cursor, previous slide with left cursor
# source: https://github.com/hakimel/reveal.js#keyboard-bindings
"Reveal.configure({ keyboard: {37:'prev', 39:'next',} });"
),
(
r'(MathJax.Hub.Config\({)',
# show the equation numbers
'TeX: { equationNumbers: {autoNumber: \"AMS\"} },'
),
(
r'(http[\S]+/reveal.js/)\d\.\d\.\d',
# update version of reveal.js
# https://cdnjs.com/libraries/reveal.js/3.7.0
'3.7.0'
),
(
r'(href=")custom.css',
# common css for all notebooks
'../custom.css'
)
]
def notebook_to_slides(ipynbfile_path):
print(f'Converting {ipynbfile_path} to html...')
notebook_path = Path(ipynbfile_path)
html_path = notebook_path.parent.joinpath(notebook_path.stem +
'.slides.html')
cmd = shlex.split(f'''
jupyter nbconvert {notebook_path}
--to slides --CSSHTMLHeaderPreprocessor.style={PYGMENTS_STYLE}''')
subprocess.run(cmd)
return html_path
def change_settings(htmlfile_path):
print(f'Changing settings of {htmlfile_path}...')
with fileinput.input(files=htmlfile_path, inplace=True) as f:
for line in f:
for setting in SETTINGS:
pattern, replace = setting
if re.search(pattern, line):
new_line = re.sub(pattern, rf'\g<1>{replace}', line)
break
else:
new_line = line
print(new_line, end='')
for file in sys.argv[1:]:
rendered_html_file = notebook_to_slides(file)
change_settings(rendered_html_file)
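# Illustration of how change_settings() applies one SETTINGS entry (the HTML
# line is a made-up example): for the pattern r'(href=")custom.css' with
# replacement '../custom.css',
#   re.sub(r'(href=")custom.css', r'\g<1>../custom.css',
#          '<link rel="stylesheet" href="custom.css">')
# returns '<link rel="stylesheet" href="../custom.css">', because \g<1>
# re-inserts the captured 'href="' prefix in front of the replacement text.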
| [
"[email protected]"
] | |
36c86b6336cccb99ca8f04fc10b155ab44100c37 | 890612db0bc6209134b6d7017775d5a86604b285 | /tests/data/text/bpe_test.py | 33e5ecb73b283cce3f305c3f6b8775c656b05f4c | [
"Apache-2.0"
] | permissive | hiyoung-asr/st | 6277fc5c1f123b5c6b09bb9ebbad779f6e08c987 | 634a71e3f1860c0db2f4f304a7828bb5560c34f0 | refs/heads/master | 2023-03-15T04:30:15.652714 | 2020-11-12T03:47:18 | 2020-11-12T03:47:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tempfile
import tensorflow as tf
from neurst.data.text.bpe import BPE
def test():
codes = ["技 术</w>", "发 展</w>"]
tmp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.io.gfile.GFile(tmp_file.name, "w") as fw:
fw.write("version\n")
fw.write("\n".join(codes) + "\n")
bpe = BPE(lang="zh",
glossaries=["迅速", "<-neplhd-hehe>"])
bpe.init_subtokenizer(tmp_file.name)
tokens = bpe.tokenize("技术 发展 迅猛", return_str=True)
assert tokens == "技术 发展 迅@@ 猛"
assert bpe.detokenize(tokens) == "技术 发展 迅猛"
tokens = bpe.tokenize("技术发展迅猛", return_str=True)
assert tokens == "技@@ 术@@ 发@@ 展@@ 迅@@ 猛"
assert bpe.detokenize(tokens) == "技术发展迅猛"
tokens = bpe.tokenize("技术迅速发展迅速 迅速 <-neplhd-hehe>", return_str=True)
assert tokens == "技术@@ 迅速@@ 发展@@ 迅速 迅速 <-neplhd-hehe>"
assert bpe.detokenize(tokens) == "技术迅速发展迅速 迅速 <-neplhd-hehe>"
os.remove(tmp_file.name)
if __name__ == "__main__":
test()
| [
"[email protected]"
] | |
b7723e87a26067ac539b187244e80cd998ae5c3a | f5cd89e46b7e9fb22b422557a3c4d0354e501110 | /app/main/admin.py | b8aafe5cb4c14a84808c29044c111203e8256f69 | [] | no_license | Alerion/Pharmacology-DB | 14d081fbab80db974258ebad7db4ab285ccdfda5 | 86ef48feecedce6fc1adc9aa1c4363044e9454f0 | refs/heads/master | 2021-07-25T00:55:14.142794 | 2021-02-12T18:18:12 | 2021-02-12T18:18:12 | 302,310 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from models import Drug, FarmAction, Illness
from django.http import HttpResponse
class DrugAdmin(admin.ModelAdmin):
def edit_vector(self, request, pk):
return HttpResponse('Hello %s' % pk)
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urls = super(DrugAdmin, self).get_urls()
my_urls = patterns('',
url(r'^em/(?P<pk>\d+)/$', self.admin_site.admin_view(self.edit_vector), name='edit_vector')
)
return my_urls + urls
admin.site.register(Drug, DrugAdmin)
admin.site.register(FarmAction)
admin.site.register(Illness) | [
"[email protected]"
] | |
140d8a10408bebea7a12712c607cf0a7278e11a1 | 010c5fbc97731286be00028ff33fc981d943bca3 | /primal/src/code/impute/tests/data/pedigree/pedigree_old_study.py | bffae63b82e6442b7adfd83f8252213996c0fefb | [] | no_license | orenlivne/ober | 6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30 | 810b16b2611f32c191182042240851152784edea | refs/heads/master | 2021-01-23T13:48:49.172653 | 2014-04-03T13:57:44 | 2014-04-03T13:57:44 | 6,902,212 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | '''
============================================================
A pedigree loaded from an input file from a previous study
at the Ober Lab. Provided by Jessica and Gaixin. Includes
node annotations (generated by Mark) - old_generation #.
Here 'old' refers to 'from the study'.
Created on May 30, 2012
@author: Oren Livne <[email protected]>
============================================================
'''
import numpy as np
from impute.data.Pedigree import Pedigree
from impute.data import io_pedigree
class PedigreeOldStudy(Pedigree):
def __init__(self, pedigree, old_generation):
'''Constructor'''
super(PedigreeOldStudy, self).__init__(pedigree.graph,
sample_id=pedigree.sample_id,
sex=pedigree.sex,
phenotype=pedigree.phenotype,
node_type=pedigree.node_type,
sample_index=pedigree.sample_index,
num_genotyped=pedigree.num_genotyped)
# Generation number of each node provided by the input file from the study
self.old_generation = old_generation
class PedigreeOldStudyReader(object):
#---------------------------------------------
# Methods
#---------------------------------------------
def read(self, file_name, genotyped_id_file=None):
'''Load pedigree from file in old format.'''
p = io_pedigree.read(file_name, genotyped_id_file)
# Load data from text file a second time to read the old-study-specific-column. Not efficient.
data = np.genfromtxt(file_name, np.dtype(int))
old_generation = dict(data[:,(1,6)])
# Wrap by old pedigree object
return PedigreeOldStudy(p, old_generation)
| [
"[email protected]"
] | |
c9887605af1e76e43622492bb7772873c7c8cd08 | 5a4436884af5341ce855c0e84866b972a0f61c05 | /day1/datatypes/dict/8.py | 3e38da9824b0c2c05be0754dd4c16a59e8c5f405 | [] | no_license | sreejithev/pythoncodes | 74a420c4f025b893e27f17ba85632a4a096f17fd | 70df14871a9687916d1c4ada76c055607f13e8ce | refs/heads/master | 2021-01-21T20:59:47.056167 | 2017-06-19T09:43:17 | 2017-06-19T09:43:17 | 92,292,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py |
a = {'x' : 10, 'y' : 20}
print a.setdefault('x', 20)
print a
print a.setdefault('z', 30)
print a
| [
"[email protected]"
] | |
5aaf61fe69ee9ad1529a5d0daae9be1d9ed286b2 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=82/sched.py | 485e768f9a31d6cd6d14a9155b7252114127319a | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | -X FMLP -Q 0 -L 3 96 300
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 0 -L 3 75 400
-X FMLP -Q 1 -L 1 63 250
-X FMLP -Q 1 -L 1 60 200
-X FMLP -Q 1 -L 1 48 150
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 2 -L 1 33 125
-X FMLP -Q 2 -L 1 32 100
-X FMLP -Q 3 -L 1 27 200
-X FMLP -Q 3 -L 1 26 150
-X FMLP -Q 3 -L 1 24 300
18 150
18 150
7 100
| [
"[email protected]"
] | |
de20a03609dd733f2af03a1ae3dbe1f42b81c5d3 | e0c4fc01dd17afaa62ce329d36b1a689d948c6a4 | /moya/trace.py | 998e12ff5f02ba0d628fc53ae414ede71b9cb2d9 | [
"MIT"
] | permissive | ui-frontend/moya | 90f79b17c4142da9778aa338848321fdbb601fed | e61deb0ad4bd9d0f0cf217fb4b0cf7c64b0a0d1b | refs/heads/master | 2021-01-15T22:23:32.093510 | 2015-02-22T00:34:11 | 2015-02-22T00:34:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,608 | py | """A container for Moya code tracebacks"""
from __future__ import print_function
from . import syntax
from .console import Console, Cell
from .template.errors import (TagError,
RenderError,
TemplateError,
MissingTemplateError)
from .context.expression import ExpressionError
from .context.errors import SubstitutionError
from .logic import MoyaException
from .compat import implements_to_string, text_type
import io
import sys
import traceback as pytraceback
_PYTHON_ERROR_TEXT = """A Python Exception may indicate either a bug in a Python extension, or Moya itself.
Consider reporting this to the Moya developers."""
class Frame(object):
def __init__(self,
code,
location,
lineno,
path=None,
obj=None,
cols=None,
one_line=False,
code_start=1,
libid=None,
format="xml",
raw_location=None):
self.code = code
self._location = location
self.lineno = lineno
self.obj = obj
self.cols = cols
self.one_line = one_line
self.code_start = code_start
self.format = format
self.libid = libid
self._raw_location = raw_location
@property
def location(self):
if self.obj:
return 'File "%s", line %s, in %s' % (self._location, self.lineno, self.obj)
else:
if self.cols:
return 'File "%s", line %s, col %s' % (self._location, self.lineno, self.cols[0])
else:
return 'File "%s"' % (self._location, self.lineno)
@property
def raw_location(self):
return self._raw_location or self._location
@property
def snippet(self):
try:
if not self.code:
return ''
if self.one_line:
return self.code
return syntax.highlight(self.format,
self.code,
self.lineno - 3,
self.lineno + 3,
highlight_lines=[self.lineno],
highlight_range=[self.lineno, self.cols[0], self.cols[1]] if self.cols else None)
except Exception as e:
raise
from traceback import print_exc
print_exc(e)
@implements_to_string
class Traceback(object):
def __init__(self, url=None, method=None, handler=None, exc=None):
self.url = url
self.method = method
self.handler = handler
self.moyastack = []
self.pystack = []
self.exception = None
self.tb = None
self.error_message = None
self.exc = exc
self.exc_info = None
self.msg = None
self.error_type = "internal error"
self._displayed = False
        self.diagnosis = getattr(exc, 'diagnosis', None)
@property
def console_error(self):
console = Console(html=True)
console.obj(None, self.exc)
return console.get_text()
def add_frame(self, frame):
self.moyastack.append(frame)
def add_pyframe(self, frame):
self.pystack.append(frame)
@property
def stack(self):
return self.moyastack + self.pystack
def __str__(self):
console = Console(text=True)
self.__moyaconsole__(console)
return console.get_text()
def __moyaconsole__(self, console):
stack = (self.moyastack)
console.div("Logic Error", bold=True, fg="red")
for frame in stack:
console.text(frame.location)
if frame.one_line:
console.text(" " + frame.code)
elif frame.code:
console.xmlsnippet(frame.code, frame.lineno, extralines=2)
if self.tb:
console.nl()
console.exception(self.tb, tb=True)
else:
console.error(self.msg)
if self.diagnosis:
console.table([[Cell(self.diagnosis, italic=True)]])
console.div()
def build(context, stack, node, exc, exc_info, request, py_traceback=True):
add_pytraceback = True
if node is not None:
node = getattr(node, 'node', node)
if stack is None:
stack = context.get('.callstack', [])
if request is not None:
traceback = Traceback(request.path_info, request.method, exc=exc)
else:
traceback = Traceback(exc=exc)
traceback.diagnosis = getattr(exc, 'diagnosis', None)
add_pytraceback = not getattr(exc, 'hide_py_traceback', False)
traceback.error_type = getattr(exc, 'error_type', 'internal error')
for s in stack:
e = getattr(s, 'element', None)
if e and e._code:
frame = Frame(e._code,
e._location,
e.source_line or 1,
obj=text_type(e),
libid=e.libid)
traceback.add_frame(frame)
element = getattr(exc, 'element', None)
if element is not None and hasattr(element.document, 'structure'):
frame = Frame(element.document.structure.xml,
element._location,
element.source_line or 1,
obj=text_type(element),
libid=element.libid)
traceback.add_frame(frame)
add_pytraceback = False
elif hasattr(node, '_location') and hasattr(node, 'source_line'):
if node._code:
frame = Frame(node._code,
node._location,
node.source_line or 1,
obj=text_type(node),
libid=node.libid)
traceback.add_frame(frame)
if isinstance(exc, MoyaException):
traceback.error_type = "Moya Exception"
traceback.moya_exception_type = exc.type
add_pytraceback = False
elif isinstance(exc, ExpressionError):
traceback.error_type = "Expression Error"
add_pytraceback = False
elif isinstance(exc, SubstitutionError):
traceback.error_type = "Substitution Error"
add_pytraceback = False
elif isinstance(exc, RenderError):
traceback.error_type = "Template Render Error"
if hasattr(exc, 'template_stack'):
for ts in exc.template_stack[:-1]:
if 'node' in ts:
node = ts['node']
frame = Frame(node.code,
node.template.path,
node.location[0],
raw_location=node.template.raw_path,
cols=node.location[1:],
format="moyatemplate")
traceback.add_frame(frame)
frame = Frame(exc.code,
exc.path,
exc.lineno,
raw_location=getattr(exc, 'raw_path', None),
cols=(exc.start, exc.end),
format="moyatemplate")
traceback.add_frame(frame)
add_pytraceback = False
if exc.original:
exc = exc.original
if isinstance(exc, (TagError,
ExpressionError,
SubstitutionError,
MissingTemplateError)):
add_pytraceback = False
elif isinstance(exc, TemplateError):
traceback.error_type = "Template Error"
frame = Frame(exc.code,
exc.path,
raw_location=exc.raw_path,
lineno=exc.lineno,
cols=(exc.start, exc.end),
format="moyatemplate")
traceback.add_frame(frame)
add_pytraceback = False
traceback.exception = exc
traceback.msg = text_type(exc)
traceback.diagnosis = traceback.diagnosis or getattr(exc, 'diagnosis', None)
if context.get('.develop', False):
add_pytraceback = True
if add_pytraceback and exc_info and py_traceback:
traceback.error_type = "Python Exception"
tb_type, tb_value, tb = exc_info
traceback.tb = ''.join(pytraceback.format_exception(tb_type, tb_value, tb))
pyframes = pytraceback.extract_tb(tb)
for i, f in enumerate(reversed(pyframes)):
if f[2] == 'logic':
pyframes = pyframes[len(pyframes) - i - 1:]
break
for (filename, line_number, function_name, text) in pyframes:
try:
with io.open(filename, 'rt') as f:
code = f.read()
except:
code = None
frame = Frame(code,
filename,
line_number,
one_line=False,
obj=function_name,
format="python")
traceback.add_pyframe(frame)
traceback.msg = text_type(exc)
if traceback.diagnosis is None:
traceback.diagnosis = _PYTHON_ERROR_TEXT
return traceback
def format_trace(context, stack, node, exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
request = context.get('.request', None)
moya_trace = build(context, stack, None, node, exc_info, request, py_traceback=False)
return text_type(moya_trace)
| [
"[email protected]"
] | |
6e67540d0a1f799bb87d998cdd83312283346dab | 3c8701e04900389adb40a46daedb5205d479016c | /test/fortresstest/fortress_lfzb/test.py | 8d8c20326ce571cfe13ee15976a521188594afda | [] | no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | #!/bin/python
# # -*- coding: utf-8 -*-
# import sys,json,urllib,urllib2
# reload(sys)
# sys.setdefaultencoding('utf-8')
#
# url = "http://cd-ztree-api.inc-mtime.com/getalluserpassword"
# result = urllib2.urlopen(url).read()
# result = json.loads(result)
#
# for i in result:
# for k,v in i.items():
# if k == 'jie.wang':
# print v
| [
"[email protected]"
] | |
e853121de9b9ac889b80e8139983297bc65d2faa | 7a88fc18f30d5dd3ac935877d4d9268a56c296be | /di_website/blog/migrations/0020_auto_20191023_0650.py | 55f86e1326edb75e8034c449a21e59133ae334f2 | [] | no_license | devinit/DIwebsite-redesign | 745a480b7ba0feffa34dc664548ee4c5a7b4d470 | 9ec46823c67cdd4f35be255896bf30d8f6362666 | refs/heads/develop | 2023-08-30T04:06:20.951203 | 2023-08-07T12:06:07 | 2023-08-07T12:06:07 | 184,287,370 | 1 | 0 | null | 2023-08-28T14:34:57 | 2019-04-30T15:29:25 | HTML | UTF-8 | Python | false | false | 782 | py | # Generated by Django 2.2.2 on 2019-10-23 06:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0019_auto_20191011_1357'),
]
operations = [
migrations.AlterField(
model_name='blogarticlepage',
name='hero_image_credit_name',
field=models.TextField(blank=True, help_text='Name of source of image used in hero if any', null=True, verbose_name='Image credit name'),
),
migrations.AlterField(
model_name='blogindexpage',
name='hero_image_credit_name',
field=models.TextField(blank=True, help_text='Name of source of image used in hero if any', null=True, verbose_name='Image credit name'),
),
]
| [
"[email protected]"
] | |
241e55691c7d2fafa5f5c642cc5c07b5e879dd3a | 9990c9561b72398d9f6a2cb29b7ee63a68cf9607 | /.history/higherarchy/urls_20200305110448.py | 5d91d8f22444402b018957370c116c6baae075f4 | [] | no_license | Imraj423/HierarchyD | 46c78ea3be6836039ce357b06a3a3e32140d1868 | f175f57bc0afd3f8366bec9d03c964d228877c4a | refs/heads/master | 2021-02-19T01:43:25.018148 | 2020-03-05T20:50:20 | 2020-03-05T20:50:20 | 245,262,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | """higherarchy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
3c6b94c42cc74ed86c4168785aa1625444219fae | c2d436fecd486a412eae5171882110e324b2fc1c | /chap8/78.py | ec1d6a31f6f9efeb259b6ef3476282a255d11d7d | [] | no_license | uenewsar/nlp100fungos | 0150bacf835f3734dd76a25b079ec6c61efb4d83 | 7f745abb97c3129818ec6cf5f69abca15c50e451 | refs/heads/master | 2020-04-14T23:47:20.482910 | 2019-01-12T13:32:09 | 2019-01-12T13:32:36 | 164,216,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,023 | py | # -*- coding: utf-8 -*-
'''
78. Five-fold cross-validation
In the experiments of problems 76-77, the examples used for training were also
used for evaluation, so that is not a fair evaluation: it measures how well the
classifier memorises the training examples and says nothing about the model's
generalisation performance. Therefore, use 5-fold cross-validation to obtain
the accuracy, precision, recall and F1 score of the polarity classifier.
'''
import re
import sys
import numpy as np
import stemming.porter2
from sklearn.linear_model import LogisticRegression
# as stemming.porter2.stem is a little bit slow, use cache.
stem_cache = {}
def stem(inp):
if inp not in stem_cache:
stem_cache[inp] = stemming.porter2.stem(inp)
return stem_cache[inp]
# from https://gist.github.com/sebleier/554280
stop_words = {
"i", "me", "my", "myself", "we", "our",
"ours", "ourselves", "you", "your", "yours",
"yourself", "yourselves", "he", "him", "his", "himself",
"she", "her", "hers", "herself", "it", "its",
"itself", "they", "them", "their", "theirs", "themselves",
"what", "which", "who", "whom", "this", "that",
"these", "those", "am", "is", "are", "was",
"were", "be", "been", "being", "have", "has",
"had", "having", "do", "does", "did", "doing",
"a", "an", "the", "and", "but", "if",
"or", "because", "as", "until", "while", "of",
"at", "by", "for", "with", "about", "against",
"between", "into", "through", "during", "before", "after",
"above", "below", "to", "from", "up", "down",
"in", "out", "on", "off", "over", "under",
"again", "further", "then", "once", "here", "there",
"when", "where", "why", "how", "all", "any",
"both", "each", "few", "more", "most", "other",
"some", "such", "no", "nor", "not", "only",
"own", "same", "so", "than", "too", "very",
"s", "t", "can", "will", "just", "don", "should",
"now"}
class Instance(object):
# class to store one instance of training/evaluation data
def __init__(self):
self.label = None
self.sentence = None
self.words = None
self.feat = None
self.feat_vec = None
def __str__(self):
ret = 'label=[{}]'.format(self.label)
ret += ', sentence="{}"'.format(self.sentence)
ret += ', words={}'.format(self.words)
ret += ', feat={}'.format(self.feat)
ret += ', feat_vec={}'.format(self.feat_vec)
return ret
def create_feat(org_words, feat2id=None):
# make unigram and bigram feat
# to avoid changing original memory
words = list(org_words)
# delete symbol tokens
tmp = []
for e in words:
if not re.search(r'^[^0-9a-zA-Z]+$', e):
# use if the word is NOT only-symbol word
tmp.append(e)
words = tmp
# stemming
for i in range(len(words)):
words[i] = stem(words[i])
# assign flag for showing stop words
for i in range(len(words)):
if is_stop_word(words[i]):
words[i] = '__stop__'
feat = {}
# add BOS and EOS
words.insert(0, 'BOS')
words.append('EOS')
## make unigram
for i in range(len(words)):
if words[i] == '__stop__':
continue
feat[words[i]] = 1
## make bigram
for i in range(len(words)-1):
if words[i] == '__stop__' or words[i+1] == '__stop__':
continue
feat['{}_{}'.format(words[i], words[i+1])] = 1
# no matter how much one feature exist in one sentence,
# the value of feature is set to 1.
# if each feature is not defined in feat2id, delete
vec = None
if feat2id is not None:
tmp = {}
for ef in feat.keys():
if ef in feat2id:
tmp[ef] = 1
feat = tmp
# also make feature vector
vec = [0.0] * len(feat2id)
for ef in feat.keys():
vec[feat2id[ef]] = 1.0
# debug
#sys.stderr.write('[{}]\n -> [{}]\n'.format(' '.join(org_words), ' '.join(sorted(feat.keys()))))
return (feat, vec)
def normalize_stc(inp):
# delete duplicated space
inp = re.sub(r' +', ' ', inp)
# lower
inp = inp.lower()
return inp
def read_data(fn):
data = []
fr = open(fn, 'r', encoding='utf-8')
for e in fr:
e = e.rstrip()
e = normalize_stc(e)
tab = e.split(' ')
# label -> [0]
label = int(tab[0])
# words -> [1, 2, ...]
words = tab[1:]
# sentence
sentence = ' '.join(tab[1:])
ins = Instance()
ins.label = label
ins.words = words
ins.sentence = sentence
data.append(ins)
fr.close()
return data
def is_stop_word(inp):
if inp in stop_words:
return True
else:
return False
def make_feat_to_be_used(data):
# from raw features, extract actual features to be used.
# creat feat vs. freq
feat2freq = {}
for e in data:
for ef in e.feat:
if ef not in feat2freq:
feat2freq[ef] = 0
feat2freq[ef] += 1
# delete singleton and make feat to be used
feat2id = {}
for k, v in feat2freq.items():
if v>1:
feat2id[k] = len(feat2id)
else:
#print('{} is deleted.'.format(k))
pass
return feat2id
## main
data = read_data('sentiment.txt')
# divide data to 5 folds
data_fold = {}
for i in range(len(data)):
fold_idx = int(float(i) / len(data) * 5)
if fold_idx not in data_fold:
data_fold[fold_idx] = []
data_fold[fold_idx].append(data[i])
# reset metrics
mat = {'TP':0, 'FN':0, 'FP':0, 'TN':0}
cor = 0
# loop all folds
for fold_idx in sorted(data_fold.keys()):
print('fold: {}/{}'.format(fold_idx+1, len(data_fold)))
# make evaluation data
eval_data = data_fold[fold_idx]
#for e in eval_data:
# print(e)
# make training data
train_data = []
for i in sorted(data_fold.keys()):
if i != fold_idx:
train_data.extend(data_fold[i])
#for e in train_data:
# print(e)
print(' num of eval data: {}'.format(len(eval_data)))
print(' num of train data: {}'.format(len(train_data)))
## train
# first, makes all possible features
for ed in train_data:
(ed.feat, _) = create_feat(ed.words)
# make actual features to be used
feat2id = make_feat_to_be_used(train_data)
#for k, v in feat2id.items():
# print(' {} {}'.format(k, v))
# make feature vector
for ed in train_data:
(ed.feat, ed.feat_vec) = create_feat(ed.words, feat2id)
#print(' feat: {}'.format(ed.feat))
#print(' feat_vec: {}'.format(ed.feat_vec))
# model training
x = []
y = []
for ed in train_data:
#print('ed.feat_vec: {}'.format(list(ed.feat_vec)))
#x.append(list(ed.feat_vec))
x.append(ed.feat_vec)
y.append(ed.label)
#print('x:{}'.format(x))
#print('y:{}'.format(y))
lr = LogisticRegression(solver='liblinear')
lr.fit(x, y)
#exit()
# evaluation
for ed in eval_data:
(ed.feat, ed.feat_vec) = create_feat(ed.words, feat2id)
est_label = lr.predict([ed.feat_vec])[0]
est_prob = lr.predict_proba([ed.feat_vec])[0][np.where(lr.classes_==est_label)][0]
if est_label==ed.label:
cor += 1
if est_label==1 and ed.label==1:
mat['TP'] += 1
elif est_label==1 and ed.label==-1:
mat['FP'] += 1
elif est_label==-1 and ed.label==1:
mat['FN'] += 1
elif est_label==-1 and ed.label==-1:
mat['TN'] += 1
else:
raise Exception('error')
print(' accuracy: {}'.format(float(cor)/len(data)))
precision = float(mat['TP']) / (mat['TP']+mat['FP'])
print('precision: {}'.format(precision))
recall = float(mat['TP']) / (mat['TP']+mat['FN'])
print(' recall: {}'.format(recall))
print(' f1: {}'.format( 2 * precision * recall / (precision + recall) ))
| [
"none@none"
] | none@none |
49bd991042559fc02150d178e511e172a8bb31e5 | 5f845ebbc2c9b40eea702833c91928ae90ae7ee5 | /data-structures/array-left-rotation.py | d20dde7750484854c21b9132f1597e1f7a1f439a | [
"MIT"
] | permissive | imgeekabhi/HackerRank | 7a1917fee5af01976aebb9c82aa1045a36487016 | 7fe4a308abad85ce446a28328324be480672e6fc | refs/heads/master | 2022-12-28T19:13:49.098090 | 2020-10-11T09:29:08 | 2020-10-11T09:29:08 | 300,023,395 | 1 | 0 | MIT | 2020-09-30T18:48:12 | 2020-09-30T18:48:11 | null | UTF-8 | Python | false | false | 408 | py | #!/bin/python3
import sys
def leftRotation(a, d):
out = list(a)
a_len = len(a)
for ind, el in enumerate(a):
out[(ind + a_len - d) % a_len] = el
return out
if __name__ == "__main__":
n, d = input().strip().split(' ')
n, d = [int(n), int(d)]
a = list(map(int, input().strip().split(' ')))
result = leftRotation(a, d)
print (" ".join(map(str, result)))
| [
"[email protected]"
] | |
1c157b3dc596401cbdacaf303f49abd65fd7dc33 | a686db263a544c42ccfea566f19fba5443515357 | /server.py | 7a61d8d1b1fc25d4593cfbce61fbe3bf85d13541 | [] | no_license | merli027/apis | 8fd3ea6489f416d2dd1304db51dae5d3a23cffc1 | 4136e10fcbdfc36b7665233eddce913888e1e59f | refs/heads/master | 2022-12-13T13:03:05.026252 | 2020-02-25T22:28:05 | 2020-02-25T22:28:05 | 243,116,270 | 0 | 0 | null | 2022-12-08T03:41:20 | 2020-02-25T22:27:30 | HTML | UTF-8 | Python | false | false | 2,292 | py | from flask import Flask, render_template, request
from pprint import pformat
import os
import requests
app = Flask(__name__)
app.secret_key = 'SECRETSECRETSECRET'
API_KEY = os.environ['TICKETMASTER_KEY']
@app.route('/')
def homepage():
"""Show homepage."""
return render_template('homepage.html')
@app.route('/afterparty')
def show_afterparty_form():
"""Show event search form"""
return render_template('search-form.html')
@app.route('/afterparty/search')
def find_afterparties():
"""Search for afterparties on Eventbrite"""
keyword = request.args.get('keyword', '')
postalcode = request.args.get('zipcode', '')
radius = request.args.get('radius', '')
unit = request.args.get('unit', '')
sort = request.args.get('sort', '')
url = 'https://app.ticketmaster.com/discovery/v2/events'
payload = {'apikey': API_KEY, 'keyword': keyword, 'postalcode': postalcode,
'radius': radius, 'unit': unit, 'sort': sort}
res = requests.get(url, params=payload)
data = res.json()
print(data.keys())
#events = data['_embedded']['events']
    events = data.get('_embedded', {}).get('events', [])
# TODO: Make a request to the Event Search endpoint to search for events
#
# - Use form data from the user to populate any search parameters
#
# - Make sure to save the JSON data from the response to the `data`
# variable so that it can display on the page. This is useful for
# debugging purposes!
#
# - Replace the empty list in `events` with the list of events from your
# search results
# data = {'Test': ['This is just some test data'],
# 'page': {'totalElements': 1}}
return render_template('search-results.html',
pformat=pformat,
data=data,
results=events)
# ===========================================================================
# FURTHER STUDY
# ===========================================================================
@app.route('/event/<id>')
def get_event_details(id):
"""View the details of an event."""
# TODO: Finish implementing this view function
return render_template('event-details.html')
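# A possible way to finish the TODO in get_event_details() (illustrative sketch;
# the /discovery/v2/events/{id} path and the 'event' template variable are
# assumptions, not confirmed by this codebase):
#
#   url = 'https://app.ticketmaster.com/discovery/v2/events/' + id
#   event = requests.get(url, params={'apikey': API_KEY}).json()
#   return render_template('event-details.html', event=event, pformat=pformat)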
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| [
"[email protected]"
] | |
95f4c2bf1d1943ec5cf66207d1c6179d21703460 | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/scan/esccms/2555.py | dbdde80f807c5821d4d411301730134a2ac42e6a | [] | no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | #!/usr/bin/evn python
#-*-:coding:utf-8 -*-
#Author:404
#Name: ESCCMS (Yichuangsi) education site-building system - unauthorized access exposes the full list of registered users
#Refer:http://www.wooyun.org/bugs/wooyun-2010-086704
def assign(service,arg):
if service=="esccms":
return True,arg
def audit(arg):
url=arg+"operationmanage/selectunitmember.aspx"
code,head,res,errcode,_=curl.curl2(url)
if code==200 and "doPostBack" in res and 'gvUnitMember' in res:
security_hole(url)
if __name__=="__main__":
audit(assign('esccms','http://www.yclfzx.com/')[1])
audit(assign('esccms','http://www.qzxx.net/')[1]) | [
"[email protected]"
] | |
383c97c1e717ee09c481c9a9bcaafaf22a6aa0cd | 4144df22392350035a9a24fcbc23fd1c6bce5c12 | /Lib/glyphNameFormatter/rangeProcessors/katakana.py | 080574bddaaaa12ee38391a29264d9162345e529 | [
"BSD-3-Clause",
"Adobe-Glyph"
] | permissive | danielgrumer/glyphNameFormatter | 55b6076684bed7ff4cc6e37ce4a0bb0e2ce86a4a | 9a41b3ef02c01cd18afe0232f6e436a2f7379178 | refs/heads/master | 2020-12-11T05:35:47.835908 | 2016-03-19T09:50:33 | 2016-03-19T09:50:33 | 53,578,090 | 0 | 0 | null | 2016-03-10T11:07:31 | 2016-03-10T11:07:30 | null | UTF-8 | Python | false | false | 357 | py |
def process(self):
self.edit("KATAKANA-HIRAGANA", "kana")
self.edit("SOUND MARK")
self.edit("MARK")
self.edit("LETTER")
self.edit("SMALL", "small")
self.editToFinal("KATAKANA", "katakana")
self.lower()
self.compress()
if __name__ == "__main__":
from glyphNameFormatter.test import printRange
printRange("Katakana")
| [
"[email protected]"
] | |
d1bdf920154ffffe0e5e7314a926015d1e892b85 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/test_forge_opR2kM.py | fe34ad1cf8192a126f57b15b7ea1af6b39a5c26b | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 567 | py | from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTestopR2kM(TestCase):
def setUp(self):
self.maxDiff = None
def test_forge_opR2kM(self):
expected = get_data(
path='operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/forged.hex')
actual = forge_operation_group(get_data(
path='operations/opR2kM14LSbSGpKxeZWzfXaj32AP29B2iJ88hss1mZRxXAMkR2U/unsigned.json'))
self.assertEqual(expected, actual)
| [
"[email protected]"
] | |
41b9f5fefd62fafb4c0703fcbb3f4278fb7479a8 | 8e1668e35a8df9968ab14d16db089b51dbe6dd51 | /python/algorithms/arrays/distributed_candies.py | 36ce7af5034497a847bd5c0a47921763dfd79336 | [] | no_license | Chalmiller/competitive_programming | f1ec0184d1ff247201522ab90ca8e66b3f326afc | b437080d1ba977c023baf08b7dc5c3946784e183 | refs/heads/master | 2021-03-24T05:11:59.383916 | 2020-08-24T22:07:41 | 2020-08-24T22:07:41 | 247,519,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from typing import *
import collections
class Solution:
def distributeCandies(self, candies: List[int]) -> int:
        return min(len(candies) // 2, len(set(candies)))
obj = Solution()
print(obj.distributeCandies([1,1,2,3]))
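# For [1, 1, 2, 3] the sister may take len(candies) // 2 = 2 candies and there
# are 3 distinct kinds, so the call above should print 2.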
| [
"[email protected]"
] | |
340c22294da42b53386bdaea4cfe8593715817c1 | 644b019a4792b6c7d9e5352e6330069850cc07e7 | /dentexchange/apps/matches/jobs/daily/periodic_automatches_email.py | a7eb338df9d3424e9594a60e71c556e2f72d00b6 | [
"BSD-3-Clause"
] | permissive | jpchauvel/dentexchange | db0611c8c45365db30bdc15e3005c6eeac104c73 | 58ae303e842404fc9e1860f294ec8044a332bef3 | refs/heads/master | 2021-10-10T12:19:00.985034 | 2014-09-24T03:42:20 | 2014-09-24T03:42:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding:utf-8 -*-
import calendar
from django_extensions.management.jobs import DailyJob
from django.utils.timezone import now
from ...tasks import SendPeriodicAutomatchesEmailTask
from ... import constants
class Job(DailyJob):
help = '''
Sends periodic email notifications to users notifying the total automatches
they have in their profiles
'''
def execute(self):
today = now()
week_day = calendar.weekday(today.year, today.month, today.day)
if week_day in constants.PERIODIC_AUTOMATCHES_PROGRAMMED_WEEK_DAYS:
SendPeriodicAutomatchesEmailTask.delay()
| [
"[email protected]"
] | |
4e7cd1f106c73485b089537adf4a40e89a4adc54 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/generated_samples/application_get_operation_example.py | f7d2aec71c49e97b87b077dfa6dfe7232f1e77d0 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,724 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.servicefabricmanagedclusters import ServiceFabricManagedClustersManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-servicefabricmanagedclusters
# USAGE
python application_get_operation_example.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ServiceFabricManagedClustersManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.applications.get(
resource_group_name="resRg",
cluster_name="myCluster",
application_name="myApp",
)
print(response)
# x-ms-original-file: specification/servicefabricmanagedclusters/resource-manager/Microsoft.ServiceFabric/preview/2022-08-01-preview/examples/ApplicationGetOperation_example.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d4f4e5362e3781d0329078dc23911c801727ea8a | 3806db5b4bb7a638f30c818a29ccaf2b0ddb2836 | /test_188.py | 184ae4a7f28d2b1a7ebb44b0521862a3a9e86548 | [] | no_license | EomAA/fenics-qa | d0a687a7b84c51417e96eeeef9855c0d4ba27dea | c37a36a14450d0e7f6432c4726c5d96e0d6c4e96 | refs/heads/master | 2021-12-15T12:07:10.316478 | 2017-08-18T09:16:01 | 2017-08-18T09:16:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | from dolfin import *
import numpy as np
# Create mesh and define function space
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u_e=Expression('1+x[0]*x[0]+2*x[1]*x[1]') #exact solution
# Define Dirichlet boundary (x = 0 or x = 1)
class Left(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0]*(1-x[0]),0.0)
#Define the right dirichlet boundary condition
class Right(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1]*(1-x[1]), 0.0)
left=Left()
right=Right()
# Define boundary condition
u0 = Expression('1+x[0]*x[0]+2*x[1]*x[1]')
bc = DirichletBC(V, u0, left)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("-6")
ur = Expression('4*x[1]')
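# Sanity check of the data above (worked out for reference): the exact solution
# u = 1 + x^2 + 2*y^2 has Laplacian 2 + 4 = 6, and the weak form a(u, v) = L(v)
# below corresponds to -div(grad(u)) = f, hence f = -6. On the horizontal
# boundaries the outward normal derivative of u is 4*y at y = 1 and 0 at y = 0,
# which is exactly the Neumann datum ur = 4*x[1].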
a = inner(grad(u), grad(v))*dx
L = f*v*dx + ur*v*ds
# Compute solution
u = Function(V) # u is the solution with CG method
solve(a == L, u, bc)
u_e_Ve = interpolate(u_e, V)
error = (u - u_e_Ve)**2*dx
k=sqrt(assemble(u_e_Ve**2*dx))
E = assemble(error)
print E
k=sqrt(assemble(u_e_Ve**2*dx)) #to get relative L2 norm
#print k
#print E
print('L2 norm using CG Method : ',E/k)
#plot(u)
#plot(u_e_Ve)
#interactive()
| [
"[email protected]"
] | |
3c53678e97a6f2552793138d9aeca60f467499e7 | 3a121f4953c430e450c448417ca40e7dfae9db9a | /analysis/visualization.py | 6efc277f9555343a2669a6bfd4681c32de907bb9 | [
"MIT"
] | permissive | sadscv/sentiment.datalogue | cdcbaa71a16be07f99f6ae502e2da3a4df08cd3f | 3e7bde9e03394774bfab2582bd936c090639ddc2 | refs/heads/master | 2021-05-01T21:53:53.478139 | 2017-03-01T08:11:06 | 2017-03-01T08:11:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
sns.set_style('white')
def training_plot(history, outfile, metric='categorical_accuracy', title=''):
"""
Plot training accuracy for each epoch
"""
## Set output file for plot
basepath = os.path.split(os.path.expanduser(outfile))[0]
plotfile = basepath + '_train_plot.png'
## Plot accuracy
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(history.history['val_'+metric], label='test')
ax.plot(history.history[metric], label='train')
ax.set_title(title)
ax.set_xlabel('Epochs')
ax.set_ylabel(metric)
ax.legend()
f.savefig(plotfile)
return f, ax
def plot_single_auc(fpr, tpr, auc_, ax=None, c='b', label=''):
"""
Plots the receiver operating characteristic curve for a single
sequence of false positive rates, true postive rates and auc
"""
ax_ = ax
if ax is None:
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(fpr, tpr, lw=2, color=c,\
label=label + ' AUC:' + str(auc_) )
if ax_ is None:
return f, ax
else:
return ax
def plot_auc(fprs, tprs, aucs, title='Receiver Operating Characteristc', labels=None):
assert len(fprs) == len(tprs), 'must have equal number of FPRs and TPRS'
assert len(tprs) == len(aucs), 'must have equal number of tprs and aucs'
COLORS = sns.color_palette(n_colors=len(aucs))
fig = plt.figure()
ax = fig.add_subplot(111)
labels = [''] * len(aucs) if not labels else labels
assert len(labels) == len(aucs), 'must have equal number of labels as aucs'
    # should probably be more descriptive with variable names...
for f, t, a, c, l in zip(fprs, tprs, aucs, COLORS, labels):
plot_single_auc(f, t, a, ax=ax, c=c, label= l)
ax.plot([0, 1], [0, 1], lw=2, linestyle='--', color='k', label='Random')
ax.set_xlabel('false positive rates')
ax.set_ylabel('true positive rates')
ax.legend()
ax.set_title(title)
return fig, ax
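# Minimal usage sketch for plot_auc (illustrative; y_true and y_score are
# hypothetical arrays, and scikit-learn is assumed to be available):
#
#   from sklearn.metrics import roc_curve, auc
#   fpr, tpr, _ = roc_curve(y_true, y_score)
#   fig, ax = plot_auc([fpr], [tpr], [auc(fpr, tpr)], labels=['model'])
#   fig.savefig('roc_curve.png')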
| [
"[email protected]"
] | |
8d740f23f7e5f925e7a63395c8a015063f25184d | b0742d240520af3a35fab31f71cfb1cd71c73696 | /Python/EditMLTkinter/MLTkiter/app.py | f456d112f855883d38134312a8654b978de58022 | [] | no_license | subhamrex/Coding_Practice | 90c95e74f403781a90cd39ca0b441251dc4974d5 | 677579dbb4d92c9f3a2a7d5403b14c9f6f51014b | refs/heads/master | 2023-07-29T02:01:43.678180 | 2021-09-08T15:48:36 | 2021-09-08T15:48:36 | 373,430,589 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,992 | py | from tkinter import Frame, LabelFrame, StringVar, IntVar, Label, Tk, Entry, Button, TclError, Scrollbar,Toplevel, Canvas, Checkbutton, Radiobutton
from tkinter.constants import *
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, mean_absolute_error,mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
class MachineLearning:
def __init__(self):
self.data = None
self.selection_x = None
self.table = None
self.selection_y = None
        self.X = None
        self.y = None
self.X_test_l = None
self.X_train_l = None
self.y_test_l = None
self.y_train_l = None
self.X_test = None
self.X_train = None
self.y_test = None
self.y_train = None
self.le = LabelEncoder()
self.linreg_model = None
self.linreg_predictions = None
self.logreg_model = None
self.logreg_predictions = None
self.dtree_model = None
self.dtree_predictions = None
self.rforest_model = None
self.rforest_predictions = None
self.window = Tk()
self.color = 'grey95'
self.window.geometry('620x700')
self.window.resizable(False, False)
self.window.configure(background=self.color)
self.window.title('Machine Learning')
self.window.iconbitmap('py.ico')
# Heading
self.heading = Label(self.window, text='Machine Learning', bg=self.color, pady=20, font=('Helvetica'
, 35, 'bold'))
self.heading.place(width=620, height=100, bordermode=OUTSIDE, x=0, y=0)
# File Selection and viewing
self.frame = LabelFrame(self.window, text='File Selection', bg=self.color)
self.frame.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=100)
self.name_label = Label(self.frame, text='File Name : ', bg=self.color, padx=10, pady=10,
font=('Helvetica', 15))
self.name_label.place(width=120, height=30, bordermode=INSIDE, x=10, y=13)
self.name = StringVar()
self.name_entry = Entry(self.frame, exportselection=False, textvariable=self.name, font=('Helvetica', 12))
self.name_entry.place(width=250, height=30, bordermode=INSIDE, x=130, y=13)
self.name_select = Button(self.frame, text='Select', command=lambda: self.select())
self.name_select.place(width=50, height=30, bordermode=INSIDE, x=395, y=13)
self.df_show = Button(self.frame, text='Show', command=lambda: self.create_table(), state=
DISABLED)
self.df_show.place(width=50, height=30, bordermode=INSIDE, x=455, y=13)
self.df_hide = Button(self.frame, text='Hide', command=lambda: self.hide(), state=DISABLED)
self.df_hide.place(width=50, height=30, bordermode=INSIDE, x=515, y=13)
# Train Test Split
self.ttsplit = LabelFrame(self.window, text='Train Test Split', bg=self.color)
self.ttsplit.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=200)
self.select_x = Button(self.ttsplit, text='X', command=lambda: self.get_x(), state=DISABLED)
self.select_x.place(width=80, height=30, bordermode=INSIDE, x=10, y=13)
self.select_y = Button(self.ttsplit, text='y', command=lambda: self.get_y(), state=DISABLED)
self.select_y.place(width=80, height=30, bordermode=INSIDE, x=100, y=13)
self.test_size_label = Label(self.ttsplit, text='Test Size : ', bg=self.color)
self.test_size_label.place(width=60, height=30, bordermode=INSIDE, x=200, y=13)
self.test_size = StringVar()
self.test_size.set('0.25')
        self.test_size_entry = Entry(self.ttsplit, exportselection=False, textvariable=self.test_size, font=('Helvetica', 10))
self.test_size_entry.place(width=50, height=30, bordermode=INSIDE, x=260, y=13)
self.rstate_label = Label(self.ttsplit, text='Random State : ', bg=self.color)
self.rstate_label.place(width=100, height=30, bordermode=INSIDE, x=330, y=13)
self.rstate = StringVar()
self.rstate.set('None')
        self.rstate_entry = Entry(self.ttsplit, exportselection=False, textvariable=self.rstate, font=('Helvetica', 10))
self.rstate_entry.place(width=50, height=30, bordermode=INSIDE, x=430, y=13)
self.split_button = Button(self.ttsplit, text='Split', command=lambda: self.split(), state=DISABLED)
self.split_button.place(width=80, height=30, bordermode=INSIDE, x=490, y=13)
# Linear Regression
105 self.linreg = LabelFrame(self.window, text='Linear Regression', bg=self.color)
106 self.linreg.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=300)
107
self.linreg_pred = Button(self.linreg, text='Predict', command=lambda: self.pred_linreg(), state=
DISABLED)
108
109 self.linreg_pred.place(width=125, height=30, bordermode=INSIDE, x=8, y=13)
110
self.coefficients = Button(self.linreg, text='Coefficients', command=lambda: self.coeff(), state=
DISABLED)
111
112 self.coefficients.place(width=125, height=30, bordermode=INSIDE, x=153, y=13)
113
self.scatter_button = Button(self.linreg, text='Scatter Plot', command=lambda: self.scatter(), state=
DISABLED)
114
115 self.scatter_button.place(width=125, height=30, bordermode=INSIDE, x=298, y=13)
116
self.linreg_error = Button(self.linreg, text='Error', command=lambda: self.errors_linreg(), state=
DISABLED)
117
118 self.linreg_error.place(width=125, height=30, bordermode=INSIDE, x=443, y=13)
119
120 # Logistic Regression
121 self.logreg = LabelFrame(self.window, text='Logistic Regression', bg=self.color)
122 self.logreg.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=400)
123
self.logreg_pred = Button(self.logreg, text='Predict', command=lambda: self.pred_logreg(), state=
DISABLED)
124
125 self.logreg_pred.place(width=125, height=30, bordermode=INSIDE, x=8, y=13)
126
self.logreg_cm = Button(self.logreg, text='Confusion Matrix', command=lambda: self.cm_logreg(),
state=DISABLED)
127
128 self.logreg_cm.place(width=125, height=30, bordermode=INSIDE, x=153, y=13)
129
self.logreg_cr = Button(self.logreg, text='Classification Report', command=lambda: self.cr_logreg(),
state=DISABLED)
130
131 self.logreg_cr.place(width=125, height=30, bordermode=INSIDE, x=298, y=13)
132
self.logreg_error = Button(self.logreg, text='Error', command=lambda: self.errors_logreg(), state=
DISABLED)
133
134 self.logreg_error.place(width=125, height=30, bordermode=INSIDE, x=443, y=13)
135
136 # Decision Tree
137 self.dtree = LabelFrame(self.window, text='Decision Tree', bg=self.color)
138 self.dtree.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=500)
139
self.dtree_pred = Button(self.dtree, text='Predict', command=lambda: self.pred_dtree(), state=
DISABLED)
140
141 self.dtree_pred.place(width=125, height=30, bordermode=INSIDE, x=8, y=13)
142
self.dtree_cm = Button(self.dtree, text='Confusion Matrix', command=lambda: self.cm_dtree(),
state=DISABLED)
143
144 self.dtree_cm.place(width=125, height=30, bordermode=INSIDE, x=153, y=13)
145
self.dtree_cr = Button(self.dtree, text='Classification Report', command=lambda: self.cr_dtree(),
state=DISABLED)
146
147 self.dtree_cr.place(width=125, height=30, bordermode=INSIDE, x=298, y=13)
148
self.dtree_error = Button(self.dtree, text='Error', command=lambda: self.errors_dtree(), state=
DISABLED)
149
150 self.dtree_error.place(width=125, height=30, bordermode=INSIDE, x=443, y=13)
151
152 # Random Forest
153 self.rforest = LabelFrame(self.window, text='Random Forest', bg=self.color)
154 self.rforest.place(width=580, height=80, bordermode=OUTSIDE, x=20, y=600)
155
self.rforest_pred = Button(self.rforest, text='Predict', command=lambda: self.pred_rforest(), state=
DISABLED)
156
157 self.rforest_pred.place(width=125, height=30, bordermode=INSIDE, x=8, y=13)
158
self.rforest_cm = Button(self.rforest, text='Confusion Matrix', command=lambda: self.cm_rforest()
, state=DISABLED)
159
160 self.rforest_cm.place(width=125, height=30, bordermode=INSIDE, x=153, y=13)
161
self.rforest_cr = Button(self.rforest, text='Classification Report', command=lambda: self.cr_rforest
(), state=DISABLED)
162
163 self.rforest_cr.place(width=125, height=30, bordermode=INSIDE, x=298, y=13)
164
self.rforest_error = Button(self.rforest, text='Error', command=lambda: self.errors_rforest(), state=
DISABLED)
165
166 self.rforest_error.place(width=125, height=30, bordermode=INSIDE, x=443, y=13)
167
168 self.window.mainloop()
169
170 def select(self):
171 try:
172 self.data = pd.read_csv(self.name.get())
173 self.df_show['state'] = NORMAL
174 self.df_hide['state'] = NORMAL
175 self.name_entry['state'] = DISABLED
176 self.name_select['state'] = DISABLED
177 self.select_x['state'] = NORMAL
178 except FileNotFoundError:
179 self.name.set('Invalid')
180
181 def create_table(self):
182 try:
183 self.table.window.deiconify()
184 except AttributeError:
185 if self.data.shape[0] > 50:
186 self.table = Table(self.data.head(50), self.window, self.name.get())
187 else:
188 self.table = Table(self.data, self.window, self.name.get())
189 except TclError:
190 if self.data.shape[0] > 50:
191 self.table = Table(self.data.head(50), self.window, self.name.get())
192 else:
193 self.table = Table(self.data, self.window, self.name.get())
194
195 def hide(self):
196 try:
197 self.table.window.withdraw()
198 except TclError:
199 return
200 except AttributeError:
201 return
202
203 def get_x(self):
204 self.selection_x = SelectionX(self.window, self.data)
205 self.X = []
206 for i in range(len(self.data.columns)):
207 if self.selection_x.variables[i].get() == 1:
208 self.X.append(self.data.columns[i])
209
210 self.select_x['state'] = DISABLED
211 self.select_y['state'] = NORMAL
212
213 def get_y(self):
214 self.selection_y = SelectionY(self.window, self.data)
215 self.y = self.data.columns[self.selection_y.variable.get()]
216 if self.y not in self.X:
217 self.split_button['state'] = NORMAL
218 self.select_y['state'] = DISABLED
219
220 def split(self):
221 test_size = 0.25
222 try:
223 test_size = float(self.test_size.get())
224 if test_size <= 0 or test_size >= 1:
225 test_size = 0.25
226 except ValueError:
227 test_size = 0.25
228 self.test_size.set('0.25')
229 random_state = None
230 if self.rstate.get() != 'None':
231 try:
232 random_state = int(self.rstate.get())
233 except ValueError:
234 random_state = None
235 self.rstate.set('None')
236
self.X_train_l, self.X_test_l, self.y_train_l, self.y_test_l = train_test_split(self.data[self.X], self.data[
self.y], test_size=test_size, random_state=random_state)
237
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.data[self.X], self.le.
fit_transform(self.data[self.y]), test_size=test_size, random_state=random_state)
238
239
240 self.linreg_pred['state'] = NORMAL
241 self.coefficients['state'] = DISABLED
242 self.scatter_button['state'] = DISABLED
243 self.linreg_error['state'] = DISABLED
244
245 self.logreg_pred['state'] = NORMAL
246 self.logreg_cr['state'] = DISABLED
247 self.logreg_cm['state'] = DISABLED
248 self.logreg_error['state'] = DISABLED
249
250 self.dtree_pred['state'] = NORMAL
251 self.dtree_cr['state'] = DISABLED
252 self.dtree_cm['state'] = DISABLED
253 self.dtree_error['state'] = DISABLED
254
255 self.rforest_pred['state'] = NORMAL
256 self.rforest_cm['state'] = DISABLED
257 self.rforest_cr['state'] = DISABLED
258 self.rforest_error['state'] = DISABLED
259
260 def pred_linreg(self):
261 self.linreg_model = LinearRegression()
262 self.linreg_model.fit(self.X_train_l, self.y_train_l)
263 self.linreg_predictions = self.linreg_model.predict(self.X_test_l)
264
265 self.linreg_error['state'] = NORMAL
266 self.scatter_button['state'] = NORMAL
267 self.coefficients['state'] = NORMAL
268
269 def scatter(self):
270 Scatter(self.window, self.y_test_l, self.linreg_predictions)
271
272 def coeff(self):
273 Coefficients(self.window, self.linreg_model.intercept_, self.linreg_model.coef_, self.X)
274
275 def errors_linreg(self):
temp = [mean_absolute_error(self.y_test, self.linreg_predictions), mean_squared_error(self.y_test
, self.linreg_predictions), np.sqrt(mean_squared_error(self.y_test, self.linreg_predictions))]
276
277 Errors(self.window, temp, 'Linear Regression')
278
279 def pred_logreg(self):
280 self.logreg_model = LogisticRegression(solver='liblinear')
281 self.logreg_model.fit(self.X_train, self.y_train)
282 self.logreg_predictions = self.logreg_model.predict(self.X_test)
283
284 self.logreg_cr['state'] = NORMAL
285 self.logreg_cm['state'] = NORMAL
286 self.logreg_error['state'] = NORMAL
287
288 def cm_logreg(self):
ConfusionMatrix(self.window, confusion_matrix(self.le.inverse_transform(self.y_test), self.le.
inverse_transform(self.logreg_predictions)), 'Logistic Regression', self.le.classes_)
289
290
291 def cr_logreg(self):
ClassificationReport(self.window, classification_report(self.le.inverse_transform(self.y_test), self.
le.inverse_transform(self.logreg_predictions)), 'Logistic Regression')
292
293
294 def errors_logreg(self):
temp = [mean_absolute_error(self.y_test, self.logreg_predictions), mean_squared_error(self.
y_test, self.logreg_predictions), np.sqrt(mean_squared_error(self.y_test, self.logreg_predictions))]
295
296 Errors(self.window, temp, 'Logistic Regression')
297
298 def pred_dtree(self):
299 self.dtree_model = DecisionTreeClassifier()
300 self.dtree_model.fit(self.X_train, self.y_train)
301 self.dtree_predictions = self.dtree_model.predict(self.X_test)
302
303 self.dtree_cr['state'] = NORMAL
304 self.dtree_cm['state'] = NORMAL
305 self.dtree_error['state'] = NORMAL
306
307 def cm_dtree(self):
ConfusionMatrix(self.window, confusion_matrix(self.le.inverse_transform(self.y_test), self.le.
inverse_transform(self.dtree_predictions)), 'Decision Tree', self.le.classes_)
308
309
310 def cr_dtree(self):
ClassificationReport(self.window, classification_report(self.le.inverse_transform(self.y_test), self.
le.inverse_transform(self.dtree_predictions)), 'Decision Tree')
311
312
313 def errors_dtree(self):
temp = [mean_absolute_error(self.y_test, self.dtree_predictions), mean_squared_error(self.y_test
, self.dtree_predictions), np.sqrt(mean_squared_error(self.y_test, self.dtree_predictions))]
314
315 Errors(self.window, temp, 'Decision Tree')
316
317 def pred_rforest(self):
318 self.rforest_model = RandomForestClassifier(n_estimators=100)
319 self.rforest_model.fit(self.X_train, self.y_train)
320 self.rforest_predictions = self.rforest_model.predict(self.X_test)
321
322 self.rforest_cr['state'] = NORMAL
323 self.rforest_cm['state'] = NORMAL
324 self.rforest_error['state'] = NORMAL
325
326 def cm_rforest(self):
ConfusionMatrix(self.window, confusion_matrix(self.le.inverse_transform(self.y_test), self.le.
inverse_transform(self.rforest_predictions)), 'Random Forest', self.le.classes_)
327
328
329 def cr_rforest(self):
ClassificationReport(self.window, classification_report(self.le.inverse_transform(self.y_test), self.
le.inverse_transform(self.rforest_predictions)), 'Random Forest')
330
331
332 def errors_rforest(self):
temp = [mean_absolute_error(self.y_test, self.rforest_predictions), mean_squared_error(self.
y_test, self.rforest_predictions), np.sqrt(mean_squared_error(self.y_test, self.rforest_predictions))]
333
334 Errors(self.window, temp, 'Random Forest')
335
336

class Table:
    def __init__(self, data, master, name):
        self.master = master
        self.window = Toplevel(self.master)
        self.data = data
        self.name = name
        self.window.title(self.name)
        self.window.geometry('600x600')
        self.window.minsize(250, 250)

        self.frame = Frame(self.window)
        self.frame.pack(expand=True, fill=BOTH)

        self.canvas = Canvas(self.frame, background='white')

        self.h_scroll = Scrollbar(self.frame, orient=HORIZONTAL, command=self.canvas.xview)
        self.h_scroll.pack(side=BOTTOM, fill=X)
        self.v_scroll = Scrollbar(self.frame, orient=VERTICAL, command=self.canvas.yview)
        self.v_scroll.pack(side=RIGHT, fill=Y)

        self.canvas['xscrollcommand'] = self.h_scroll.set
        self.canvas['yscrollcommand'] = self.v_scroll.set
        self.canvas.pack(expand=True, fill=BOTH)

        self.label_frame = LabelFrame(self.canvas)
        self.canvas.create_window((0, 0), window=self.label_frame, anchor=N + W)

        self.shape = (data.shape[0], data.shape[1])

        Table.add_label(self, 0, 0, '#', font=('Helvetica', 15, 'bold'))
        for j in range(self.shape[1]):
            Table.add_label(self, 0, j + 1, self.data.columns[j], font=('Helvetica', 12, 'bold'))
        self.height = 20
        for i in range(self.shape[0]):
            Table.add_label(self, i + 1, 0, str(i + 1))
            ar = data.iloc[i].values
            for j in range(len(ar)):
                Table.add_label(self, i + 1, j + 1, ar[j])
        self.window.update()
        self.canvas.configure(scrollregion=self.label_frame.bbox(ALL))

    def add_label(self, i, j, text, font=('Helvetica', 10)):
        if j % 2 == 0:
            color = 'white'
        else:
            color = 'antique white'
        label = Label(self.label_frame, text=text, font=font, bg=color)
        label.grid(row=i, column=j, sticky=E + N + W + S)

class SelectionX:
    def __init__(self, master, data):
        self.master = master
        self.data = data
        self.columns = self.data.columns
        self.variables = [IntVar() for _ in range(len(self.columns))]

        self.window = Toplevel(self.master)
        self.window.grab_set()
        self.window.title('Independent Variables')
        self.window.geometry('400x400')
        self.window.minsize(250, 250)

        self.frame = Frame(self.window)
        self.frame.pack(expand=True, fill=BOTH)

        self.canvas = Canvas(self.frame, background='antique white')

        self.v_scroll = Scrollbar(self.frame, orient=VERTICAL, command=self.canvas.yview)
        self.v_scroll.pack(side=RIGHT, fill=Y)

        self.canvas['yscrollcommand'] = self.v_scroll.set
        self.canvas.pack(expand=True, fill=BOTH)

        self.frame2 = Frame(self.canvas, bg='antique white')
        self.canvas.create_window((0, 0), window=self.frame2, anchor=N + W)

        for i in range(len(self.columns)):
            Checkbutton(self.frame2, variable=self.variables[i], text=self.columns[i],
                        bg='antique white').pack(anchor=N + W)

        self.all = Button(self.canvas, text='Select All', height=2, width=10,
                          command=lambda: self.select_all())
        self.all.pack(anchor=E, padx=20, pady=20)

        self.none = Button(self.canvas, text='Select None', height=2, width=10,
                           command=lambda: self.select_none())
        self.none.pack(anchor=E, padx=20, pady=0)

        self.none = Button(self.canvas, text='Confirm', height=2, width=10,
                           command=lambda: self.confirm())
        self.none.pack(anchor=E, padx=20, pady=20)

        self.window.update()

        self.canvas.configure(scrollregion=self.canvas.bbox(ALL))

        self.window.mainloop()

    def select_all(self):
        for i in self.variables:
            i.set(1)

    def select_none(self):
        for i in self.variables:
            i.set(0)

    def confirm(self):
        self.window.grab_release()
        self.window.quit()
        self.window.destroy()

class SelectionY:
    def __init__(self, master, data):
        self.master = master
        self.data = data
        self.columns = self.data.columns
        self.variable = IntVar()

        self.window = Toplevel(self.master)
        self.window.grab_set()
        self.window.title('Dependent Variables')
        self.window.geometry('400x400')
        self.window.minsize(250, 250)

        self.frame = Frame(self.window)
        self.frame.pack(expand=True, fill=BOTH)

        self.canvas = Canvas(self.frame, background='antique white')

        self.v_scroll = Scrollbar(self.frame, orient=VERTICAL, command=self.canvas.yview)
        self.v_scroll.pack(side=RIGHT, fill=Y)

        self.canvas['yscrollcommand'] = self.v_scroll.set
        self.canvas.pack(expand=True, fill=BOTH)

        self.frame2 = Frame(self.canvas, bg='antique white')
        self.canvas.create_window((0, 0), window=self.frame2, anchor=N + W)

        for i in range(len(self.columns)):
            Radiobutton(self.frame2, variable=self.variable, value=i, text=self.columns[i],
                        bg='antique white').pack(anchor=N + W)

        self.none = Button(self.canvas, text='Confirm', height=2, width=10,
                           command=lambda: self.confirm())
        self.none.pack(anchor=E, padx=20, pady=20)

        self.canvas.configure(scrollregion=self.canvas.bbox(ALL))

        self.window.mainloop()

    def confirm(self):
        self.window.grab_release()
        self.window.quit()
        self.window.destroy()

class ConfusionMatrix:
    def __init__(self, master, data, name, labels):
        self.data = data
        self.master = master
        self.name = name
        self.labels = sorted(labels)

        self.total = np.sum(self.data)

        self.window = Toplevel(self.master)
        self.window.title(self.name + ' Confusion Matrix')
        self.window.resizable(False, False)

        self.total_label = Label(self.window, text=f'Total = {self.total}',
                                 font=('Helvetica', 15, 'bold'), bg='antique white')
        self.total_label.grid(row=0, column=0, sticky=(N, S, E, W))

        for i in range(len(self.labels)):
            if i % 2 == 0:
                color = 'white'
            else:
                color = 'antique white'
            Label(self.window, text=f'Predicted\n{self.labels[i]}', font=('Helvetica', 15, 'bold'),
                  bg=color).grid(row=0, column=i + 1, sticky=(N, S, E, W))

        for i in range(len(self.labels)):
            if i % 2 == 0:
                color = 'white'
            else:
                color = 'antique white'
            Label(self.window, text=f'Actual\n{self.labels[i]}', font=('Helvetica', 15, 'bold'),
                  bg=color).grid(row=i + 1, column=0, sticky=(N, S, E, W))

            for j in range(len(self.labels)):
                color = ['grey90', 'grey80', 'grey70']
                Label(self.window, text=str(self.data[i][j]), font=('Helvetica', 15, 'bold'),
                      bg=color[(i + j) % 3]).grid(row=i + 1, column=j + 1, sticky=(N, S, E, W))

class Errors:
    def __init__(self, master, data, name):
        self.master = master
        self.data = data
        self.name = name

        self.window = Toplevel(self.master)
        self.window.title(self.name + ' Errors')
        self.window.geometry('500x180')
        self.window.resizable(False, False)

        self.frame = Frame(self.window)
        self.frame.place(width=504, height=184, bordermode=OUTSIDE, x=0, y=0)

        self.text1 = Label(self.frame, text='Mean Absolute Error :',
                           font=('Helvetica', 15, 'bold'), bg='antique white')
        self.text1.place(width=260, height=60, bordermode=INSIDE, x=0, y=0)
        self.text2 = Label(self.frame, text='Mean Squared Error :',
                           font=('Helvetica', 15, 'bold'), bg='white')
        self.text2.place(width=260, height=60, bordermode=INSIDE, x=0, y=60)
        self.text3 = Label(self.frame, text='Root Mean Squared Error: ',
                           font=('Helvetica', 15, 'bold'), bg='antique white')
        self.text3.place(width=260, height=60, bordermode=INSIDE, x=0, y=120)

        self.value1 = Label(self.frame, text=str(data[0]), font=('Helvetica', 15, 'bold'), bg='antique white')
        self.value1.place(width=240, height=60, bordermode=INSIDE, x=260, y=0)
        self.value2 = Label(self.frame, text=str(data[1]), font=('Helvetica', 15, 'bold'), bg='white')
        self.value2.place(width=240, height=60, bordermode=INSIDE, x=260, y=60)
        self.value3 = Label(self.frame, text=str(data[2]), font=('Helvetica', 15, 'bold'), bg='antique white')
        self.value3.place(width=240, height=60, bordermode=INSIDE, x=260, y=120)

class ClassificationReport:
    def __init__(self, master, data, name):
        self.master = master
        self.data = data
        self.name = name

        self.window = Toplevel(self.master)
        self.window.title(self.name + ' Classification Report')
        self.window.configure(background='white')
        self.window.resizable(False, False)
        y = 0

        Label(self.window, text='precision', font=('Helvetica', 15, 'bold'), anchor=E,
              bg='antique white').place(width=100, height=50, bordermode=INSIDE, x=150, y=y)
        Label(self.window, text='recall', font=('Helvetica', 15, 'bold'), anchor=E,
              bg='white').place(width=100, height=50, bordermode=INSIDE, x=250, y=0)
        Label(self.window, text='f1-score', font=('Helvetica', 15, 'bold'), anchor=E,
              bg='antique white').place(width=100, height=50, bordermode=INSIDE, x=350, y=y)
        Label(self.window, text='support', font=('Helvetica', 15, 'bold'), anchor=E,
              bg='white').place(width=100, height=50, bordermode=INSIDE, x=450, y=y)

        y = y + 50

        Label(self.window, bg='antique white').place(width=100, height=10, bordermode=INSIDE, x=150, y=y)
        Label(self.window, bg='antique white').place(width=100, height=10, bordermode=INSIDE, x=350, y=y)

        y = y + 10

        self.ar = self.data.split('\n\n')[1:]
        self.part1 = self.ar[0].split('\n')

        for i in self.part1:
            temp = i.split()
            Label(self.window, text=temp[0], font=('Helvetica', 12, 'bold'), anchor=E,
                  bg='white').place(width=150, height=30, bordermode=INSIDE, x=0, y=y)
            Label(self.window, text=temp[1], font=('Helvetica', 12), anchor=E,
                  bg='antique white').place(width=100, height=30, bordermode=INSIDE, x=150, y=y)
            Label(self.window, text=temp[2], font=('Helvetica', 12), anchor=E,
                  bg='white').place(width=100, height=30, bordermode=INSIDE, x=250, y=y)
            Label(self.window, text=temp[3], font=('Helvetica', 12), anchor=E,
                  bg='antique white').place(width=100, height=30, bordermode=INSIDE, x=350, y=y)
            Label(self.window, text=temp[4], font=('Helvetica', 12), anchor=E,
                  bg='white').place(width=100, height=30, bordermode=INSIDE, x=450, y=y)
            y = y + 30

        Label(self.window, bg='antique white').place(width=100, height=20, bordermode=INSIDE, x=150, y=y)
        Label(self.window, bg='antique white').place(width=100, height=20, bordermode=INSIDE, x=350, y=y)

        y = y + 20

        self.part2 = self.ar[1].split('\n')

        for i in self.part2:
            if i == '':
                continue
            temp = i.split()
            Label(self.window, text=temp.pop(), font=('Helvetica', 12), anchor=E,
                  bg='white').place(width=100, height=30, bordermode=INSIDE, x=450, y=y)
            Label(self.window, text=temp.pop(), font=('Helvetica', 12), anchor=E,
                  bg='antique white').place(width=100, height=30, bordermode=INSIDE, x=350, y=y)
            if len(temp) != 1:
                Label(self.window, text=temp.pop(), font=('Helvetica', 12), anchor=E,
                      bg='white').place(width=100, height=30, bordermode=INSIDE, x=250, y=y)
            if len(temp) != 1:
                Label(self.window, text=temp.pop(), font=('Helvetica', 12), anchor=E,
                      bg='antique white').place(width=100, height=30, bordermode=INSIDE, x=150, y=y)
            else:
                Label(self.window, bg='antique white').place(width=100, height=30, bordermode=INSIDE, x=150, y=y)
            Label(self.window, text=' '.join(temp), font=('Helvetica', 12, 'bold'), anchor=E,
                  bg='white').place(width=150, height=30, bordermode=INSIDE, x=0, y=y)
            y = y + 30

        self.window.geometry('550x' + str(y))

class Scatter:
    def __init__(self, master, y_test, pred):
        self.master = master
        self.y_test = y_test
        self.pred = pred

        self.window = Toplevel(self.master)
        self.window.title('Scatter Plot (y_test vs predictions)')
        self.window.configure(background='white')
        self.window.resizable(False, False)

        self.figure = Figure(figsize=(5, 5), dpi=100)
        self.sub = self.figure.add_subplot(111)
        self.sub.scatter(self.y_test, self.pred, edgecolor='black')
        self.sub.plot()

        self.canvas = FigureCanvasTkAgg(self.figure, master=self.window)
        self.canvas.get_tk_widget().pack()
        self.canvas.draw()

class Coefficients:
    def __init__(self, master, intercept, coef, columns):
        self.master = master
        self.intercept = intercept
        self.coef = coef
        self.columns = columns

        self.window = Toplevel(self.master)
        self.window.title('Intercept and Coefficients')
        self.window.configure(background='white')
        self.window.resizable(False, False)

        self.intercept_label = Label(self.window, text='Intercept :',
                                     font=('Helvetica', 15, 'bold'), bg='antique white')
        self.intercept_label.grid(row=0, column=0, sticky=(N, S, E, W))
        self.intercept_value = Label(self.window, text=str(self.intercept), font=('Helvetica', 15), bg='white')
        self.intercept_value.grid(row=0, column=1, sticky=(N, S, E, W))

        self.coefs = Label(self.window, text='Coefficients', font=('Helvetica', 15, 'bold'), bg='white')
        self.coefs.grid(row=1, column=0, columnspan=2, sticky=(N, S, E, W))

        for i in range(len(self.coef)):
            Label(self.window, text=self.columns[i], font=('Helvetica', 12),
                  bg='antique white').grid(row=i + 2, column=0, sticky=(N, S, E, W))
            Label(self.window, text=str(self.coef[i]), font=('Helvetica', 12),
                  bg='white').grid(row=i + 2, column=1, sticky=(N, S, E, W))

if __name__ == '__main__':
    MachineLearning() | [
"[email protected]"
] | |
a94bd8b5497a0c76c0e2d552e57e1fbcfae2cd6f | 8f436dff6c0681a673d517a1973b6f6b9a43674e | /liberapay/testing/mangopay.py | 4239d88976094ed3b87124cb95f85d07e308d40d | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | ddai00bit/liberapay.com | fc483c9b18dcc016bac84f5b4ccf397a3cb25214 | 78c5eb910877e936b91d1dae274b8cf1f82f3191 | refs/heads/master | 2023-04-05T21:44:45.641171 | 2021-05-04T07:28:31 | 2021-05-04T07:28:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,193 | py | import itertools
from unittest import mock
from mangopay.resources import (
BankAccount, CardRegistration, NaturalUser, Wallet,
)
import requests
from liberapay.i18n.currencies import Money
from liberapay.models.exchange_route import ExchangeRoute
from liberapay.testing import Harness
from liberapay.testing.vcr import use_cassette
class MangopayHarness(Harness):
def setUp(self):
Harness.setUp(self)
self.david = self.make_participant(
'david', mangopay_user_id=self.david_id,
mangopay_wallet_id=self.david_wallet_id, email='[email protected]'
)
self.janet = self.make_participant(
'janet', mangopay_user_id=self.janet_id,
mangopay_wallet_id=self.janet_wallet_id, email='[email protected]'
)
self.janet_route = ExchangeRoute.insert(
self.janet, 'mango-cc', self.card_id, 'chargeable', currency='EUR'
)
self.homer = self.make_participant(
'homer', mangopay_user_id=self.homer_id,
mangopay_wallet_id=self.homer_wallet_id, email='[email protected]'
)
self.homer_route = ExchangeRoute.insert(
self.homer, 'mango-ba', self.bank_account.Id, 'chargeable'
)
def fake_transfer(tr):
tr.Status = 'SUCCEEDED'
    tr.ErrorCode = '000000'
tr.ErrorMessage = None
tr.Id = -1
def fake_wallet(w):
w.Balance = Money.ZEROS[w.Currency]
w.Id = -next(FakeTransfersHarness.wallet_id_serial)
class FakeTransfersHarness(Harness):
wallet_id_serial = itertools.count(1000000)
def setUp(self):
super().setUp()
self.transfer_patch = mock.patch('mangopay.resources.Transfer.save', autospec=True)
_mock = self.transfer_patch.__enter__()
_mock.side_effect = fake_transfer
self.transfer_mock = _mock
self.wallet_patch = mock.patch('mangopay.resources.Wallet.save', autospec=True)
_mock = self.wallet_patch.__enter__()
_mock.side_effect = fake_wallet
self.wallet_mock = _mock
def tearDown(self):
self.transfer_patch.__exit__(None, None, None)
self.wallet_patch.__exit__(None, None, None)
super().tearDown()
def make_mangopay_account(FirstName):
account = NaturalUser()
account.FirstName = FirstName
account.LastName = 'Foobar'
account.CountryOfResidence = 'BE'
account.Nationality = 'BE'
account.Birthday = 0
account.Email = '[email protected]'
account.save()
return account.Id
def make_wallet(mangopay_user_id):
w = Wallet()
w.Owners = [mangopay_user_id]
w.Description = 'test wallet'
w.Currency = 'EUR'
w.save()
return w
def create_card(mangopay_user_id):
cr = CardRegistration()
cr.UserId = mangopay_user_id
cr.Currency = 'EUR'
cr.CardType = 'CB_VISA_MASTERCARD'
cr.save()
data = dict(
accessKeyRef=cr.AccessKey,
cardNumber='3569990000000132',
cardExpirationDate='1234',
cardCvx='123',
data=cr.PreregistrationData,
)
cr.RegistrationData = requests.post(cr.CardRegistrationURL, data).text
cr.save()
return cr
with use_cassette('MangopayOAuth'):
import mangopay
mangopay.get_default_handler().auth_manager.get_token()
with use_cassette('MangopayHarness'):
cls = MangopayHarness
cls.david_id = make_mangopay_account('David')
cls.david_wallet_id = make_wallet(cls.david_id).Id
cls.janet_id = make_mangopay_account('Janet')
cls.janet_wallet_id = make_wallet(cls.janet_id).Id
cr = create_card(cls.janet_id)
cls.card_id = cr.CardId
del cr
cls.homer_id = make_mangopay_account('Homer')
cls.homer_wallet_id = make_wallet(cls.homer_id).Id
ba = BankAccount(user_id=cls.homer_id, type='IBAN')
ba.OwnerName = 'Homer Jay'
ba.OwnerAddress = {
'AddressLine1': 'Somewhere',
'City': 'The City of Light',
'PostalCode': '75001',
'Country': 'FR',
}
ba.IBAN = 'FR1420041010050500013M02606'
ba.save()
cls.bank_account = ba
ba = BankAccount()
ba.Type = 'IBAN'
ba.IBAN = 'IR861234568790123456789012'
cls.bank_account_outside_sepa = ba
| [
"[email protected]"
] | |
316a57fe50150f51e9655515eaec2356b5cbcff5 | 8f64d50494507fd51c0a51010b84d34c667bd438 | /BeautyForMe/myvenv/Lib/site-packages/phonenumbers/shortdata/region_GU.py | 05be0b455abfd583e4469c62b75308e3f386e1f1 | [
"MIT"
] | permissive | YooInKeun/CAU_CSE_Capstone_3 | 5a4a61a916dc13c8635d25a04d59c21279678477 | 51405c4bed2b55661aa0708c8acea17fe72aa701 | refs/heads/master | 2022-12-11T15:39:09.721019 | 2021-07-27T08:26:04 | 2021-07-27T08:26:04 | 207,294,862 | 6 | 1 | MIT | 2022-11-22T04:52:11 | 2019-09-09T11:37:13 | Python | UTF-8 | Python | false | false | 654 | py | """Auto-generated file, do not edit by hand. GU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GU = PhoneMetadata(id='GU', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
toll_free=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_data=True)
| [
"[email protected]"
] | |
35e1031be1362e0bcb23587c0b39087847e40de3 | db053c220094368ecb784fbe62375378c97457c2 | /680.valid-palindrome-ii.py | f8da057ab7cd5e88321a11b6221d0afbf1d7bfce | [] | no_license | thegamingcoder/leetcode | 8c16e7ac9bda3e34ba15955671a91ad072e87d94 | 131facec0a0c70d319982e78e772ed1cb94bc461 | refs/heads/master | 2020-03-22T14:51:45.246495 | 2018-07-09T00:00:06 | 2018-07-09T00:00:06 | 140,211,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #
# [680] Valid Palindrome II
#
# https://leetcode.com/problems/valid-palindrome-ii/description/
#
# algorithms
# Easy (32.37%)
# Total Accepted: 34.1K
# Total Submissions: 105.4K
# Testcase Example: '"aba"'
#
#
# Given a non-empty string s, you may delete at most one character. Judge
# whether you can make it a palindrome.
#
#
# Example 1:
#
# Input: "aba"
# Output: True
#
#
#
# Example 2:
#
# Input: "abca"
# Output: True
# Explanation: You could delete the character 'c'.
#
#
#
# Note:
#
# The string will only contain lowercase characters a-z.
# The maximum length of the string is 50000.
#
#
#
class Solution(object):
    def validPalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # Two-pointer check allowing at most one deletion (sketch added here;
        # the original file left the method body unimplemented).
        def check(lo, hi, can_delete):
            while lo < hi:
                if s[lo] != s[hi]:
                    # Drop either mismatching character if a deletion is still available.
                    return can_delete and (check(lo + 1, hi, False) or check(lo, hi - 1, False))
                lo += 1
                hi -= 1
            return True

        return check(0, len(s) - 1, True)
| [
"[email protected]"
] | |
85aee261c4e212bd790c51b226e4a375b1524019 | 26ea89d8f679629e59ba6798d5e7b7c443eac2d7 | /express_checkout/tests/__init__.py | a0c28330d3dcdafccb19afb3757ac9c51315ec5a | [] | no_license | jobiols/jeo | f91d4dcefcef73deb60d07ef59ee7065cdbfcbfa | 41e02a8363d15a3a54d3c481fe9c65c18ea4be84 | refs/heads/8.0 | 2020-12-11T14:20:57.885362 | 2019-09-07T16:29:27 | 2019-09-07T16:29:27 | 49,533,283 | 2 | 0 | null | 2017-10-19T00:32:19 | 2016-01-12T22:33:05 | Python | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 jeo Software (http://www.jeosoft.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################################
import test_express_checkout
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
acd12224b507826a13418f4571a4bf7e1932ceaa | 74ace85cc5b5e721f6c2433153277c60135f356a | /jlm/src/jlm/tests/conftest.py | 467723d5465a376bcfd3f986602b9c1e1fd87ea7 | [
"MIT"
] | permissive | tkf/JuliaManager.jl | c24839777bf8d11bf72eeeaf0d0fe5d59715c1fe | be4586e0965a7beb6248ea503ac48ac3d43ec0f0 | refs/heads/master | 2020-05-03T16:59:36.062145 | 2020-02-10T08:47:38 | 2020-02-10T08:47:38 | 178,736,172 | 9 | 2 | MIT | 2020-02-10T08:47:40 | 2019-03-31T20:04:01 | Python | UTF-8 | Python | false | false | 311 | py | import pytest # type: ignore
from .. import cli
from .testing import changingdir
@pytest.fixture
def cleancwd(tmp_path):
newcwd = tmp_path / "cleancwd"
with changingdir(newcwd):
yield newcwd
@pytest.fixture
def initialized(cleancwd):
cli.run(["--verbose", "init"])
return cleancwd
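
# Illustrative sketch (not part of the original conftest): how these fixtures
# might be used in a test. The test name and the ".jlm" directory check are
# assumptions made only for this example.
#
#     def test_init_writes_state(initialized):
#         assert (initialized / ".jlm").is_dir()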
| [
"[email protected]"
] | |
076707f145a54563bd0cbe046327482dd9339a70 | 0728513cfd064b8f6c130d42ad8ef79f49b6b9b2 | /test/test_tpc_gain.py | 49c8104d09f97361636986a1b645e67262dc1a47 | [] | no_license | XENONnT/pmts-api-client | 7e70574e45c3e1e639b066513c7f07047ac4dd30 | 2b1025fc6cec01726e2d555f609c148891c6d879 | refs/heads/master | 2022-12-10T02:04:12.942994 | 2020-09-27T15:39:09 | 2020-09-27T15:39:09 | 276,297,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | # coding: utf-8
"""
XENON PMT API
API for the XENON PMT database # noqa: E501
The version of the OpenAPI document: 1.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import xepmts
from xepmts.models.tpc_gain import TpcGain # noqa: E501
from xepmts.rest import ApiException
class TestTpcGain(unittest.TestCase):
"""TpcGain unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test TpcGain
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = xepmts.models.tpc_gain.TpcGain() # noqa: E501
if include_optional :
return TpcGain(
detector = 'tpc',
experiment = 'xenonnt',
run_id = '0',
timestamp = 56,
pmt_index = 56,
gain = 1.337,
gain_err = 1.337,
gain_stat_err = 1.337,
gain_sys_err = 1.337,
voltage = 1.337,
occupancy = 1.337,
occupancy_err = 1.337,
id = '0'
)
else :
return TpcGain(
detector = 'tpc',
experiment = 'xenonnt',
run_id = '0',
pmt_index = 56,
gain = 1.337,
gain_err = 1.337,
)
def testTpcGain(self):
"""Test TpcGain"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
838191e594abd158113cbf1be59f22fbe13cc79a | 31aa2380ea51c98f2bd14c43e83766090fff81d8 | /src/dropbot_chip_qc/video.py | d2393c39fafcd4219699441ba4caeda3295d052e | [
"BSD-3-Clause"
] | permissive | MIKA-SSS/dropbot-chip-qc | ee9a25999bdc0b1cb99bd6d45ff6105aff943a3d | e5944b88c0d423163f55a3f49ebf84bb27e229bc | refs/heads/master | 2023-03-17T04:43:39.576126 | 2019-10-24T17:15:12 | 2019-10-24T17:15:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,551 | py | # -*- encoding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
import logging
import threading
import time
import blinker
import numpy as np
import pandas as pd
import pyzbar.pyzbar as pyzbar
try:
import cv2
except ImportError:
raise Exception('Error: OpenCv is not installed')
from .async import asyncio, show_chip
# XXX The `device_corners` device AruCo marker locations in the normalized
# video frame were determined empirically.
delta = 2 * 45
device_height = 480 - 3.925 * delta
corner_indices = [
(1, 'top-right'),
(1, 'top-left'),
(0, 'top-left'),
(0, 'top-right'),
]
def bbox_corners(x, y, width, height):
return pd.DataFrame([(x, y), (x + delta, y), (x + delta, y + 1.5 * delta), (x, y + 1.5 * delta)],
columns=['x', 'y'],
index=['top-left', 'bottom-left', 'bottom-right', 'top-right'], # Top/bottom of top plate
dtype='float32')
x_zoom_delta = 50
y_zoom_delta = 45
y_zoom_offset = -37.5
device_corners = pd.concat((bbox_corners(x, y, delta, delta)
for x, y in
# Top/bottom of top-plate
[(640 + x_zoom_delta,
y_zoom_offset + 480 - delta -
.5 * device_height + y_zoom_delta),
(-delta - x_zoom_delta,
y_zoom_offset + .5 * device_height -
y_zoom_delta)]),
keys=range(2))
device_corners.loc[1, :] = np.roll(device_corners.loc[1].values, -4)
device_corners /= 640, 480
class FPS(object):
def __init__(self):
self._times = []
def update(self):
self._times.append(time.time())
self._times = self._times[-10:]
@property
def framerate(self):
if len(self._times) > 1:
return 1 / np.diff(self._times).mean()
else:
return 0.
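
# Usage sketch for the FPS helper above (illustrative; not part of the original
# module): call update() once per processed frame and read `framerate` for a
# moving average over the last 10 frames.
#
#     fps = FPS()
#     for _ in range(100):
#         fps.update()        # once per frame
#     print(fps.framerate)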
def chip_video_process(signals, width=1920, height=1080, device_id=0):
'''
Continuously monitor webcam feed for DMF chip.
Repeatedly perform the following tasks:
- read video frame from the webcam
- detect AruCo markers in the frame, and draw overlay to indicate markers
(if available)
- apply perspective correction based on detected AruCo marker positions
(if applicable)
- detect chip UUID from QR code (if available)
- combine raw video frame and perspective-corrected frame into a single
frame
- write the chip UUID as text in top-left corner of the combined video
frame
Layout of the combined video frame::
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃
┃ Raw video frame (AruCo markers highlighted) ┃
┃ ┃
┃ ┃
┠┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┨
┃ ┃
┃ Perspective-corrected video frame ┃
┃ based on AruCo markers ┃
┃ ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
Parameters
----------
signals : blinker.Namespace
The following signals are sent::
- ``frame-ready``: video frame is ready; keyword arguments include::
- ``frame``: combined video frame
- ``raw_frame``: raw frame from webcam
- ``warped``: perspective-corrected frame
- ``transform``: perspective-correction transformation matrix
- ``fps``: rate of frame processing in frames per second
- ``chip_uuid``: UUID currently detected chip (``None`` if no chip is
detected)
- ``closed``: process has been closed (in response to a
``exit-request`` signal).
- ``chip-detected``: new chip UUID has been detected
- ``chip-removed``: chip UUID no longer detected
width : int, optional
Video width.
height : int, optional
Video height.
device_id : int, optional
OpenCV video source id (starts at zero).
'''
capture = cv2.VideoCapture(device_id)
# Set format to MJPG (instead of YUY2) to _dramatically_ improve frame
# rate. For example, using Logitech C920 camera, frame rate increases from
# 10 FPS to 30 FPS (not including QR code detection, warping, etc.).
#
# See: https://github.com/opencv/opencv/issues/9084#issuecomment-324477425
fourcc_int = np.fromstring(bytes('MJPG'), dtype='uint8').view('uint32')[0]
capture.set(cv2.CAP_PROP_FOURCC, fourcc_int)
capture.set(cv2.CAP_PROP_AUTOFOCUS, True)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
if capture.isOpened(): # try to get the first frame
frame_captured, frame = capture.read()
else:
raise IOError('No frame.')
corners_by_id = {}
start = time.time()
frame_count = 0
# Transformation matrix for perspective-corrected device view.
M = None
# Counter to debounce detection of missing chip; helps prevent spurious
# `chip-detected`/`chip-removed` events where chip has not actually moved.
not_detected_count = 0
decodedObjects = []
exit_requested = threading.Event()
chip_detected = threading.Event()
fps = 1
signals.signal('exit-request').connect(lambda sender: exit_requested.set(),
weak=False)
# Font used for UUID label.
font = cv2.FONT_HERSHEY_SIMPLEX
fps = FPS()
while frame_captured and not exit_requested.is_set():
# Find barcodes and QR codes
if not chip_detected.is_set():
decodedObjects = pyzbar.decode(frame)
if decodedObjects:
chip_detected.decoded_objects = decodedObjects
chip_detected.set()
# Find font scale to fit UUID to width of frame.
text = chip_detected.decoded_objects[0].data
scale = 4
thickness = 1
text_size = cv2.getTextSize(text, font, scale, thickness)
while text_size[0][0] > frame.shape[0]:
scale *= .95
text_size = cv2.getTextSize(text, font, scale, thickness)
chip_detected.label = {'uuid': text, 'scale': scale,
'thickness': 1, 'text_size': text_size}
signals.signal('chip-detected')\
.send('chip_video_process',
decoded_objects=chip_detected.decoded_objects)
logging.info('chip detected: `%s`',
chip_detected.decoded_objects[0].data)
detect_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)
corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(frame,
detect_dict)
cv2.aruco.drawDetectedMarkers(frame, corners, ids)
corners_by_id_i = (dict(zip(ids[:, 0], corners)) if ids is not None
else {})
updated = False
for i in range(2):
if i in corners_by_id_i:
corners_list_i = corners_by_id.setdefault(i, [])
corners_list_i.append(corners_by_id_i[i])
del corners_list_i[:-5]
updated = True
if updated and all(i in corners_by_id_i for i in range(2)):
not_detected_count = 0
mean_corners = pd.concat((pd.DataFrame(np.array(corners_by_id[i])
.mean(axis=0)[0],
columns=['x', 'y'],
index=['top-left',
'top-right',
'bottom-right',
'bottom-left'])
for i in range(2)), keys=range(2))
M = cv2.getPerspectiveTransform(mean_corners.loc[corner_indices]
.values,
(device_corners.loc[corner_indices]
* frame.shape[:2][::-1]).values)
elif chip_detected.is_set():
M = None
not_detected_count += 1
if M is None and not_detected_count >= 10:
not_detected_count = 0
# AruCo markers have not been detected for the previous 10 frames;
# assume chip has been removed.
chip_detected.clear()
signals.signal('chip-removed').send('chip_video_process')
if M is not None:
warped = cv2.warpPerspective(frame, M, frame.shape[:2][::-1])
else:
warped = frame
display_frame = np.concatenate([frame, warped])
display_frame = cv2.resize(display_frame,
tuple(np.array(display_frame.shape[:2]) /
2))
if chip_detected.is_set():
kwargs = chip_detected.label.copy()
cv2.putText(display_frame, kwargs['uuid'],
(10, 10 + kwargs['text_size'][0][-1]), font,
kwargs['scale'], (255,255,255),
kwargs['thickness'], cv2.LINE_AA)
chip_uuid = chip_detected.label['uuid']
else:
chip_uuid = None
signals.signal('frame-ready').send('chip_video_process',
frame=display_frame, transform=M,
raw_frame=frame, warped=warped,
fps=fps, chip_uuid=chip_uuid)
frame_captured, frame = capture.read()
fps.update()
# When everything done, release the capture
capture.release()
signals.signal('closed').send('chip_video_process')
def main(signals=None, resolution=(1280, 720), device_id=0):
'''
Launch chip webcam monitor thread and view window.
'''
if signals is None:
signals = blinker.Namespace()
thread = threading.Thread(target=chip_video_process,
args=(signals, resolution[0], resolution[1],
device_id))
thread.start()
loop = asyncio.get_event_loop()
# Launch window to view chip video.
loop.run_until_complete(show_chip(signals))
# Close background thread.
signals.signal('exit-request').send('main')
| [
"[email protected]"
] | |
3cacf1d37e787bfb185abf4a6735e3618ff9d9a5 | 2491df3f643539e6055bb0b2a4b659474c57491f | /computeFactorial.py | 6c4b4ad44d45c903c4df51a2cc44c0863dc5ec5f | [] | no_license | ghilbing/Ejemplos | 85efc91346028b8a3d26d7680d9286b26234c771 | 339a45ef48c9a61002a01f7c823cc42d34fab409 | refs/heads/master | 2021-05-13T13:58:33.010157 | 2018-02-26T20:44:44 | 2018-02-26T20:44:44 | 116,724,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def factorial(A):
if A <= 1:
return 1
else:
A = A * factorial(A-1)
return A
A = 6
print(factorial(A)) | [
"[email protected]"
] | |
cf276eaa8c48568cb7e03f69d2a7c6e9aa282b40 | c1e7082dc5a3e667f5e6c373670a7971dceeb4fa | /gym/spaces/graph.py | 2f393c2c470626a669d8316c1d33ba6b81759e6d | [
"MIT"
] | permissive | thomascherickal/gym | afd8ef9817bc9f7f52b6e29f1bf94f7ce448e9c6 | 53d784eafed28d31ec41c36ebd9eee14b0dc6d41 | refs/heads/master | 2022-09-26T19:59:16.286645 | 2022-09-16T20:40:07 | 2022-09-16T20:40:07 | 161,881,517 | 2 | 1 | NOASSERTION | 2020-10-13T08:47:43 | 2018-12-15T07:33:35 | Python | UTF-8 | Python | false | false | 9,756 | py | """Implementation of a space that represents graph information where nodes and edges can be represented with euclidean space."""
from typing import NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from gym.logger import warn
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from gym.spaces.multi_discrete import MultiDiscrete
from gym.spaces.space import Space
class GraphInstance(NamedTuple):
"""A Graph space instance.
* nodes (np.ndarray): an (n x ...) sized array representing the features for n nodes, (...) must adhere to the shape of the node space.
* edges (Optional[np.ndarray]): an (m x ...) sized array representing the features for m nodes, (...) must adhere to the shape of the edge space.
* edge_links (Optional[np.ndarray]): an (m x 2) sized array of ints representing the two nodes that each edge connects.
"""
nodes: np.ndarray
edges: Optional[np.ndarray]
edge_links: Optional[np.ndarray]
class Graph(Space):
r"""A space representing graph information as a series of `nodes` connected with `edges` according to an adjacency matrix represented as a series of `edge_links`.
Example usage::
self.observation_space = spaces.Graph(node_space=space.Box(low=-100, high=100, shape=(3,)), edge_space=spaces.Discrete(3))
"""
def __init__(
self,
node_space: Union[Box, Discrete],
edge_space: Union[None, Box, Discrete],
seed: Optional[Union[int, np.random.Generator]] = None,
):
r"""Constructor of :class:`Graph`.
The argument ``node_space`` specifies the base space that each node feature will use.
This argument must be either a Box or Discrete instance.
The argument ``edge_space`` specifies the base space that each edge feature will use.
This argument must be either a None, Box or Discrete instance.
Args:
node_space (Union[Box, Discrete]): space of the node features.
edge_space (Union[None, Box, Discrete]): space of the node features.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space.
"""
assert isinstance(
node_space, (Box, Discrete)
), f"Values of the node_space should be instances of Box or Discrete, got {type(node_space)}"
if edge_space is not None:
assert isinstance(
edge_space, (Box, Discrete)
), f"Values of the edge_space should be instances of None Box or Discrete, got {type(node_space)}"
self.node_space = node_space
self.edge_space = edge_space
super().__init__(None, None, seed)
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return False
def _generate_sample_space(
self, base_space: Union[None, Box, Discrete], num: int
) -> Optional[Union[Box, MultiDiscrete]]:
if num == 0 or base_space is None:
return None
if isinstance(base_space, Box):
return Box(
low=np.array(max(1, num) * [base_space.low]),
high=np.array(max(1, num) * [base_space.high]),
shape=(num,) + base_space.shape,
dtype=base_space.dtype,
seed=self.np_random,
)
elif isinstance(base_space, Discrete):
return MultiDiscrete(nvec=[base_space.n] * num, seed=self.np_random)
else:
raise TypeError(
f"Expects base space to be Box and Discrete, actual space: {type(base_space)}."
)
def sample(
self,
mask: Optional[
Tuple[
Optional[Union[np.ndarray, tuple]],
Optional[Union[np.ndarray, tuple]],
]
] = None,
num_nodes: int = 10,
num_edges: Optional[int] = None,
) -> GraphInstance:
"""Generates a single sample graph with num_nodes between 1 and 10 sampled from the Graph.
Args:
mask: An optional tuple of optional node and edge mask that is only possible with Discrete spaces
(Box spaces don't support sample masks).
If no `num_edges` is provided then the `edge_mask` is multiplied by the number of edges
num_nodes: The number of nodes that will be sampled, the default is 10 nodes
num_edges: An optional number of edges, otherwise, a random number between 0 and `num_nodes`^2
Returns:
A NamedTuple representing a graph with attributes .nodes, .edges, and .edge_links.
"""
assert (
num_nodes > 0
), f"The number of nodes is expected to be greater than 0, actual value: {num_nodes}"
if mask is not None:
node_space_mask, edge_space_mask = mask
else:
node_space_mask, edge_space_mask = None, None
# we only have edges when we have at least 2 nodes
if num_edges is None:
if num_nodes > 1:
# maximal number of edges is `n*(n-1)` allowing self connections and two-way is allowed
num_edges = self.np_random.integers(num_nodes * (num_nodes - 1))
else:
num_edges = 0
if edge_space_mask is not None:
edge_space_mask = tuple(edge_space_mask for _ in range(num_edges))
else:
if self.edge_space is None:
warn(
f"The number of edges is set ({num_edges}) but the edge space is None."
)
assert (
num_edges >= 0
), f"Expects the number of edges to be greater than 0, actual value: {num_edges}"
assert num_edges is not None
sampled_node_space = self._generate_sample_space(self.node_space, num_nodes)
sampled_edge_space = self._generate_sample_space(self.edge_space, num_edges)
assert sampled_node_space is not None
sampled_nodes = sampled_node_space.sample(node_space_mask)
sampled_edges = (
sampled_edge_space.sample(edge_space_mask)
if sampled_edge_space is not None
else None
)
sampled_edge_links = None
if sampled_edges is not None and num_edges > 0:
sampled_edge_links = self.np_random.integers(
low=0, high=num_nodes, size=(num_edges, 2)
)
return GraphInstance(sampled_nodes, sampled_edges, sampled_edge_links)
def contains(self, x: GraphInstance) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if isinstance(x, GraphInstance):
# Checks the nodes
if isinstance(x.nodes, np.ndarray):
if all(node in self.node_space for node in x.nodes):
# Check the edges and edge links which are optional
if isinstance(x.edges, np.ndarray) and isinstance(
x.edge_links, np.ndarray
):
assert x.edges is not None
assert x.edge_links is not None
if self.edge_space is not None:
if all(edge in self.edge_space for edge in x.edges):
if np.issubdtype(x.edge_links.dtype, np.integer):
if x.edge_links.shape == (len(x.edges), 2):
if np.all(
np.logical_and(
x.edge_links >= 0,
x.edge_links < len(x.nodes),
)
):
return True
else:
return x.edges is None and x.edge_links is None
return False
def __repr__(self) -> str:
"""A string representation of this space.
The representation will include node_space and edge_space
Returns:
A representation of the space
"""
return f"Graph({self.node_space}, {self.edge_space})"
def __eq__(self, other) -> bool:
"""Check whether `other` is equivalent to this instance."""
return (
isinstance(other, Graph)
and (self.node_space == other.node_space)
and (self.edge_space == other.edge_space)
)
def to_jsonable(self, sample_n: NamedTuple) -> list:
"""Convert a batch of samples from this space to a JSONable data type."""
# serialize as list of dicts
ret_n = []
for sample in sample_n:
ret = {}
ret["nodes"] = sample.nodes.tolist()
if sample.edges is not None:
ret["edges"] = sample.edges.tolist()
ret["edge_links"] = sample.edge_links.tolist()
ret_n.append(ret)
return ret_n
def from_jsonable(self, sample_n: Sequence[dict]) -> list:
"""Convert a JSONable data type to a batch of samples from this space."""
ret = []
for sample in sample_n:
if "edges" in sample:
ret_n = GraphInstance(
np.asarray(sample["nodes"]),
np.asarray(sample["edges"]),
np.asarray(sample["edge_links"]),
)
else:
ret_n = GraphInstance(
np.asarray(sample["nodes"]),
None,
None,
)
ret.append(ret_n)
return ret
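
# Usage sketch (illustrative; not part of the original module). The node and
# edge spaces below mirror the example given in the class docstring.
#
#     from gym.spaces import Box, Discrete
#
#     space = Graph(node_space=Box(low=-100, high=100, shape=(3,)),
#                   edge_space=Discrete(3))
#     sample = space.sample(num_nodes=5)   # GraphInstance(nodes, edges, edge_links)
#     assert space.contains(sample)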
| [
"[email protected]"
] | |
f1edb501954b262818ad2951e48337e3c1f506aa | a5103b7d5066138ac1a9aabc273361491a5031cd | /daily/8/DeepLearning/myproject/beatifulFace/blend.py | bbc8d6693925aac1e83b1ac66618bd37ee1b3f74 | [] | no_license | mckjzhangxk/deepAI | 0fa2f261c7899b850a4ec432b5a387e8c5f13e83 | 24e60f24b6e442db22507adddd6bf3e2c343c013 | refs/heads/master | 2022-12-13T18:00:12.839041 | 2021-06-18T03:01:10 | 2021-06-18T03:01:10 | 144,862,423 | 1 | 1 | null | 2022-12-07T23:31:01 | 2018-08-15T14:19:10 | Jupyter Notebook | UTF-8 | Python | false | false | 4,159 | py | import cv2
import numpy as np
from collections import defaultdict
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve,cg,eigsh
def gauss_pyramid(I):
ret=[I]
n=int(np.ceil(np.log2(min(I.shape[:2])//16)))
for i in range(1,n+1):
ret.append(cv2.pyrDown(ret[i-1]))
return ret
def laplacian_pyramid(gs):
ret=[gs[-1]]
n=len(gs)
for i in range(n-2,-1,-1):
g=gs[i]
H,W=g.shape[:2]
L=cv2.subtract(g,cv2.pyrUp(gs[i+1],dstsize=(W,H)))
ret.append(L)
ret.reverse()
return ret
def blend_laplician_pyramid(ls_a,ls_b,gs_mask):
final_la=[]
for m,la,lb in zip(gs_mask,ls_a,ls_b):
m=m[:,:,np.newaxis]
final_la.append(m*la+(1-m)*lb)
return final_la
def sum_laplacian_pyramid(ls):
ret=ls[-1]
n=len(ls)
for i in range(n-2,-1,-1):
L=ls[i]
H,W=L.shape[:2]
ret=cv2.add(L,cv2.pyrUp(ret,dstsize=(W,H)))
return ret
def blend(img_a,img_b,mask):
la_=laplacian_pyramid(gauss_pyramid(img_a))
lb_=laplacian_pyramid(gauss_pyramid(img_b))
g_mask=gauss_pyramid(mask)
return sum_laplacian_pyramid(blend_laplician_pyramid(la_,lb_,g_mask))
def isOMEGA(mask):
nz=np.nonzero(mask)
return set(zip(nz[1],nz[0]))
def getBoundary(mask):
kernel=np.ones((3,3),'int')
inside=cv2.erode(mask,kernel)
boundary=cv2.bitwise_xor(mask,inside)
return isOMEGA(boundary),boundary
def point2VectorIndex(pts):
return {(x[0],x[1]):i for i,x in enumerate(pts)}
def adj(x,y):
return [(x-1,y),(x+1,y),(x,y-1),(x,y+1)]
def grid_matrix_param(mask):
'''
:param mask:array(H,W) 0/1
:return:
data:(x,y,value)
N:矩阵的大小
T:key =矩阵的行索引, value=(x,y) 表示邻接点的坐标
'''
pts=isOMEGA(mask)
boundary_pts,_=getBoundary(mask)
dict_index=point2VectorIndex(pts)
N=len(pts)
data=[]
row=[]
col=[]
T=defaultdict(list)
def f(p):
pindex=dict_index[p]
data.append(4.0)
row.append(pindex)
col.append(pindex)
if p not in boundary_pts:
for q in adj(*p):
data.append(-1.0)
row.append(pindex)
col.append(dict_index[q])
else:
for q in adj(*p):
if q in pts:
data.append(-1.0)
row.append(pindex)
col.append(dict_index[q])
else:
T[pindex].append(q)
for _ in map(f,pts):pass
return (data,(row,col)),N,T,dict_index
def dict_index_to_array(data):
index,xs,ys=[],[],[]
for pts,i in data.items():
index.append(i)
xs.append(pts[0])
ys.append(pts[1])
return index,xs,ys
def process(source, target, mask):
data,N,T,dict_index=grid_matrix_param(mask)
indexes,xs,ys=dict_index_to_array(dict_index)
A = csc_matrix(data, dtype=float)
# Create B matrix
channels=source.shape[2]
b = np.zeros((N,channels), dtype=float)
b[indexes]=source[ys,xs]
for index,pts in T.items():
for p in pts:
b[index]+=target[p[1],p[0]]
composite = np.copy(target)
# x = spsolve(A, b)
for i in range(channels):
x=cg(A,b[:,i])
composite[ys,xs,i]=np.clip(x[0][indexes],0,255)
return composite
from datetime import datetime
if __name__ == '__main__':
mask=np.zeros((800,600),'uint8')
mask[30:130,70:150]=1
src=np.zeros((800,600,3),'uint8')
target=np.zeros((800,600,3),'uint8')
# omada=isOMEGA(mask)
#
# boundary,boundary_img=getBoundary(mask)
#
# for x,y in boundary:
# mask[y,x]=128
# d=point2VectorIndex(omada)
# print(len(d))
# print(boundary)
# data,N,T,dict_index=grid_matrix_param(mask)
# a,b,c=dict_index_to_array(dict_index)
# assert N==len(dict_index)
# for k,v in T.items():
# for vv in v:
# mask[vv[1],vv[0]]=128
# cv2.imshow('mask',mask*255)
# cv2.waitKey(0)
s=datetime.now()
sss=process(src,target,mask)
print(sss.dtype)
print(datetime.now()-s) | [
"[email protected]"
] | |
f727a53af8f9c8d1bfa78ce5468ab0fbad85aca9 | abc422f58ad053bcbb6653ba15b66e46d220a199 | /tcutils/pkgs/Traffic/traffic/utils/util.py | 61b1ab3bf3f6a08f841d5824248dd1046f7f4d8e | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | tungstenfabric/tf-test | d3efff59bca931b614d0008260b2c0881d1fc009 | 4b9eca7eb182e5530223131ecab09d3bdf366407 | refs/heads/master | 2023-02-26T19:14:34.345423 | 2023-01-11T08:45:18 | 2023-01-11T10:37:25 | 265,231,958 | 8 | 22 | null | 2023-02-08T00:53:29 | 2020-05-19T11:46:12 | Python | UTF-8 | Python | false | false | 299 | py | import socket
def is_v4(address):
try:
socket.inet_pton(socket.AF_INET, address)
except socket.error:
return False
return True
def is_v6(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
| [
"[email protected]"
] | |
f010a4d16c12e85270a596fc2f31a8841ac64dc2 | 9a04de8acae6b9d5f134ab04ce4573acd05be10c | /facebook_pages/factories.py | 7b37712ec2d1dfb0311b86476d9e42424e912116 | [
"BSD-3-Clause"
] | permissive | bmcool/django-facebook-pages | 046fb5727008dc0f5bf20a6201006466e89bec1d | 44ae645c93a37e741ceda018daaa8def10acd1ad | refs/heads/master | 2021-01-18T07:48:13.249597 | 2013-06-09T13:37:16 | 2013-06-09T13:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from models import Page
import factory
import random
class PageFactory(factory.Factory):
FACTORY_FOR = Page
graph_id = factory.Sequence(lambda n: n) | [
"[email protected]"
] | |
e2cd93ae33ad1783ad4ed4faeafd03fbf503f425 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceBot/Main/muricah.py | 703d9a78bcb6c74111c29fcabd8c8e38187eb98e | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
@sopel.module.commands('muricah')
def mainfunction(bot, trigger):
enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'muricah')
if not enablestatus:
# IF "&&" is in the full input, it is treated as multiple commands, and is split
commands_array = spicemanip(bot, triggerargsarray, "split_&&")
if commands_array == []:
commands_array = [[]]
for command_split_partial in commands_array:
triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
msg = trigger.nick + " shoots a toaster or something."
osd(bot, trigger.sender, 'say', msg)
| [
"[email protected]"
] | |
31000cc65e540af23728898e88f779605b40c038 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/ops/nn_grad.py | ec79476d6c56a844306f7fb61ef270da90c74545 | [
"Apache-2.0"
] | permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 12,062 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(
grad, array_ops.shape(op.inputs[1]), op.inputs[2],
op.get_attr("strides"), op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"), op.get_attr("data_format")),
nn_ops.conv2d(
grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax -
array_ops.reshape(math_ops.reduce_sum(grad_softmax * softmax, [1]),
[-1, 1]))
* softmax)
return grad_x
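
# Explanatory note (added; not in the original source): per row, the expression
# above computes grad_x_i = softmax_i * (grad_softmax_i - sum_j grad_softmax_j * softmax_j),
# i.e. the Jacobian-vector product of the softmax with the incoming gradient.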
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
data_format=data_format))
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad,
op.get_attr("strides"), op.get_attr("padding")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0], array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"), op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")
)
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")
)
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
A list of two tensors, the first being the gradient w.r.t to the input and
TopK, and the second being the gradient w.r.t. to the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [array_ops.reshape(
sparse_ops.sparse_to_dense(ind,
array_ops.reshape(
math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False),
in_shape), array_ops.zeros(
[1],
dtype=dtypes.int32)]
| [
"[email protected]"
] | |
6e63e02f7cb85f88fae930c14c63504884d425e5 | 163808746e51d378f69a966645b8bb8a855b4625 | /MyMain1012/MyMain1012/MyModules.py | 1044ab01075533ee8a21af408e08c251ab99f0f0 | [] | no_license | 0024thiroshi/comm5.0_fall_semester | 02b26b506b759dd7b18b963295a8908cb4a78245 | db350599b7085e56fbf2c316e74cd7a5b48f02b8 | refs/heads/main | 2023-02-12T13:07:34.080809 | 2021-01-13T06:03:04 | 2021-01-13T06:03:04 | 329,202,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,963 | py |
def getDF(file_name,sheet_name):
import pandas as pd
DF1=pd.read_excel(file_name,sheet_name=sheet_name)
return DF1
def getS(DF,n1):
import pandas as pd
S1=pd.Series(DF.iloc[:,n1])
return S1
def extractDF(DF,n1,n2):
DF2=DF.iloc[n1:n1+n2,:]
return DF2
def drawS(S1,S2):
import matplotlib.pyplot as plt
if len(S1)==len(S2):
plt.scatter(S1,S2)
plt.show()
else:
print("2つのSeriesのサイズが異なります")
def extractDFRow(DF,n1,n2):
DF2=DF.iloc[:,n1:n1+n2]
return DF2
def getDFAverage(DF):
import pandas as pd
a=[]
for i in range(len(DF)):
a.append(sum(DF.iloc[i])/len(DF.iloc[i]))
S1=pd.Series(a)
return S1
def get_corr(v1,v2):
import pandas as pd
V1=pd.Series(v1)
V2=pd.Series(v2)
d=V1.corr(V2)
return d
import pandas as pd
def compoundSeries(s1: pd.Series, s2:pd.Series)->pd.DataFrame:
df=pd.DataFrame([s1,s2])
return df
def get_sin(a: list, Nsample: int, time_step: float)->list:
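    # Sum unit-amplitude sine waves, one per frequency in `a`, sampled at Nsample points spaced time_step apart.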
import math
amp=[0]*Nsample
for i in range(len(a)):
for j in range(Nsample):
amp[j]+=(math.sin(2*math.pi*a[i]*j*time_step))
return amp
from scipy.signal import butter,lfilter
def butter_bandpass(lowcut,highcut, fs, order=5):
nyq = 0.5*fs
low = lowcut/nyq
high = highcut/nyq
b,a = butter(order,[low, high],btype='band')
return b,a
def butter_bandpass_filter(data,lowcut,highcut, fs, order=5):
b, a = butter_bandpass(lowcut,highcut, fs, order=5)
y = lfilter(b, a, data)
return y
def myConv(stim: list, base:list)->list:
import numpy as np
conv=np.convolve(stim,base)
return conv
def myConvError(stim:list, base:list, data:list)->float:
import numpy as np
conv=np.convolve(stim,base)
sum=0
for i in range(len(data)):
sum+=(data[i]-conv[i])**2
return sum
| [
"“[email protected]”"
] | |
7490de25b48546de6bcaab679534fa3ff4ee5100 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/platproduct/model/get_product_list_response_wrapper.py | 8718ac280f30c3ec5cc9496bb9a043f7910a4cfc | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 11,668 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.platproduct.model.get_product_list_response_wrapper_body import GetProductListResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['GetProductListResponseWrapperBody'] = GetProductListResponseWrapperBody
class GetProductListResponseWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiResponseHeader,), # noqa: E501
'body': (GetProductListResponseWrapperBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetProductListResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (GetProductListResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetProductListResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (GetProductListResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
fa9400116b1cf68b3c2af2c6480e3869053378ed | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2573/60719/278572.py | ef5e2025d2f5fb9f2a0e35de41649f4a13d5b420 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def handle_each_use_case():
return 2 ** (int(input())-1)
num = int(input())
for i in range(num):
res = handle_each_use_case()
print(res) | [
"[email protected]"
] | |
abf2176a428b1c4899142a03ef98e32f7dd5ecda | c5becab2d4201f2e828d052c22b4496a3bbe4927 | /src/transformers/models/mobilevit/image_processing_mobilevit.py | b600009c2eada9b13a028d77806e7096118d795a | [
"Apache-2.0"
] | permissive | thomwolf/transformers | ba665c456b2acd636d8e3876a87ea446ae0ae092 | 166dfa88e5dfdca1d99197e5006e4e2ea9e49cba | refs/heads/master | 2023-03-08T03:37:13.519336 | 2023-02-15T15:00:01 | 2023-02-15T15:00:01 | 238,908,404 | 4 | 1 | Apache-2.0 | 2023-02-25T16:09:30 | 2020-02-07T11:40:04 | Python | UTF-8 | Python | false | false | 16,736 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for MobileViT."""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, get_resize_output_image_size, rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
def flip_channel_order(image: np.ndarray, data_format: Optional[ChannelDimension]) -> np.ndarray:
"""
Flip the color channels from RGB to BGR or vice versa.
Args:
image (`np.ndarray`):
The image, represented as a numpy array.
data_format (`ChannelDimension`, *`optional`*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
Returns:
`np.ndarray`: The image with the flipped color channels.
"""
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.LAST:
image = image[..., ::-1]
elif input_data_format == ChannelDimension.FIRST:
image = image[:, ::-1, ...]
else:
raise ValueError(f"Invalid input channel dimension format: {input_data_format}")
if data_format is not None:
image = to_channel_dimension_format(image, data_format)
return image
class MobileViTImageProcessor(BaseImageProcessor):
r"""
Constructs a MobileViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
`preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
the `preprocess` method.
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
the `crop_size` parameter in the `preprocess` method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Dict[str, int] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_center_crop: bool = True,
crop_size: Dict[str, int] = None,
do_flip_channel_order: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PIL.Image.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Controls the size of the output image. The shortest edge of the image will be resized to
`size["shortest_edge"]` while maintaining the aspect ratio.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size, default_to_square=False)
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def center_crop(
self,
image: np.ndarray,
size: Dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
        Center crop an image to size `(size["height"], size["width"])`. If the input size is smaller than `size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`Dict[str, int]`):
Size of the output image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(
self,
image: np.ndarray,
scale: Union[int, float],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Rescale an image by a scale factor. image = image * scale.
Args:
image (`np.ndarray`):
Image to rescale.
scale (`int` or `float`):
Scale to apply to the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return rescale(image, scale=scale, data_format=data_format, **kwargs)
def flip_channel_order(
self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
"""
Flip the color channels from RGB to BGR or vice versa.
Args:
image (`np.ndarray`):
The image, represented as a numpy array.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return flip_channel_order(image, data_format=data_format)
def preprocess(
self,
images: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_center_crop: bool = None,
crop_size: Dict[str, int] = None,
do_flip_channel_order: bool = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
**kwargs,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image by rescale factor.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop if `do_center_crop` is set to `True`.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_flip_channel_order = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, resample=resample) for image in images]
if do_center_crop:
images = [self.center_crop(image=image, size=crop_size) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
images = [self.flip_channel_order(image=image) for image in images]
images = [to_channel_dimension_format(image, data_format) for image in images]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
"""
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`List[Tuple]`, *optional*):
A list of length `batch_size`, where each item is a `Tuple[int, int]` corresponding to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`List[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
# TODO: add support for other frameworks
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| [
"[email protected]"
] | |
e028a43d424f376814f87e346f021b1ca842d883 | 6c898145b3581b87b76a2b16658ad1d0a2aeee4a | /demo4_redrect.py | ab84ca2e0e3c203f437ab67ac1b26e110626d070 | [] | no_license | Jasonmes/Flask-model | 080f3e44f64d7684c9fe1edf731cf7481615ea0f | 99f9ff9141434baedc7d048ac3bfb51134919591 | refs/heads/master | 2020-03-26T11:47:39.081133 | 2018-08-15T13:59:40 | 2018-08-15T13:59:40 | 144,860,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | from flask import Flask,redirect,url_for
app = Flask(__name__)
@app.route('/')
def index():
"""
    Custom status code demo:
    the view returns a (body, status code) tuple
    :return:
    """
    return "url_for reverse resolution points to index", 666
@app.route("/demo1")
def demo():
    # Redirect to the itheima homepage
    # Argument: just pass the target URL
return redirect("http://www.itheima.com")
@app.route('/demo2')
def demo2():
    # Redirect to our own index page
    # url_for is the reverse-resolution helper:
    # url_for(view_name) looks up the URL mapped to that view function
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
74928f2f18abb1478e911324438ca62f5b05c88f | 9f059fd982f2c0a9d6a43cb4665b5adf0552c889 | /src/models/model.py | 0d66e2a7ab258eb5b83d9f4ecd74681b12da1539 | [] | no_license | yamad07/domain-transfer-network | 2a42de636febd4da0ceaacac32832a7f9605f820 | b767628f9afa6e760a0708dedd22e6a530cd730b | refs/heads/master | 2020-06-12T06:06:35.578911 | 2019-07-12T05:22:52 | 2019-07-12T05:22:52 | 194,216,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers.cnn import encoder_layer
from .layers.cnn import decoder_layer
from .layers.cnn import discriminator_layer
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.c1 = encoder_layer(3, 64, 3)
self.c2 = encoder_layer(64, 128, 3)
self.c3 = encoder_layer(128, 256, 3)
self.c4 = nn.Sequential(
nn.Conv2d(256,
128,
stride=2,
kernel_size=4,
padding=0,
),
nn.ReLU(inplace=True)
)
def forward(self, x):
batch_size = x.size(0)
h = self.c1(x)
h = self.c2(h)
h = self.c3(h)
h = self.c4(h)
h = h.view(batch_size, -1)
return h
class Decoder(nn.Module):
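    # Maps the flat latent code back to a 3-channel image: reshape to (128, 1, 1), then four transposed-conv upsampling stages.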
def __init__(self):
super(Decoder, self).__init__()
self.conv_block1 = decoder_layer(128, 512, 4, 0)
self.conv_block2 = decoder_layer(512, 256, 4, 1)
self.conv_block3 = decoder_layer(256, 128, 4, 1)
self.conv4 = nn.ConvTranspose2d(128, 3, kernel_size=4,
stride=2, padding=1)
def forward(self, x):
batch_size = x.size(0)
x = x.view(batch_size, 128, 1, 1)
x = self.conv_block1(x)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv4(x)
return x
class Discriminator(nn.Module):
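    # Convolutional critic: downsamples the input and returns log-softmax scores over the flattened 3-channel output.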
def __init__(self):
super(Discriminator, self).__init__()
self.conv_block1 = discriminator_layer(3, 128)
self.conv_block2 = discriminator_layer(128, 256)
self.conv_block3 = discriminator_layer(256, 512)
self.c4 = nn.Sequential(
nn.Conv2d(512,
3,
stride=2,
kernel_size=4,
padding=0,
),
nn.ReLU(inplace=True)
)
def forward(self, x):
batch_size = x.size(0)
h = self.conv_block1(x)
h = self.conv_block2(h)
h = self.conv_block3(h)
h = self.c4(h)
return F.log_softmax(h.view(batch_size, -1), dim=1)
| [
"[email protected]"
] | |
f18c3055fb82ab2adce6fe45db715962d9b8bc34 | 6c26a9bd075d3d54a307d7c1e5a0bc67b50df8c2 | /python_basics/python3/04_less_than.py | f7630bf4edcf6b730f1c11ee4f5d8c76607a9ec6 | [] | no_license | marialobillo/dataquest | 86efc49c0339c07e6263d428b5ecd2f80d395ecb | 49e8b653adf23a12fb9eb6a972d85bc1797dba0a | refs/heads/master | 2021-08-28T08:01:36.301087 | 2017-12-11T16:02:18 | 2017-12-11T16:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | print(crime_rates)
second_500 = (crime_rates[1] < 500)
second_371 = (crime_rates[1] <= 371)
second_last = (crime_rates[1] <= crime_rates[len(crime_rates) - 1])
| [
"[email protected]"
] | |
4391865f95a88bc614dc1f2ea5a691b2ae243675 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-servicefabric/azure/servicefabric/models/paged_secret_resource_description_list.py | 8ec32f9fc767fa8832874709ee2fc8da16810dc3 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,806 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PagedSecretResourceDescriptionList(Model):
"""The list of secret resources. The list is paged when all of the results
cannot fit in a single message. The next set of results can be obtained by
executing the same query with the continuation token provided in this list.
:param continuation_token: The continuation token parameter is used to
obtain next set of results. The continuation token is included in the
response of the API when the results from the system do not fit in a
single response. When this value is passed to the next API call, the API
returns next set of results. If there are no further results, then the
continuation token is not included in the response.
:type continuation_token: str
:param items: One page of the list.
:type items: list[~azure.servicefabric.models.SecretResourceDescription]
"""
_attribute_map = {
'continuation_token': {'key': 'ContinuationToken', 'type': 'str'},
'items': {'key': 'Items', 'type': '[SecretResourceDescription]'},
}
def __init__(self, **kwargs):
super(PagedSecretResourceDescriptionList, self).__init__(**kwargs)
self.continuation_token = kwargs.get('continuation_token', None)
self.items = kwargs.get('items', None)
| [
"[email protected]"
] | |
4ee25d36a93847380f36f2e3bf144325c47882a5 | d7e65c505573b90916a953d7a13d29a801c226f9 | /test.py | 418e12a1921b1086465a0f47ec8d2d2ecd6d9422 | [] | no_license | smartfile/client-js | 1f1e60c4fb758aff3b9e371a937e7aa2c83f8dbc | 6338a1442dc6298450ea1f6e15430cb4d1a092ec | refs/heads/master | 2021-01-17T11:28:05.853979 | 2016-05-31T15:07:06 | 2016-05-31T15:07:06 | 3,065,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | #!/bin/env python
import os, string, cgi, time, webbrowser, threading, socket
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
JSON = '{ text: "This is the response." }'
PORT = 8000
class LaunchBrowser(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.start()
def run(self):
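        # Poll the server port until it accepts connections, then open the test page in the default browser.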
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('localhost', PORT))
s.shutdown(2)
break
except:
time.sleep(0.5)
webbrowser.open('file://%s' % os.path.join(os.getcwd(), 'test.html'))
class TestHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
try:
                self.path, qs = self.path.split('?', 1)
qs = cgi.parse_qs(qs)
except ValueError:
qs = {}
if self.path == '/ajax/':
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
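                # Echo the request's Origin (and allow credentials) so cross-origin XHR from test.html succeeds.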
self.send_header('Access-Control-Allow-Origin', self.headers.get('Origin', '*'))
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
if 'callback' in qs: #jsonp:
self.wfile.write('%s(%s);' % (qs['callback'][0], JSON))
else:
self.wfile.write(JSON)
return
except Exception, e:
self.send_error(500, str(e))
self.send_error(404, 'File Not Found: %s' % self.path)
def do_POST(self):
self.send_error(404, 'File Not Found: %s' % self.path)
def main():
try:
launch = LaunchBrowser()
server = HTTPServer(('localhost', PORT), TestHandler)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
42189d44df4bedda4aa9fd28ec1a2b8f5dd5d4fd | d993f821da125498b6dfb01792fcd24c83ae7e34 | /AllAboutDictionaries/DictionaryMethods.py | eb5648801932cd448a1ea6c71d34ab68bef54352 | [] | no_license | Arjuna1513/Python_Practice_Programs | 2c8370d927c8bade2d2b0b5bd0345c7d5f139202 | 7c72600d72f68afee62ee64be25d961822429aeb | refs/heads/master | 2020-06-24T02:36:03.186924 | 2019-07-25T14:31:02 | 2019-07-25T14:31:02 | 198,824,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | dict1 = {1:2, 3:4, 'a':'b', 5:{1:2}}
# print(dict1)
#
# print(len(dict1)) # prints length of dict1
#
# print(dict1.items()) # Returns a list of items with both key and value pairs and since the list is
# # returned we are able to iterate over it
#
# print(dict1.values()) # returns only list of values
#
# print(dict1.keys()) # returns list of keys
#
# print(dict1.get('a')) # returns value associated with key, if not found none is returned.
#
# print(dict1.copy()) # returns a copy of the dictionary
#
# dict2 = dict1.copy()
# print(dict2)
# print(dict1.popitem()) # popitem removes the last element
# print(dict1)
# print(dict1.pop('a')) # deletes the key, value pair of mentioned key
# print(dict1)
print(dict1.__getitem__('a')) # returns the value of key 'a'
print(dict1.__contains__('a')) # returns true if 'a' key is present else returns false
print(dict1.__delitem__('a')) # deletes the given item but won't return the deleted item. | [
"[email protected]"
] | |
ca3c3609c7fadfa9093e7241d467a95b7f74bf4e | 1346ea1f255d3586442c8fc1afc0405794206e26 | /알고리즘/day16/two_string.py | 48ecca480a31b18ae28d058cc47f4bd46267826e | [] | no_license | Yun-Jongwon/TIL | 737b634b6e75723ac0043cda9c4f9acbc2a24686 | a3fc624ec340643cdbf98974bf6e6144eb06a42f | refs/heads/master | 2020-04-12T00:41:03.985080 | 2019-05-01T07:55:25 | 2019-05-01T07:55:25 | 162,208,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | T=int(input())
for t in range(T):
num1,num2=map(int,input().split())
data1=list(map(int,input().split()))
data2=list(map(int,input().split()))
if len(data1)>len(data2):
short_data=data2
long_data=data1
else:
short_data=data1
long_data=data2
sum=-500
for i in range(len(long_data)-len(short_data)+1):
new_sum=0
for j in range(len(short_data)):
new_sum+=short_data[j]*long_data[j+i]
if new_sum>sum:
sum=new_sum
print(sum)
| [
"[email protected]"
] | |
f664f43615dfd3188c09cb82b2cee07f916100ce | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/virtual_network.py | c9cd60b38e95b54f4fe594909f1af0f04be05a36 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 4,724 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""Virtual Network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param address_space: The AddressSpace that contains an array of IP
address ranges that can be used by subnets.
:type address_space: ~azure.mgmt.network.v2019_02_01.models.AddressSpace
:param dhcp_options: The dhcpOptions that contains an array of DNS servers
available to VMs deployed in the virtual network.
:type dhcp_options: ~azure.mgmt.network.v2019_02_01.models.DhcpOptions
:param subnets: A list of subnets in a Virtual Network.
:type subnets: list[~azure.mgmt.network.v2019_02_01.models.Subnet]
:param virtual_network_peerings: A list of peerings in a Virtual Network.
:type virtual_network_peerings:
list[~azure.mgmt.network.v2019_02_01.models.VirtualNetworkPeering]
:param resource_guid: The resourceGuid property of the Virtual Network
resource.
:type resource_guid: str
:param provisioning_state: The provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param enable_ddos_protection: Indicates if DDoS protection is enabled for
all the protected resources in the virtual network. It requires a DDoS
protection plan associated with the resource. Default value: False .
:type enable_ddos_protection: bool
:param enable_vm_protection: Indicates if VM protection is enabled for all
the subnets in the virtual network. Default value: False .
:type enable_vm_protection: bool
:param ddos_protection_plan: The DDoS protection plan associated with the
virtual network.
:type ddos_protection_plan:
~azure.mgmt.network.v2019_02_01.models.SubResource
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'enable_ddos_protection': {'key': 'properties.enableDdosProtection', 'type': 'bool'},
'enable_vm_protection': {'key': 'properties.enableVmProtection', 'type': 'bool'},
'ddos_protection_plan': {'key': 'properties.ddosProtectionPlan', 'type': 'SubResource'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetwork, self).__init__(**kwargs)
self.address_space = kwargs.get('address_space', None)
self.dhcp_options = kwargs.get('dhcp_options', None)
self.subnets = kwargs.get('subnets', None)
self.virtual_network_peerings = kwargs.get('virtual_network_peerings', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.enable_ddos_protection = kwargs.get('enable_ddos_protection', False)
self.enable_vm_protection = kwargs.get('enable_vm_protection', False)
self.ddos_protection_plan = kwargs.get('ddos_protection_plan', None)
self.etag = kwargs.get('etag', None)
| [
"[email protected]"
] | |
8f1d1c60025749c1d3af208a4bd1b6b6cfc35348 | 94fb04ab0cb16fd180b6ef0ca22176dd31dea4f8 | /code@smart_irrigation.py | 007ab4961e728e9d563d1e1a4796bc2309d6224a | [] | no_license | SmartPracticeschool/llSPS-INT-2310-smart-irrigation-system-based-on-IOT-using-random-values-n-weather-api- | 97a5fda6e640767a9ee830a709240df57cbf9750 | 1de1e04929ef8ea052e7ed70acd97b87e77bdfab | refs/heads/master | 2022-11-04T00:49:22.602410 | 2020-06-17T14:05:48 | 2020-06-17T14:05:48 | 265,819,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | import requests
import sys
import time
import ibmiotf.application
import ibmiotf.device
import random
r=requests.get('http://api.openweathermap.org/data/2.5/weather?q=Guntur,IN&appid=42a67b9e8ecd9620c2fe1471361c3e53')
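# NOTE: the weather API is queried once at start-up, so the loop below keeps reusing this single response.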
#Provide your IBM Watson Device Credentials
organization = "w1gnzn"
deviceType = "raspberrypi"
deviceId = "123456"
authMethod = "token"
authToken = "123456789"
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data['command'])
if cmd.data['command']=='motoron':
print("Motor is ON")
elif cmd.data['command']=='motoroff':
print("Motor is OFF")
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
# Connect and send a datapoint "hello" with value "world" into the cloud as an event of type "greeting" 10 times
deviceCli.connect()
#print("response is")
#print(r.json())
#for i in r.json():
#print(i)
#print(r.json()["main"])
#print("temparature value:")
#print(r.json()["main"]["temp"])
while True:
print("humidity value:")
print(r.json()["main"]["humidity"])
hum=r.json()["main"]["humidity"]
temk=r.json()["main"]["temp"]
#print("temperature in kelvin is:",temk)
temperature=temk-272.15
print("temperature in celcius is:",temperature)
mois=random.randrange(20,60,2)
print("moisture level of soil is:",mois)
    if temperature > 32 or mois < 35:
req_sms=requests.get('https://www.fast2sms.com/dev/bulk?authorization=TPnud1eh5Bfyt2FpHoWXGwlC7NSsKYLmIz6MEvRi8a93jgAZbDDvuxwEg9eBdjmP7OLRpJ2MsIhoZ54a&sender_id=FSTSMS&message=Temperature,Moisture%20level%20of%20soil%20are%20improper&language=english&route=p&numbers=7075001212,9121852344')
data = { 'Temperature' : temperature, 'Moisture': mois, 'Humidity': hum }
#print (data)
def myOnPublishCallback():
print ("Published Temperature = %s C" % temperature, "Humidity = %s %%" % hum, "to IBM Watson")
success = deviceCli.publishEvent("Weather", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(2)
deviceCli.commandCallback = myCommandCallback
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| [
"[email protected]"
] | |
034ff7aa8e6769c53f7c8c08a4bf5c226f1a1f80 | 48114b2186c96afce9a00c86eed8739853e8a71e | /eptools/gspread_utils.py | 6ab72a98450912f2e91365ff769292cf14ce4630 | [
"MIT"
] | permissive | PythonSanSebastian/ep-tools | 78b299eca763cc345da15e2984d7d08e67dc0c8d | d9a0e3c1d97df9f8bd94023e150b568e5619a482 | refs/heads/master | 2021-01-20T21:57:06.463661 | 2018-05-31T09:46:22 | 2018-05-31T09:46:22 | 51,786,311 | 0 | 0 | null | 2016-02-15T21:15:50 | 2016-02-15T21:15:50 | null | UTF-8 | Python | false | false | 1,813 | py | """
Functions to access the data in google drive spreadsheets
"""
from docstamp.gdrive import (get_spreadsheet,
worksheet_to_dict)
def get_api_key_file():
""" Return the api_key_file path imported from the config.py file"""
try:
from .config import api_key_file
    except ImportError:
raise ImportError('Could not find a path to the Google credentials file. '
'You can set it up permanently in the config.py file.')
else:
return api_key_file
def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1):
""" Return the content of the spreadsheet in the ws_tab_idx tab of
the spreadsheet with doc_key as a pandas DataFrame.
Parameters
----------
api_key_file: str
Path to the Google API key json file.
doc_key: str
ws_tab_idx: int
Index of the worksheet within the spreadsheet.
header: List[str]
List of values to assign to the header of the result.
start_row: int
Row index from where to start collecting the data.
Returns
-------
content: pandas.DataFrame
"""
import pandas as pd
spread = get_spreadsheet(api_key_file, doc_key)
ws = spread.get_worksheet(ws_tab_idx)
ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row)
return pd.DataFrame(ws_dict)
def find_one_row(substr, df, col_name):
""" Return one row from `df`. The returned row has in `col_name` column
a value with a sub-string as `substr.
Raise KeyError if no row is found.
"""
for name in df[col_name]:
if substr.lower() in name.lower():
return df[df[col_name] == name]
raise KeyError('Could not find {} in the '
'pandas dataframe.'.format(substr))
| [
"[email protected]"
] | |
4827119c0da3a1ec929ea1870f9ff11d5289f6df | 1b461ec82c8dd1099021ce3a32a7f649fa970226 | /1.Python_basics/00. First_steps.py | de81272da5e7285c2ecc00f70c4e38d5bd64453f | [] | no_license | AdamSierzan/Learn-to-code-in-Python-3-basics | 9df20c80c33f40da8800d257ee2ec05881198419 | ef298bcba72250e19080283cb81dbecf6a245563 | refs/heads/master | 2022-11-06T00:48:17.413322 | 2020-06-16T20:52:08 | 2020-06-16T20:52:08 | 250,247,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | msg = "Hello World"
print(msg)
x = 2
y = 7232
sum = x + y
print(sum)
x = 3
print(x)
x = 2*y
y = 69780
print(x)
print(y)
print(x)
x = 2*y
print(x)
print"hello world"
help | [
"[email protected]"
] | |
291d6c66a8448ced95fc18bbfadb84c49f58a446 | 323716a35ee2b649031ec8a09b196b8e7b833e8d | /lab9/hhback/api/migrations/0001_initial.py | a18a2d38b64a756ff8b961f72e74525684e761d8 | [] | no_license | Zhaisan/WebDev | 0377cec0c553900c5126794a8addc16e2e62b558 | 959ecf5b2e5032ccd2ab704b840e8f680dbcfc42 | refs/heads/main | 2023-05-27T17:24:17.026750 | 2021-05-31T15:02:15 | 2021-05-31T15:02:15 | 334,424,629 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | # Generated by Django 2.1 on 2021-04-13 19:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField(default='')),
('city', models.CharField(max_length=100)),
('address', models.TextField()),
],
options={
'verbose_name': 'Company',
'verbose_name_plural': 'Companies',
},
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField(default='')),
('salary', models.FloatField(default='')),
('company', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacancies', to='api.Company')),
],
options={
'verbose_name': 'Vacancy',
'verbose_name_plural': 'Vacancies',
},
),
]
| [
"[email protected]"
] |