| Column | Type | Length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
6b1a3c6752fb987270983195e5489c049ddefa3c | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/file/formats/xar/XARUtil.pyi | cc17580bacec6487f3d70bd74261a9a2bf5e6add | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 765 | pyi |
import ghidra.app.util.bin
import ghidra.program.model.listing
import java.lang
from typing import overload
class XARUtil(object):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
@overload
@staticmethod
def isXAR(__a0: ghidra.app.util.bin.ByteProvider) -> bool: ...
@overload
@staticmethod
def isXAR(__a0: ghidra.program.model.listing.Program) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
7a73d23951e7d7d1345e584df9ae95b3a3cae6b7 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/FourteenTeV/SmuonToMuonNeutralino_M_2000_ctau_10_cff.py | 4b9be99f8384f2be51bee014b5e1f7f05c0d660b | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 50,288 | py | COM_ENERGY = 14000. # GeV
# this is the closest thing I could find:
# https://twiki.cern.ch/twiki/bin/view/LHCPhysics/SUSYCrossSections13TeVslepslep
CROSS_SECTION = 1 # pb
CTAU_POINT = 1. # mm
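# Illustrative cross-check (editorial addition, not part of the original
# fragment; the helper name is an assumption): an SLHA width in GeV maps to a
# proper decay length via ctau = hbar*c / Gamma, with hbar*c ~ 1.973269804e-13 GeV*mm.
def _width_to_ctau_mm(width_gev):
    """Convert a decay width in GeV to a proper decay length in mm."""
    return 1.973269804e-13 / width_gev
# e.g. _width_to_ctau_mm(1.9732697E-14) ~ 10.0 mm for the smuon_L DECAY entry
# in the SLHA table below.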
SLHA_TABLE = """## Important note!
## This file has been modified by hand to give the gluino and the
## stop_1 a very narrow width, such that it can be used to try out
## the R-hadron machinery. It is not a realistic SUSY scenario.
##
##******************************************************************
## MadGraph/MadEvent *
##******************************************************************
## *
## param_card corresponding to the SPS point 1a (by SoftSusy 2.0.5) *
## *
##******************************************************************
## Les Houches friendly file for the (MS)SM parameters of MadGraph *
## SM parameter set and decay widths produced by MSSMCalc *
##******************************************************************
##*Please note the following IMPORTANT issues: *
## *
##0. REFRAIN from editing this file by hand! Some of the parame- *
## ters are not independent. Always use a calculator. *
## *
##1. alpha_S(MZ) has been used in the calculation of the parameters*
## This value is KEPT by madgraph when no pdf are used lpp(i)=0, *
## but, for consistency, it will be reset by madgraph to the *
## value expected IF the pdfs for collisions with hadrons are *
## used. *
## *
##2. Values of the charm and bottom kinematic (pole) masses are *
## those used in the matrix elements and phase space UNLESS they *
## are set to ZERO from the start in the model (particles.dat) *
## This happens, for example, when using 5-flavor QCD where *
## charm and bottom are treated as partons in the initial state *
## and a zero mass might be hardwired in the model definition. *
## *
## The SUSY decays have calculated using SDECAY 1.1a *
## *
##******************************************************************
#
BLOCK DCINFO # Decay Program information
1 SDECAY # decay calculator
2 1.1a # version number
#
BLOCK SPINFO # Spectrum calculator information
1 SOFTSUSY # spectrum calculator
2 2.0.5 # version number
#
BLOCK MODSEL # Model selection
1 1 sugra
#
BLOCK SMINPUTS # Standard Model inputs
1 1.27934000E+02 # alpha_em^-1(M_Z)^MSbar
2 1.16637000E-05 # G_F [GeV^-2]
3 1.18000000E-01 # alpha_S(M_Z)^MSbar
4 9.11876000E+01 # M_Z pole mass
5 4.25000000E+00 # mb(mb)^MSbar
6 1.75000000E+02 # mt pole mass
7 1.77700000E+00 # mtau pole mass
#
BLOCK MINPAR # Input parameters - minimal models
1 1.00000000E+02 # m0
2 2.50000000E+02 # m12
3 1.00000000E+01 # tanb
4 1.00000000E+00 # sign(mu)
5 -1.00000000E+02 # A0
#
BLOCK MASS # Mass Spectrum
# PDG code mass particle
5 4.88991651E+00 # b-quark pole mass calculated from mb(mb)_Msbar
6 1.75000000E+02 # mt pole mass (not read by ME)
24 7.98290131E+01 # W+
25 1.10899057E+02 # h
35 3.99960116E+05 # H
36 3.99583917E+05 # A
37 4.07879012E+05 # H+
1000001 5.68441109E+05 # ~d_L
2000001 5.45228462E+05 # ~d_R
1000002 5.61119014E+05 # ~u_L
2000002 5.49259265E+05 # ~u_R
1000003 5.68441109E+05 # ~s_L
2000003 5.45228462E+05 # ~s_R
1000004 5.61119014E+05 # ~c_L
2000004 5.49259265E+05 # ~c_R
1000005 5.13065179E+05 # ~b_1
2000005 5.43726676E+05 # ~b_2
1000006 900.00 # ~t_1
2000006 5.85785818E+05 # ~t_2
1000011 2.02915690E+05 # ~e_L
2000011 1.44102799E+05 # ~e_R
1000012 1.85258326E+05 # ~nu_eL
1000013 2000.000000 # ~mu_L - EDIT THIS LINE!!!!!
2000013 1.44102799E+05 # ~mu_R
1000014 1.85258326E+05 # ~nu_muL
1000015 1.34490864E+05 # ~tau_1
2000015 2.06867805E+05 # ~tau_2
1000016 1.84708464E+05 # ~nu_tauL
1000021 5.0E+05 # ~g
1000022 1.00000000 # ~chi_10 - MADE THIS BASICALLY MASSLESS
1000023 1.81088157E+05 # ~chi_20
1000025 -3.63756027E+05 # ~chi_30
1000035 3.81729382E+05 # ~chi_40
1000024 1.81696474E+05 # ~chi_1+
1000037 3.79939320E+05 # ~chi_2+
#
BLOCK NMIX # Neutralino Mixing Matrix
1 1 9.86364430E-01 # N_11
1 2 -5.31103553E-02 # N_12
1 3 1.46433995E-01 # N_13
1 4 -5.31186117E-02 # N_14
2 1 9.93505358E-02 # N_21
2 2 9.44949299E-01 # N_22
2 3 -2.69846720E-01 # N_23
2 4 1.56150698E-01 # N_24
3 1 -6.03388002E-02 # N_31
3 2 8.77004854E-02 # N_32
3 3 6.95877493E-01 # N_33
3 4 7.10226984E-01 # N_34
4 1 -1.16507132E-01 # N_41
4 2 3.10739017E-01 # N_42
4 3 6.49225960E-01 # N_43
4 4 -6.84377823E-01 # N_44
#
BLOCK UMIX # Chargino Mixing Matrix U
1 1 9.16834859E-01 # U_11
1 2 -3.99266629E-01 # U_12
2 1 3.99266629E-01 # U_21
2 2 9.16834859E-01 # U_22
#
BLOCK VMIX # Chargino Mixing Matrix V
1 1 9.72557835E-01 # V_11
1 2 -2.32661249E-01 # V_12
2 1 2.32661249E-01 # V_21
2 2 9.72557835E-01 # V_22
#
BLOCK STOPMIX # Stop Mixing Matrix
1 1 5.53644960E-01 # O_{11}
1 2 8.32752820E-01 # O_{12}
2 1 8.32752820E-01 # O_{21}
2 2 -5.53644960E-01 # O_{22}
#
BLOCK SBOTMIX # Sbottom Mixing Matrix
1 1 9.38737896E-01 # O_{11}
1 2 3.44631925E-01 # O_{12}
2 1 -3.44631925E-01 # O_{21}
2 2 9.38737896E-01 # O_{22}
#
BLOCK STAUMIX # Stau Mixing Matrix
1 1 2.82487190E-01 # O_{11}
1 2 9.59271071E-01 # O_{12}
2 1 9.59271071E-01 # O_{21}
2 2 -2.82487190E-01 # O_{22}
#
BLOCK ALPHA # Higgs mixing
-1.13825210E-01 # Mixing angle in the neutral Higgs boson sector
#
BLOCK HMIX Q= 4.67034192E+02 # DRbar Higgs Parameters
1 3.57680977E+02 # mu(Q)MSSM DRbar
2 9.74862403E+00 # tan beta(Q)MSSM DRba
3 2.44894549E+02 # higgs vev(Q)MSSM DRb
4 1.66439065E+05 # mA^2(Q)MSSM DRbar
#
BLOCK GAUGE Q= 4.67034192E+02 # The gauge couplings
3 1.10178679E+00 # g3(Q) MSbar
#
BLOCK AU Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_u(Q) DRbar
2 2 0.00000000E+00 # A_c(Q) DRbar
3 3 -4.98129778E+02 # A_t(Q) DRbar
#
BLOCK AD Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_d(Q) DRbar
2 2 0.00000000E+00 # A_s(Q) DRbar
3 3 -7.97274397E+02 # A_b(Q) DRbar
#
BLOCK AE Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_e(Q) DRbar
2 2 0.00000000E+00 # A_mu(Q) DRbar
3 3 -2.51776873E+02 # A_tau(Q) DRbar
#
BLOCK YU Q= 4.67034192E+02 # The Yukawa couplings
3 3 8.92844550E-01 # y_t(Q) DRbar
#
BLOCK YD Q= 4.67034192E+02 # The Yukawa couplings
3 3 1.38840206E-01 # y_b(Q) DRbar
#
BLOCK YE Q= 4.67034192E+02 # The Yukawa couplings
3 3 1.00890810E-01 # y_tau(Q) DRbar
#
BLOCK MSOFT Q= 4.67034192E+02 # The soft SUSY breaking masses at the scale Q
1 1.01396534E+02 # M_1(Q)
2 1.91504241E+02 # M_2(Q)
3 5.88263031E+02 # M_3(Q)
21 3.23374943E+04 # mH1^2(Q)
22 -1.28800134E+05 # mH2^2(Q)
31 1.95334764E+02 # meL(Q)
32 1.95334764E+02 # mmuL(Q)
33 1.94495956E+02 # mtauL(Q)
34 1.36494061E+02 # meR(Q)
35 1.36494061E+02 # mmuR(Q)
36 1.34043428E+02 # mtauR(Q)
41 5.47573466E+02 # mqL1(Q)
42 5.47573466E+02 # mqL2(Q)
43 4.98763839E+02 # mqL3(Q)
44 5.29511195E+02 # muR(Q)
45 5.29511195E+02 # mcR(Q)
46 4.23245877E+02 # mtR(Q)
47 5.23148807E+02 # mdR(Q)
48 5.23148807E+02 # msR(Q)
49 5.19867261E+02 # mbR(Q)
#
#
#
# =================
# |The decay table|
# =================
#
# - The multi-body decays for the inos, stops and sbottoms are included.
#
# PDG Width
DECAY 25 1.98610799E-03 # h decays
# BR NDA ID1 ID2
1.45642955E-01 2 15 -15 # BR(H1 -> tau- tau+)
8.19070713E-01 2 5 -5 # BR(H1 -> b bb)
3.36338173E-02 2 24 -24 # BR(H1 -> W+ W-)
1.65251528E-03 2 23 23 # BR(H1 -> Z Z)
#
# PDG Width
DECAY 35 5.74801389E-01 # H decays
# BR NDA ID1 ID2
1.39072676E-01 2 15 -15 # BR(H -> tau- tau+)
4.84110879E-02 2 6 -6 # BR(H -> t tb)
7.89500067E-01 2 5 -5 # BR(H -> b bb)
3.87681171E-03 2 24 -24 # BR(H -> W+ W-)
1.80454752E-03 2 23 23 # BR(H -> Z Z)
0.00000000E+00 2 24 -37 # BR(H -> W+ H-)
0.00000000E+00 2 -24 37 # BR(H -> W- H+)
0.00000000E+00 2 37 -37 # BR(H -> H+ H-)
1.73348101E-02 2 25 25 # BR(H -> h h)
0.00000000E+00 2 36 36 # BR(H -> A A)
#
# PDG Width
DECAY 36 6.32178488E-01 # A decays
# BR NDA ID1 ID2
1.26659725E-01 2 15 -15 # BR(A -> tau- tau+)
1.51081526E-01 2 6 -6 # BR(A -> t tb)
7.19406137E-01 2 5 -5 # BR(A -> b bb)
2.85261228E-03 2 23 25 # BR(A -> Z h)
0.00000000E+00 2 23 35 # BR(A -> Z H)
0.00000000E+00 2 24 -37 # BR(A -> W+ H-)
0.00000000E+00 2 -24 37 # BR(A -> W- H+)
#
# PDG Width
DECAY 37 5.46962813E-01 # H+ decays
# BR NDA ID1 ID2
1.49435135E-01 2 -15 16 # BR(H+ -> tau+ nu_tau)
8.46811711E-01 2 6 -5 # BR(H+ -> t bb)
3.75315387E-03 2 24 25 # BR(H+ -> W+ h)
0.00000000E+00 2 24 35 # BR(H+ -> W+ H)
0.00000000E+00 2 24 36 # BR(H+ -> W+ A)
#
# PDG Width
DECAY 1000021 0.00E+00 # gluino decays
# BR NDA ID1 ID2
2.08454202E-02 2 1000001 -1 # BR(~g -> ~d_L db)
2.08454202E-02 2 -1000001 1 # BR(~g -> ~d_L* d )
5.07075274E-02 2 2000001 -1 # BR(~g -> ~d_R db)
5.07075274E-02 2 -2000001 1 # BR(~g -> ~d_R* d )
2.89787767E-02 2 1000002 -2 # BR(~g -> ~u_L ub)
2.89787767E-02 2 -1000002 2 # BR(~g -> ~u_L* u )
4.46872773E-02 2 2000002 -2 # BR(~g -> ~u_R ub)
4.46872773E-02 2 -2000002 2 # BR(~g -> ~u_R* u )
2.08454202E-02 2 1000003 -3 # BR(~g -> ~s_L sb)
2.08454202E-02 2 -1000003 3 # BR(~g -> ~s_L* s )
5.07075274E-02 2 2000003 -3 # BR(~g -> ~s_R sb)
5.07075274E-02 2 -2000003 3 # BR(~g -> ~s_R* s )
2.89787767E-02 2 1000004 -4 # BR(~g -> ~c_L cb)
2.89787767E-02 2 -1000004 4 # BR(~g -> ~c_L* c )
4.46872773E-02 2 2000004 -4 # BR(~g -> ~c_R cb)
4.46872773E-02 2 -2000004 4 # BR(~g -> ~c_R* c )
1.05840237E-01 2 1000005 -5 # BR(~g -> ~b_1 bb)
1.05840237E-01 2 -1000005 5 # BR(~g -> ~b_1* b )
5.56574805E-02 2 2000005 -5 # BR(~g -> ~b_2 bb)
5.56574805E-02 2 -2000005 5 # BR(~g -> ~b_2* b )
4.80642793E-02 2 1000006 -6 # BR(~g -> ~t_1 tb)
4.80642793E-02 2 -1000006 6 # BR(~g -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~g -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~g -> ~t_2* t )
#
# PDG Width
DECAY 1000006 1.97326971684839e-13 # stop1 decays
# BR NDA ID1 ID2
3.33333333E-01 2 5 -11
3.33333333E-01 2 5 -13
3.33333333E-01 2 5 -15
# 1.92947616E-01 2 1000022 6 # BR(~t_1 -> ~chi_10 t )
# 1.17469211E-01 2 1000023 6 # BR(~t_1 -> ~chi_20 t )
# 0.00000000E+00 2 1000025 6 # BR(~t_1 -> ~chi_30 t )
# 0.00000000E+00 2 1000035 6 # BR(~t_1 -> ~chi_40 t )
# 6.75747693E-01 2 1000024 5 # BR(~t_1 -> ~chi_1+ b )
# 1.38354802E-02 2 1000037 5 # BR(~t_1 -> ~chi_2+ b )
# 0.00000000E+00 2 1000021 6 # BR(~t_1 -> ~g t )
# 0.00000000E+00 2 1000005 37 # BR(~t_1 -> ~b_1 H+)
# 0.00000000E+00 2 2000005 37 # BR(~t_1 -> ~b_2 H+)
# 0.00000000E+00 2 1000005 24 # BR(~t_1 -> ~b_1 W+)
# 0.00000000E+00 2 2000005 24 # BR(~t_1 -> ~b_2 W+)
#
# PDG Width
DECAY 2000006 7.37313275E+00 # stop2 decays
# BR NDA ID1 ID2
2.96825635E-02 2 1000022 6 # BR(~t_2 -> ~chi_10 t )
8.68035358E-02 2 1000023 6 # BR(~t_2 -> ~chi_20 t )
4.18408351E-02 2 1000025 6 # BR(~t_2 -> ~chi_30 t )
1.93281647E-01 2 1000035 6 # BR(~t_2 -> ~chi_40 t )
2.19632356E-01 2 1000024 5 # BR(~t_2 -> ~chi_1+ b )
2.02206148E-01 2 1000037 5 # BR(~t_2 -> ~chi_2+ b )
0.00000000E+00 2 1000021 6 # BR(~t_2 -> ~g t )
3.66397706E-02 2 1000006 25 # BR(~t_2 -> ~t_1 h )
0.00000000E+00 2 1000006 35 # BR(~t_2 -> ~t_1 H )
0.00000000E+00 2 1000006 36 # BR(~t_2 -> ~t_1 A )
0.00000000E+00 2 1000005 37 # BR(~t_2 -> ~b_1 H+)
0.00000000E+00 2 2000005 37 # BR(~t_2 -> ~b_2 H+)
1.89913144E-01 2 1000006 23 # BR(~t_2 -> ~t_1 Z )
0.00000000E+00 2 1000005 24 # BR(~t_2 -> ~b_1 W+)
0.00000000E+00 2 2000005 24 # BR(~t_2 -> ~b_2 W+)
#
# PDG Width
DECAY 1000005 3.73627601E+00 # sbottom1 decays
# BR NDA ID1 ID2
4.43307074E-02 2 1000022 5 # BR(~b_1 -> ~chi_10 b )
3.56319904E-01 2 1000023 5 # BR(~b_1 -> ~chi_20 b )
5.16083795E-03 2 1000025 5 # BR(~b_1 -> ~chi_30 b )
1.04105080E-02 2 1000035 5 # BR(~b_1 -> ~chi_40 b )
4.45830064E-01 2 -1000024 6 # BR(~b_1 -> ~chi_1- t )
0.00000000E+00 2 -1000037 6 # BR(~b_1 -> ~chi_2- t )
0.00000000E+00 2 1000021 5 # BR(~b_1 -> ~g b )
0.00000000E+00 2 1000006 -37 # BR(~b_1 -> ~t_1 H-)
0.00000000E+00 2 2000006 -37 # BR(~b_1 -> ~t_2 H-)
1.37947979E-01 2 1000006 -24 # BR(~b_1 -> ~t_1 W-)
0.00000000E+00 2 2000006 -24 # BR(~b_1 -> ~t_2 W-)
#
# PDG Width
DECAY 2000005 8.01566294E-01 # sbottom2 decays
# BR NDA ID1 ID2
2.86200590E-01 2 1000022 5 # BR(~b_2 -> ~chi_10 b )
1.40315912E-01 2 1000023 5 # BR(~b_2 -> ~chi_20 b )
5.32635592E-02 2 1000025 5 # BR(~b_2 -> ~chi_30 b )
7.48748121E-02 2 1000035 5 # BR(~b_2 -> ~chi_40 b )
1.79734294E-01 2 -1000024 6 # BR(~b_2 -> ~chi_1- t )
0.00000000E+00 2 -1000037 6 # BR(~b_2 -> ~chi_2- t )
0.00000000E+00 2 1000021 5 # BR(~b_2 -> ~g b )
0.00000000E+00 2 1000005 25 # BR(~b_2 -> ~b_1 h )
0.00000000E+00 2 1000005 35 # BR(~b_2 -> ~b_1 H )
0.00000000E+00 2 1000005 36 # BR(~b_2 -> ~b_1 A )
0.00000000E+00 2 1000006 -37 # BR(~b_2 -> ~t_1 H-)
0.00000000E+00 2 2000006 -37 # BR(~b_2 -> ~t_2 H-)
0.00000000E+00 2 1000005 23 # BR(~b_2 -> ~b_1 Z )
2.65610832E-01 2 1000006 -24 # BR(~b_2 -> ~t_1 W-)
0.00000000E+00 2 2000006 -24 # BR(~b_2 -> ~t_2 W-)
#
# PDG Width
DECAY 1000002 5.47719539E+00 # sup_L decays
# BR NDA ID1 ID2
6.65240987E-03 2 1000022 2 # BR(~u_L -> ~chi_10 u)
3.19051458E-01 2 1000023 2 # BR(~u_L -> ~chi_20 u)
8.44929059E-04 2 1000025 2 # BR(~u_L -> ~chi_30 u)
1.03485173E-02 2 1000035 2 # BR(~u_L -> ~chi_40 u)
6.49499518E-01 2 1000024 1 # BR(~u_L -> ~chi_1+ d)
1.36031676E-02 2 1000037 1 # BR(~u_L -> ~chi_2+ d)
0.00000000E+00 2 1000021 2 # BR(~u_L -> ~g u)
#
# PDG Width
DECAY 2000002 1.15297292E+00 # sup_R decays
# BR NDA ID1 ID2
9.86377420E-01 2 1000022 2 # BR(~u_R -> ~chi_10 u)
8.46640647E-03 2 1000023 2 # BR(~u_R -> ~chi_20 u)
1.23894695E-03 2 1000025 2 # BR(~u_R -> ~chi_30 u)
3.91722611E-03 2 1000035 2 # BR(~u_R -> ~chi_40 u)
0.00000000E+00 2 1000024 1 # BR(~u_R -> ~chi_1+ d)
0.00000000E+00 2 1000037 1 # BR(~u_R -> ~chi_2+ d)
0.00000000E+00 2 1000021 2 # BR(~u_R -> ~g u)
#
# PDG Width
DECAY 1000001 5.31278772E+00 # sdown_L decays
# BR NDA ID1 ID2
2.32317969E-02 2 1000022 1 # BR(~d_L -> ~chi_10 d)
3.10235077E-01 2 1000023 1 # BR(~d_L -> ~chi_20 d)
1.52334771E-03 2 1000025 1 # BR(~d_L -> ~chi_30 d)
1.48849798E-02 2 1000035 1 # BR(~d_L -> ~chi_40 d)
6.06452481E-01 2 -1000024 2 # BR(~d_L -> ~chi_1- u)
4.36723179E-02 2 -1000037 2 # BR(~d_L -> ~chi_2- u)
0.00000000E+00 2 1000021 1 # BR(~d_L -> ~g d)
#
# PDG Width
DECAY 2000001 2.85812308E-01 # sdown_R decays
# BR NDA ID1 ID2
9.86529614E-01 2 1000022 1 # BR(~d_R -> ~chi_10 d)
8.44510350E-03 2 1000023 1 # BR(~d_R -> ~chi_20 d)
1.21172119E-03 2 1000025 1 # BR(~d_R -> ~chi_30 d)
3.81356102E-03 2 1000035 1 # BR(~d_R -> ~chi_40 d)
0.00000000E+00 2 -1000024 2 # BR(~d_R -> ~chi_1- u)
0.00000000E+00 2 -1000037 2 # BR(~d_R -> ~chi_2- u)
0.00000000E+00 2 1000021 1 # BR(~d_R -> ~g d)
#
# PDG Width
DECAY 1000004 5.47719539E+00 # scharm_L decays
# BR NDA ID1 ID2
6.65240987E-03 2 1000022 4 # BR(~c_L -> ~chi_10 c)
3.19051458E-01 2 1000023 4 # BR(~c_L -> ~chi_20 c)
8.44929059E-04 2 1000025 4 # BR(~c_L -> ~chi_30 c)
1.03485173E-02 2 1000035 4 # BR(~c_L -> ~chi_40 c)
6.49499518E-01 2 1000024 3 # BR(~c_L -> ~chi_1+ s)
1.36031676E-02 2 1000037 3 # BR(~c_L -> ~chi_2+ s)
0.00000000E+00 2 1000021 4 # BR(~c_L -> ~g c)
#
# PDG Width
DECAY 2000004 1.15297292E+00 # scharm_R decays
# BR NDA ID1 ID2
9.86377420E-01 2 1000022 4 # BR(~c_R -> ~chi_10 c)
8.46640647E-03 2 1000023 4 # BR(~c_R -> ~chi_20 c)
1.23894695E-03 2 1000025 4 # BR(~c_R -> ~chi_30 c)
3.91722611E-03 2 1000035 4 # BR(~c_R -> ~chi_40 c)
0.00000000E+00 2 1000024 3 # BR(~c_R -> ~chi_1+ s)
0.00000000E+00 2 1000037 3 # BR(~c_R -> ~chi_2+ s)
0.00000000E+00 2 1000021 4 # BR(~c_R -> ~g c)
#
# PDG Width
DECAY 1000003 5.31278772E+00 # sstrange_L decays
# BR NDA ID1 ID2
2.32317969E-02 2 1000022 3 # BR(~s_L -> ~chi_10 s)
3.10235077E-01 2 1000023 3 # BR(~s_L -> ~chi_20 s)
1.52334771E-03 2 1000025 3 # BR(~s_L -> ~chi_30 s)
1.48849798E-02 2 1000035 3 # BR(~s_L -> ~chi_40 s)
6.06452481E-01 2 -1000024 4 # BR(~s_L -> ~chi_1- c)
4.36723179E-02 2 -1000037 4 # BR(~s_L -> ~chi_2- c)
0.00000000E+00 2 1000021 3 # BR(~s_L -> ~g s)
#
# PDG Width
DECAY 2000003 2.85812308E-01 # sstrange_R decays
# BR NDA ID1 ID2
9.86529614E-01 2 1000022 3 # BR(~s_R -> ~chi_10 s)
8.44510350E-03 2 1000023 3 # BR(~s_R -> ~chi_20 s)
1.21172119E-03 2 1000025 3 # BR(~s_R -> ~chi_30 s)
3.81356102E-03 2 1000035 3 # BR(~s_R -> ~chi_40 s)
0.00000000E+00 2 -1000024 4 # BR(~s_R -> ~chi_1- c)
0.00000000E+00 2 -1000037 4 # BR(~s_R -> ~chi_2- c)
0.00000000E+00 2 1000021 3 # BR(~s_R -> ~g s)
#
# PDG Width
DECAY 1000011 2.13682161E-01 # selectron_L decays
# BR NDA ID1 ID2
5.73155386E-01 2 1000022 11 # BR(~e_L -> ~chi_10 e-)
1.64522579E-01 2 1000023 11 # BR(~e_L -> ~chi_20 e-)
0.00000000E+00 2 1000025 11 # BR(~e_L -> ~chi_30 e-)
0.00000000E+00 2 1000035 11 # BR(~e_L -> ~chi_40 e-)
2.62322035E-01 2 -1000024 12 # BR(~e_L -> ~chi_1- nu_e)
0.00000000E+00 2 -1000037 12 # BR(~e_L -> ~chi_2- nu_e)
#
# PDG Width
DECAY 2000011 2.16121626E-01 # selectron_R decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 11 # BR(~e_R -> ~chi_10 e-)
0.00000000E+00 2 1000023 11 # BR(~e_R -> ~chi_20 e-)
0.00000000E+00 2 1000025 11 # BR(~e_R -> ~chi_30 e-)
0.00000000E+00 2 1000035 11 # BR(~e_R -> ~chi_40 e-)
0.00000000E+00 2 -1000024 12 # BR(~e_R -> ~chi_1- nu_e)
0.00000000E+00 2 -1000037 12 # BR(~e_R -> ~chi_2- nu_e)
#
# PDG Width
DECAY 1000013 1.9732697E-14 # smuon_L decays
# BR NDA ID1 ID2
1.000000000000 2 1000022 13 # BR(~mu_L -> ~chi_10 mu-)
# 0.00000000E+00 2 1000023 13 # BR(~mu_L -> ~chi_20 mu-)
# 0.00000000E+00 2 1000025 13 # BR(~mu_L -> ~chi_30 mu-)
# 0.00000000E+00 2 1000035 13 # BR(~mu_L -> ~chi_40 mu-)
# 0.00000000E+00 2 -1000024 14 # BR(~mu_L -> ~chi_1- nu_mu)
# 0.00000000E+00 2 -1000037 14 # BR(~mu_L -> ~chi_2- nu_mu)
#
# PDG Width
DECAY 2000013 2.16121626E-01 # smuon_R decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 13 # BR(~mu_R -> ~chi_10 mu-)
0.00000000E+00 2 1000023 13 # BR(~mu_R -> ~chi_20 mu-)
0.00000000E+00 2 1000025 13 # BR(~mu_R -> ~chi_30 mu-)
0.00000000E+00 2 1000035 13 # BR(~mu_R -> ~chi_40 mu-)
0.00000000E+00 2 -1000024 14 # BR(~mu_R -> ~chi_1- nu_mu)
0.00000000E+00 2 -1000037 14 # BR(~mu_R -> ~chi_2- nu_mu)
#
# PDG Width
DECAY 1000015 1.48327268E-01 # stau_1 decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 15 # BR(~tau_1 -> ~chi_10 tau-)
0.00000000E+00 2 1000023 15 # BR(~tau_1 -> ~chi_20 tau-)
0.00000000E+00 2 1000025 15 # BR(~tau_1 -> ~chi_30 tau-)
0.00000000E+00 2 1000035 15 # BR(~tau_1 -> ~chi_40 tau-)
0.00000000E+00 2 -1000024 16 # BR(~tau_1 -> ~chi_1- nu_tau)
0.00000000E+00 2 -1000037 16 # BR(~tau_1 -> ~chi_2- nu_tau)
0.00000000E+00 2 1000016 -37 # BR(~tau_1 -> ~nu_tauL H-)
0.00000000E+00 2 1000016 -24 # BR(~tau_1 -> ~nu_tauL W-)
#
# PDG Width
DECAY 2000015 2.69906096E-01 # stau_2 decays
# BR NDA ID1 ID2
5.96653046E-01 2 1000022 15 # BR(~tau_2 -> ~chi_10 tau-)
1.54536760E-01 2 1000023 15 # BR(~tau_2 -> ~chi_20 tau-)
0.00000000E+00 2 1000025 15 # BR(~tau_2 -> ~chi_30 tau-)
0.00000000E+00 2 1000035 15 # BR(~tau_2 -> ~chi_40 tau-)
2.48810195E-01 2 -1000024 16 # BR(~tau_2 -> ~chi_1- nu_tau)
0.00000000E+00 2 -1000037 16 # BR(~tau_2 -> ~chi_2- nu_tau)
0.00000000E+00 2 1000016 -37 # BR(~tau_2 -> ~nu_tauL H-)
0.00000000E+00 2 1000016 -24 # BR(~tau_2 -> ~nu_tauL W-)
0.00000000E+00 2 1000015 25 # BR(~tau_2 -> ~tau_1 h)
0.00000000E+00 2 1000015 35 # BR(~tau_2 -> ~tau_1 H)
0.00000000E+00 2 1000015 36 # BR(~tau_2 -> ~tau_1 A)
0.00000000E+00 2 1000015 23 # BR(~tau_2 -> ~tau_1 Z)
#
# PDG Width
DECAY 1000012 1.49881634E-01 # snu_eL decays
# BR NDA ID1 ID2
9.77700764E-01 2 1000022 12 # BR(~nu_eL -> ~chi_10 nu_e)
8.11554922E-03 2 1000023 12 # BR(~nu_eL -> ~chi_20 nu_e)
0.00000000E+00 2 1000025 12 # BR(~nu_eL -> ~chi_30 nu_e)
0.00000000E+00 2 1000035 12 # BR(~nu_eL -> ~chi_40 nu_e)
1.41836867E-02 2 1000024 11 # BR(~nu_eL -> ~chi_1+ e-)
0.00000000E+00 2 1000037 11 # BR(~nu_eL -> ~chi_2+ e-)
#
# PDG Width
DECAY 1000014 1.49881634E-01 # snu_muL decays
# BR NDA ID1 ID2
9.77700764E-01 2 1000022 14 # BR(~nu_muL -> ~chi_10 nu_mu)
8.11554922E-03 2 1000023 14 # BR(~nu_muL -> ~chi_20 nu_mu)
0.00000000E+00 2 1000025 14 # BR(~nu_muL -> ~chi_30 nu_mu)
0.00000000E+00 2 1000035 14 # BR(~nu_muL -> ~chi_40 nu_mu)
1.41836867E-02 2 1000024 13 # BR(~nu_muL -> ~chi_1+ mu-)
0.00000000E+00 2 1000037 13 # BR(~nu_muL -> ~chi_2+ mu-)
#
# PDG Width
DECAY 1000016 1.47518977E-01 # snu_tauL decays
# BR NDA ID1 ID2
9.85994529E-01 2 1000022 16 # BR(~nu_tauL -> ~chi_10 nu_tau)
6.25129612E-03 2 1000023 16 # BR(~nu_tauL -> ~chi_20 nu_tau)
0.00000000E+00 2 1000025 16 # BR(~nu_tauL -> ~chi_30 nu_tau)
0.00000000E+00 2 1000035 16 # BR(~nu_tauL -> ~chi_40 nu_tau)
7.75417479E-03 2 1000024 15 # BR(~nu_tauL -> ~chi_1+ tau-)
0.00000000E+00 2 1000037 15 # BR(~nu_tauL -> ~chi_2+ tau-)
0.00000000E+00 2 -1000015 -37 # BR(~nu_tauL -> ~tau_1+ H-)
0.00000000E+00 2 -2000015 -37 # BR(~nu_tauL -> ~tau_2+ H-)
0.00000000E+00 2 -1000015 -24 # BR(~nu_tauL -> ~tau_1+ W-)
0.00000000E+00 2 -2000015 -24 # BR(~nu_tauL -> ~tau_2+ W-)
#
# PDG Width
DECAY 1000024 1.70414503E-02 # chargino1+ decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000002 -1 # BR(~chi_1+ -> ~u_L db)
0.00000000E+00 2 2000002 -1 # BR(~chi_1+ -> ~u_R db)
0.00000000E+00 2 -1000001 2 # BR(~chi_1+ -> ~d_L* u )
0.00000000E+00 2 -2000001 2 # BR(~chi_1+ -> ~d_R* u )
0.00000000E+00 2 1000004 -3 # BR(~chi_1+ -> ~c_L sb)
0.00000000E+00 2 2000004 -3 # BR(~chi_1+ -> ~c_R sb)
0.00000000E+00 2 -1000003 4 # BR(~chi_1+ -> ~s_L* c )
0.00000000E+00 2 -2000003 4 # BR(~chi_1+ -> ~s_R* c )
0.00000000E+00 2 1000006 -5 # BR(~chi_1+ -> ~t_1 bb)
0.00000000E+00 2 2000006 -5 # BR(~chi_1+ -> ~t_2 bb)
0.00000000E+00 2 -1000005 6 # BR(~chi_1+ -> ~b_1* t )
0.00000000E+00 2 -2000005 6 # BR(~chi_1+ -> ~b_2* t )
0.00000000E+00 2 1000012 -11 # BR(~chi_1+ -> ~nu_eL e+ )
0.00000000E+00 2 1000014 -13 # BR(~chi_1+ -> ~nu_muL mu+ )
0.00000000E+00 2 1000016 -15 # BR(~chi_1+ -> ~nu_tau1 tau+)
0.00000000E+00 2 -1000011 12 # BR(~chi_1+ -> ~e_L+ nu_e)
0.00000000E+00 2 -2000011 12 # BR(~chi_1+ -> ~e_R+ nu_e)
0.00000000E+00 2 -1000013 14 # BR(~chi_1+ -> ~mu_L+ nu_mu)
0.00000000E+00 2 -2000013 14 # BR(~chi_1+ -> ~mu_R+ nu_mu)
9.25161117E-01 2 -1000015 16 # BR(~chi_1+ -> ~tau_1+ nu_tau)
0.00000000E+00 2 -2000015 16 # BR(~chi_1+ -> ~tau_2+ nu_tau)
7.48388828E-02 2 1000022 24 # BR(~chi_1+ -> ~chi_10 W+)
0.00000000E+00 2 1000023 24 # BR(~chi_1+ -> ~chi_20 W+)
0.00000000E+00 2 1000025 24 # BR(~chi_1+ -> ~chi_30 W+)
0.00000000E+00 2 1000035 24 # BR(~chi_1+ -> ~chi_40 W+)
0.00000000E+00 2 1000022 37 # BR(~chi_1+ -> ~chi_10 H+)
0.00000000E+00 2 1000023 37 # BR(~chi_1+ -> ~chi_20 H+)
0.00000000E+00 2 1000025 37 # BR(~chi_1+ -> ~chi_30 H+)
0.00000000E+00 2 1000035 37 # BR(~chi_1+ -> ~chi_40 H+)
#
# PDG Width
DECAY 1000037 2.48689510E+00 # chargino2+ decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000002 -1 # BR(~chi_2+ -> ~u_L db)
0.00000000E+00 2 2000002 -1 # BR(~chi_2+ -> ~u_R db)
0.00000000E+00 2 -1000001 2 # BR(~chi_2+ -> ~d_L* u )
0.00000000E+00 2 -2000001 2 # BR(~chi_2+ -> ~d_R* u )
0.00000000E+00 2 1000004 -3 # BR(~chi_2+ -> ~c_L sb)
0.00000000E+00 2 2000004 -3 # BR(~chi_2+ -> ~c_R sb)
0.00000000E+00 2 -1000003 4 # BR(~chi_2+ -> ~s_L* c )
0.00000000E+00 2 -2000003 4 # BR(~chi_2+ -> ~s_R* c )
0.00000000E+00 2 1000006 -5 # BR(~chi_2+ -> ~t_1 bb)
0.00000000E+00 2 2000006 -5 # BR(~chi_2+ -> ~t_2 bb)
0.00000000E+00 2 -1000005 6 # BR(~chi_2+ -> ~b_1* t )
0.00000000E+00 2 -2000005 6 # BR(~chi_2+ -> ~b_2* t )
2.00968837E-02 2 1000012 -11 # BR(~chi_2+ -> ~nu_eL e+ )
2.00968837E-02 2 1000014 -13 # BR(~chi_2+ -> ~nu_muL mu+ )
2.74507395E-02 2 1000016 -15 # BR(~chi_2+ -> ~nu_tau1 tau+)
5.20406111E-02 2 -1000011 12 # BR(~chi_2+ -> ~e_L+ nu_e)
0.00000000E+00 2 -2000011 12 # BR(~chi_2+ -> ~e_R+ nu_e)
5.20406111E-02 2 -1000013 14 # BR(~chi_2+ -> ~mu_L+ nu_mu)
0.00000000E+00 2 -2000013 14 # BR(~chi_2+ -> ~mu_R+ nu_mu)
2.82859898E-04 2 -1000015 16 # BR(~chi_2+ -> ~tau_1+ nu_tau)
5.66729336E-02 2 -2000015 16 # BR(~chi_2+ -> ~tau_2+ nu_tau)
2.31513269E-01 2 1000024 23 # BR(~chi_2+ -> ~chi_1+ Z )
6.76715120E-02 2 1000022 24 # BR(~chi_2+ -> ~chi_10 W+)
2.93654849E-01 2 1000023 24 # BR(~chi_2+ -> ~chi_20 W+)
0.00000000E+00 2 1000025 24 # BR(~chi_2+ -> ~chi_30 W+)
0.00000000E+00 2 1000035 24 # BR(~chi_2+ -> ~chi_40 W+)
1.78478848E-01 2 1000024 25 # BR(~chi_2+ -> ~chi_1+ h )
0.00000000E+00 2 1000024 35 # BR(~chi_2+ -> ~chi_1+ H )
0.00000000E+00 2 1000024 36 # BR(~chi_2+ -> ~chi_1+ A )
0.00000000E+00 2 1000022 37 # BR(~chi_2+ -> ~chi_10 H+)
0.00000000E+00 2 1000023 37 # BR(~chi_2+ -> ~chi_20 H+)
0.00000000E+00 2 1000025 37 # BR(~chi_2+ -> ~chi_30 H+)
0.00000000E+00 2 1000035 37 # BR(~chi_2+ -> ~chi_40 H+)
#
# PDG Width
DECAY 1000022 0.00000000E+00 # neutralino1 decays
#
# PDG Width
DECAY 1000023 2.07770048E-02 # neutralino2 decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000022 23 # BR(~chi_20 -> ~chi_10 Z )
0.00000000E+00 2 1000024 -24 # BR(~chi_20 -> ~chi_1+ W-)
0.00000000E+00 2 -1000024 24 # BR(~chi_20 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_20 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_20 -> ~chi_2- W+)
0.00000000E+00 2 1000022 25 # BR(~chi_20 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_20 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_20 -> ~chi_10 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_20 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_20 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_20 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_20 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_20 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_20 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_20 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_20 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_20 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_20 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_20 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_20 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_20 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_20 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_20 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_20 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_20 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_20 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_20 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_20 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_20 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_20 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_20 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_20 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_20 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_20 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_20 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_20 -> ~b_2* b )
0.00000000E+00 2 1000011 -11 # BR(~chi_20 -> ~e_L- e+)
0.00000000E+00 2 -1000011 11 # BR(~chi_20 -> ~e_L+ e-)
2.95071995E-02 2 2000011 -11 # BR(~chi_20 -> ~e_R- e+)
2.95071995E-02 2 -2000011 11 # BR(~chi_20 -> ~e_R+ e-)
0.00000000E+00 2 1000013 -13 # BR(~chi_20 -> ~mu_L- mu+)
0.00000000E+00 2 -1000013 13 # BR(~chi_20 -> ~mu_L+ mu-)
2.95071995E-02 2 2000013 -13 # BR(~chi_20 -> ~mu_R- mu+)
2.95071995E-02 2 -2000013 13 # BR(~chi_20 -> ~mu_R+ mu-)
4.40985601E-01 2 1000015 -15 # BR(~chi_20 -> ~tau_1- tau+)
4.40985601E-01 2 -1000015 15 # BR(~chi_20 -> ~tau_1+ tau-)
0.00000000E+00 2 2000015 -15 # BR(~chi_20 -> ~tau_2- tau+)
0.00000000E+00 2 -2000015 15 # BR(~chi_20 -> ~tau_2+ tau-)
0.00000000E+00 2 1000012 -12 # BR(~chi_20 -> ~nu_eL nu_eb)
0.00000000E+00 2 -1000012 12 # BR(~chi_20 -> ~nu_eL* nu_e )
0.00000000E+00 2 1000014 -14 # BR(~chi_20 -> ~nu_muL nu_mub)
0.00000000E+00 2 -1000014 14 # BR(~chi_20 -> ~nu_muL* nu_mu )
0.00000000E+00 2 1000016 -16 # BR(~chi_20 -> ~nu_tau1 nu_taub)
0.00000000E+00 2 -1000016 16 # BR(~chi_20 -> ~nu_tau1* nu_tau )
#
# PDG Width
DECAY 1000025 1.91598495E+00 # neutralino3 decays
# BR NDA ID1 ID2
1.13226601E-01 2 1000022 23 # BR(~chi_30 -> ~chi_10 Z )
2.11969194E-01 2 1000023 23 # BR(~chi_30 -> ~chi_20 Z )
2.95329778E-01 2 1000024 -24 # BR(~chi_30 -> ~chi_1+ W-)
2.95329778E-01 2 -1000024 24 # BR(~chi_30 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_30 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_30 -> ~chi_2- W+)
2.13076490E-02 2 1000022 25 # BR(~chi_30 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_30 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_30 -> ~chi_10 A )
1.24538329E-02 2 1000023 25 # BR(~chi_30 -> ~chi_20 h )
0.00000000E+00 2 1000023 35 # BR(~chi_30 -> ~chi_20 H )
0.00000000E+00 2 1000023 36 # BR(~chi_30 -> ~chi_20 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_30 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_30 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_30 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_30 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_30 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_30 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_30 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_30 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_30 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_30 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_30 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_30 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_30 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_30 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_30 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_30 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_30 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_30 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_30 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_30 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_30 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_30 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_30 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_30 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_30 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_30 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_30 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_30 -> ~b_2* b )
5.57220455E-04 2 1000011 -11 # BR(~chi_30 -> ~e_L- e+)
5.57220455E-04 2 -1000011 11 # BR(~chi_30 -> ~e_L+ e-)
1.25266782E-03 2 2000011 -11 # BR(~chi_30 -> ~e_R- e+)
1.25266782E-03 2 -2000011 11 # BR(~chi_30 -> ~e_R+ e-)
5.57220455E-04 2 1000013 -13 # BR(~chi_30 -> ~mu_L- mu+)
5.57220455E-04 2 -1000013 13 # BR(~chi_30 -> ~mu_L+ mu-)
1.25266782E-03 2 2000013 -13 # BR(~chi_30 -> ~mu_R- mu+)
1.25266782E-03 2 -2000013 13 # BR(~chi_30 -> ~mu_R+ mu-)
5.26279239E-03 2 1000015 -15 # BR(~chi_30 -> ~tau_1- tau+)
5.26279239E-03 2 -1000015 15 # BR(~chi_30 -> ~tau_1+ tau-)
6.72814564E-03 2 2000015 -15 # BR(~chi_30 -> ~tau_2- tau+)
6.72814564E-03 2 -2000015 15 # BR(~chi_30 -> ~tau_2+ tau-)
3.18920485E-03 2 1000012 -12 # BR(~chi_30 -> ~nu_eL nu_eb)
3.18920485E-03 2 -1000012 12 # BR(~chi_30 -> ~nu_eL* nu_e )
3.18920485E-03 2 1000014 -14 # BR(~chi_30 -> ~nu_muL nu_mub)
3.18920485E-03 2 -1000014 14 # BR(~chi_30 -> ~nu_muL* nu_mu )
3.20245934E-03 2 1000016 -16 # BR(~chi_30 -> ~nu_tau1 nu_taub)
3.20245934E-03 2 -1000016 16 # BR(~chi_30 -> ~nu_tau1* nu_tau )
#
# PDG Width
DECAY 1000035 2.58585079E+00 # neutralino4 decays
# BR NDA ID1 ID2
2.15369294E-02 2 1000022 23 # BR(~chi_40 -> ~chi_10 Z )
1.85499971E-02 2 1000023 23 # BR(~chi_40 -> ~chi_20 Z )
0.00000000E+00 2 1000025 23 # BR(~chi_40 -> ~chi_30 Z )
2.49541430E-01 2 1000024 -24 # BR(~chi_40 -> ~chi_1+ W-)
2.49541430E-01 2 -1000024 24 # BR(~chi_40 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_40 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_40 -> ~chi_2- W+)
6.93213268E-02 2 1000022 25 # BR(~chi_40 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_40 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_40 -> ~chi_10 A )
1.47602336E-01 2 1000023 25 # BR(~chi_40 -> ~chi_20 h )
0.00000000E+00 2 1000023 35 # BR(~chi_40 -> ~chi_20 H )
0.00000000E+00 2 1000023 36 # BR(~chi_40 -> ~chi_20 A )
0.00000000E+00 2 1000025 25 # BR(~chi_40 -> ~chi_30 h )
0.00000000E+00 2 1000025 35 # BR(~chi_40 -> ~chi_30 H )
0.00000000E+00 2 1000025 36 # BR(~chi_40 -> ~chi_30 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_40 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_40 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_40 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_40 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_40 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_40 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_40 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_40 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_40 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_40 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_40 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_40 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_40 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_40 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_40 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_40 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_40 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_40 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_40 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_40 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_40 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_40 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_40 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_40 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_40 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_40 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_40 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_40 -> ~b_2* b )
9.64835418E-03 2 1000011 -11 # BR(~chi_40 -> ~e_L- e+)
9.64835418E-03 2 -1000011 11 # BR(~chi_40 -> ~e_L+ e-)
3.75684470E-03 2 2000011 -11 # BR(~chi_40 -> ~e_R- e+)
3.75684470E-03 2 -2000011 11 # BR(~chi_40 -> ~e_R+ e-)
9.64835418E-03 2 1000013 -13 # BR(~chi_40 -> ~mu_L- mu+)
9.64835418E-03 2 -1000013 13 # BR(~chi_40 -> ~mu_L+ mu-)
3.75684470E-03 2 2000013 -13 # BR(~chi_40 -> ~mu_R- mu+)
3.75684470E-03 2 -2000013 13 # BR(~chi_40 -> ~mu_R+ mu-)
2.68215241E-03 2 1000015 -15 # BR(~chi_40 -> ~tau_1- tau+)
2.68215241E-03 2 -1000015 15 # BR(~chi_40 -> ~tau_1+ tau-)
1.62289809E-02 2 2000015 -15 # BR(~chi_40 -> ~tau_2- tau+)
1.62289809E-02 2 -2000015 15 # BR(~chi_40 -> ~tau_2+ tau-)
2.53796547E-02 2 1000012 -12 # BR(~chi_40 -> ~nu_eL nu_eb)
2.53796547E-02 2 -1000012 12 # BR(~chi_40 -> ~nu_eL* nu_e )
2.53796547E-02 2 1000014 -14 # BR(~chi_40 -> ~nu_muL nu_mub)
2.53796547E-02 2 -1000014 14 # BR(~chi_40 -> ~nu_muL* nu_mu )
2.54724352E-02 2 1000016 -16 # BR(~chi_40 -> ~nu_tau1 nu_taub)
2.54724352E-02 2 -1000016 16 # BR(~chi_40 -> ~nu_tau1* nu_tau )
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
'SUSY:qqbar2sleptonantislepton = on',
'1000013:tau0 = %.1f' % CTAU_POINT,
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
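# Note (editorial assumption about Pythia8 semantics): the '1000013:tau0'
# parameter set above is the smuon's nominal proper lifetime c*tau in mm, so
# CTAU_POINT overrides the lifetime implied by the smuon_L width in the
# embedded SLHA table.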
| [
"[email protected]"
] | |
98c7acd6912e8bc5c371f591667dc2220b9071e9 | f10230c09860f1e01dcef56daab30a9200d7eb60 | /common/admin.py | 170b802e29c547aedb5673fa213fe83ff9dd05ac | [] | no_license | DentiQ/test_web | 08280ff506439e9bd1565fb502a9780b3c79707d | b7bceab1edd670a8cf60ecf882a466c26e68fb63 | refs/heads/master | 2023-01-23T14:43:17.420477 | 2020-12-01T13:54:18 | 2020-12-01T13:54:18 | 316,973,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
| [
"[email protected]"
] | |
62a32398fb83bf899044a0cfa0eab60cef85b4c2 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/topic_constant_service/transports/grpc.py | f19227ebd69e1277ed3de695740a3a5b5299f611 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,337 | py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import topic_constant
from google.ads.googleads.v7.services.types import topic_constant_service
from .base import TopicConstantServiceTransport, DEFAULT_CLIENT_INFO
class TopicConstantServiceGrpcTransport(TopicConstantServiceTransport):
"""gRPC backend transport for TopicConstantService.
Service to fetch topic constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_topic_constant(self) -> Callable[
[topic_constant_service.GetTopicConstantRequest],
topic_constant.TopicConstant]:
r"""Return a callable for the get topic constant method over gRPC.
Returns the requested topic constant in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetTopicConstantRequest],
~.TopicConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_topic_constant' not in self._stubs:
self._stubs['get_topic_constant'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.TopicConstantService/GetTopicConstant',
request_serializer=topic_constant_service.GetTopicConstantRequest.serialize,
response_deserializer=topic_constant.TopicConstant.deserialize,
)
return self._stubs['get_topic_constant']
__all__ = (
'TopicConstantServiceGrpcTransport',
)
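# Minimal usage sketch (editorial addition; assumes Application Default
# Credentials and an illustrative resource name):
#
#   transport = TopicConstantServiceGrpcTransport()
#   request = topic_constant_service.GetTopicConstantRequest(
#       resource_name='topicConstants/200')
#   topic = transport.get_topic_constant(request)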
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
4c49ddf6a3224bc642267e24b285e2a9da8767ed | 6b6e20004b46165595f35b5789e7426d5289ea48 | /endpoints/appr/__init__.py | c998d8a958ab8159696191c64e114b0ac4e6902c | [
"Apache-2.0"
] | permissive | anwarchk/quay | 2a83d0ab65aff6a1120fbf3a45dd72f42211633b | 23c5120790c619174e7d36784ca5aab7f4eece5c | refs/heads/master | 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 | Apache-2.0 | 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null | UTF-8 | Python | false | false | 1,796 | py |
import logging
from functools import wraps
from cnr.exception import Forbidden
from flask import Blueprint
from app import metric_queue
from auth.permissions import (AdministerRepositoryPermission, ReadRepositoryPermission,
ModifyRepositoryPermission)
from endpoints.appr.decorators import require_repo_permission
from util.metrics.metricqueue import time_blueprint
appr_bp = Blueprint('appr', __name__)
time_blueprint(appr_bp, metric_queue)
logger = logging.getLogger(__name__)
def _raise_method(repository, scopes):
raise Forbidden("Unauthorized access for: %s" % repository,
{"package": repository, "scopes": scopes})
def _get_reponame_kwargs(*args, **kwargs):
return [kwargs['namespace'], kwargs['package_name']]
require_app_repo_read = require_repo_permission(ReadRepositoryPermission,
scopes=['pull'],
allow_public=True,
raise_method=_raise_method,
get_reponame_method=_get_reponame_kwargs)
require_app_repo_write = require_repo_permission(ModifyRepositoryPermission,
scopes=['pull', 'push'],
raise_method=_raise_method,
get_reponame_method=_get_reponame_kwargs)
require_app_repo_admin = require_repo_permission(AdministerRepositoryPermission,
scopes=['pull', 'push'],
raise_method=_raise_method,
get_reponame_method=_get_reponame_kwargs)
| [
"[email protected]"
] | |
b692f39e14180b18642560210b11e1e6cb9edac3 | 9a355bd36f089a829eb965bb6d725534443a4f15 | /nlp-LDA/main.py | b292767164afc103ab14fa3ecb0bb9377d7e224f | [] | no_license | yflfly/nlp_tools | bf1c5745c37116b14fba417d41271c716ee9a487 | 95ddeaad18ee94cc19b715e38c9af2cda7cbe213 | refs/heads/master | 2021-06-21T10:17:57.713605 | 2021-03-07T13:00:36 | 2021-03-07T13:00:36 | 176,850,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,472 | py |
# coding:utf-8
from gensim import corpora, models
import jieba.posseg as jp
import jieba
# Keyword extraction based on an LDA topic model
# Simple text preprocessing
def get_text(texts):
flags = ('n', 'nr', 'ns', 'nt', 'eng', 'v', 'd')  # POS tags to keep
stopwords = ('的', '就', '是', '用', '还', '在', '上', '作为')  # stopwords
words_list = []
for text in texts:
words = [w.word for w in jp.cut(text) if w.flag in flags and w.word not in stopwords]
words_list.append(words)
return words_list
# Build and train the LDA model
def LDA_model(words_list):
# Build the dictionary
# Dictionary() scans all the texts, assigns each unique word its own integer ID, and collects word counts and related statistics
dictionary = corpora.Dictionary(words_list)
print(dictionary)
print('IDs assigned to each word:')
print(dictionary.token2id)  # inspect each word's ID
# Convert the dictionary into a bag of words
# doc2bow() turns each document into a bag-of-words vector; the result corpus is a list of vectors, one per document.
# Each document vector contains a series of tuples of the form (word ID, word count).
corpus = [dictionary.doc2bow(words) for words in words_list]
print('Bag-of-words vector for each document:')
print(corpus)  # inspect each document's vector
# LDA topic model
# num_topics -- required, the number of topics to generate.
# id2word -- required, the dictionary LdaModel uses to map ids back to word strings.
# passes -- optional, how many times the model sweeps the corpus. More passes give a more accurate model, but on very large corpora many passes take a long time.
lda_model = models.ldamodel.LdaModel(corpus=corpus, num_topics=2, id2word=dictionary, passes=10)
return lda_model
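# Illustrative follow-up (editorial addition, not in the original script;
# assumes the same dictionary that was used for training):
def infer_topics(lda_model, dictionary, words):
    """Return the topic mixture of an unseen, pre-tokenized document."""
    return lda_model.get_document_topics(dictionary.doc2bow(words))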
if __name__ == "__main__":
texts = ['作为千元机中为数不多拥有真全面屏的手机,OPPO K3一经推出,就簇拥不少粉丝', \
'很多人在冲着这块屏幕购买了OPPO K3之后,发现原来K3的过人之处不止是在屏幕上', \
'OPPO K3的消费者对这部手机总体还是十分满意的', \
'吉利博越PRO在7月3日全新吉客智能生态系统GKUI19发布会上正式亮相', \
'今年上海车展,长安CS75 PLUS首次亮相', \
'普通版车型采用的是双边共双出式排气布局;运动版本车型采用双边共四出的排气布局']
# Tokenize the texts
words_list = get_text(texts)
print('Tokenized texts:')
print(words_list)
# Train the LDA model
lda_model = LDA_model(words_list)
# print_topic and print_topics can be used to inspect the topics
# Print all topics, showing 5 words per topic
topic_words = lda_model.print_topics(num_topics=2, num_words=5)
print('All topics, 5 words each:')
print(topic_words)
# Words of one topic and their weights
words_list = lda_model.show_topic(0, 5)
print('Words of the topic and their weights:')
print(words_list)
'''
Running the script prints:
Building prefix dict from the default dictionary ...
Dumping model to file cache C:\\Users\\think\AppData\Local\Temp\jieba.cache
Loading model cost 8.649 seconds.
Prefix dict has been built successfully.
Tokenized texts:
[['拥有', '真', '全面', '手机', 'OPPO', 'K3', '一经', '推出', '簇拥', '不少', '粉丝'], ['人', '屏幕', '购买', 'OPPO', 'K3', '发现', '原来', 'K3', '不止', '屏幕'], ['OPPO', 'K3', '消费者', '部手机', '总体'], ['吉利', '博越', 'PRO', '全新', '吉客', '智能', 'GKUI19', '发布会', '亮相'], ['上海', '长安', 'CS75', 'PLUS', '亮相'], ['版', '车型', '采用', '双边', '共', '出式', '排气', '布局', '版本', '车型', '采用', '双边', '共', '排气', '布局']]
Dictionary(42 unique tokens: ['K3', 'OPPO', '一经', '不少', '全面']...)
IDs assigned to each word:
{'K3': 0, 'OPPO': 1, '一经': 2, '不少': 3, '全面': 4, '手机': 5, '拥有': 6, '推出': 7, '真': 8, '簇拥': 9, '粉丝': 10, '不止': 11, '人': 12, '原来': 13, '发现': 14, '屏幕': 15, '购买': 16, '总体': 17, '消费者': 18, '部手机': 19, 'GKUI19': 20, 'PRO': 21, '亮相': 22, '全新': 23, '博越': 24, '发布会': 25, '吉利': 26, '吉客': 27, '智能': 28, 'CS75': 29, 'PLUS': 30, '上海': 31, '长安': 32, '共': 33, '出式': 34, '双边': 35, '布局': 36, '排气': 37, '版': 38, '版本': 39, '车型': 40, '采用': 41}
Bag-of-words vector for each document:
[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)], [(0, 2), (1, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 2), (16, 1)], [(0, 1), (1, 1), (17, 1), (18, 1), (19, 1)], [(20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 1), (27, 1), (28, 1)], [(22, 1), (29, 1), (30, 1), (31, 1), (32, 1)], [(33, 2), (34, 1), (35, 2), (36, 2), (37, 2), (38, 1), (39, 1), (40, 2), (41, 2)]]
All topics, 5 words each:
[(0, '0.056*"K3" + 0.055*"屏幕" + 0.055*"亮相" + 0.034*"OPPO" + 0.033*"全新"'), (1, '0.048*"采用" + 0.048*"双边" + 0.048*"车型" + 0.048*"排气" + 0.048*"布局"')]
Words of the topic and their weights:
[('K3', 0.0563279), ('屏幕', 0.05533156), ('亮相', 0.055258993), ('OPPO', 0.033577614), ('全新', 0.033189856)]
'''
| [
"[email protected]"
] | |
2ef363ededa023eb6f443f5748066deee98c1dce | 7137161629a1003583744cc3bd0e5d3498e0a924 | /airflow/providers/google/suite/hooks/sheets.py | ce4218c36fbd5f124924350771ee3660db5f0c29 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jbampton/airflow | 3fca85975854eb916f16143b659a9119af143963 | dcfa14d60dade3fdefa001d10013466fe4d77f0d | refs/heads/master | 2023-05-25T22:31:49.104069 | 2021-09-18T19:18:32 | 2021-09-18T19:18:32 | 247,645,744 | 3 | 0 | Apache-2.0 | 2020-03-16T08:12:58 | 2020-03-16T08:12:57 | null | UTF-8 | Python | false | false | 18,662 | py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a Google Sheets API hook"""
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GSheetsHook(GoogleBaseHook):
"""
Interact with Google Sheets via Google Cloud connection
Reading and writing cells in Google Sheet:
https://developers.google.com/sheets/api/guides/values
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param api_version: API Version
:type api_version: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v4',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.delegate_to = delegate_to
self._conn = None
def get_conn(self) -> Any:
"""
Retrieves connection to Google Sheets.
:return: Google Sheets services object.
:rtype: Any
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build('sheets', self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def get_values(
self,
spreadsheet_id: str,
range_: str,
major_dimension: str = 'DIMENSION_UNSPECIFIED',
value_render_option: str = 'FORMATTED_VALUE',
date_time_render_option: str = 'SERIAL_NUMBER',
) -> list:
"""
Gets values from Google Sheet from a single range
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param range_: The A1 notation of the values to retrieve.
:type range_: str
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:type major_dimension: str
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:type value_render_option: str
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:type date_time_render_option: str
:return: An array of sheet values from the specified sheet.
:rtype: List
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.get(
spreadsheetId=spreadsheet_id,
range=range_,
majorDimension=major_dimension,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
.execute(num_retries=self.num_retries)
)
return response['values']
def batch_get_values(
self,
spreadsheet_id: str,
ranges: List,
major_dimension: str = 'DIMENSION_UNSPECIFIED',
value_render_option: str = 'FORMATTED_VALUE',
date_time_render_option: str = 'SERIAL_NUMBER',
) -> dict:
"""
Gets values from Google Sheet from a list of ranges
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param ranges: The A1 notation of the values to retrieve.
:type ranges: List
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:type major_dimension: str
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:type value_render_option: str
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:type date_time_render_option: str
:return: Google Sheets API response.
:rtype: Dict
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.batchGet(
spreadsheetId=spreadsheet_id,
ranges=ranges,
majorDimension=major_dimension,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
.execute(num_retries=self.num_retries)
)
return response
def update_values(
self,
spreadsheet_id: str,
range_: str,
values: List,
major_dimension: str = 'ROWS',
value_input_option: str = 'RAW',
include_values_in_response: bool = False,
value_render_option: str = 'FORMATTED_VALUE',
date_time_render_option: str = 'SERIAL_NUMBER',
) -> dict:
"""
Updates values from Google Sheet from a single range
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
:param spreadsheet_id: The Google Sheet ID to interact with.
:type spreadsheet_id: str
:param range_: The A1 notation of the values to retrieve.
:type range_: str
:param values: Data within a range of the spreadsheet.
:type values: List
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:type major_dimension: str
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:type value_input_option: str
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:type include_values_in_response: bool
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:type value_render_option: str
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:type date_time_render_option: str
:return: Google Sheets API response.
:rtype: Dict
"""
service = self.get_conn()
body = {"range": range_, "majorDimension": major_dimension, "values": values}
response = (
service.spreadsheets()
.values()
.update(
spreadsheetId=spreadsheet_id,
range=range_,
valueInputOption=value_input_option,
includeValuesInResponse=include_values_in_response,
responseValueRenderOption=value_render_option,
responseDateTimeRenderOption=date_time_render_option,
body=body,
)
.execute(num_retries=self.num_retries)
)
return response
def batch_update_values(
self,
spreadsheet_id: str,
ranges: List,
values: List,
major_dimension: str = 'ROWS',
value_input_option: str = 'RAW',
include_values_in_response: bool = False,
value_render_option: str = 'FORMATTED_VALUE',
date_time_render_option: str = 'SERIAL_NUMBER',
) -> dict:
"""
Updates values from Google Sheet for multiple ranges
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchUpdate
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param ranges: The A1 notation of the values to retrieve.
:type ranges: List
:param values: Data within a range of the spreadsheet.
:type values: List
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:type major_dimension: str
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:type value_input_option: str
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:type include_values_in_response: bool
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:type value_render_option: str
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:type date_time_render_option: str
:return: Google Sheets API response.
:rtype: Dict
"""
if len(ranges) != len(values):
raise AirflowException(
"'Ranges' and 'Lists' must be of equal length. \n \
'Ranges' is of length: {} and \n \
'Values' is of length: {}.".format(
str(len(ranges)), str(len(values))
)
)
service = self.get_conn()
data = []
for idx, range_ in enumerate(ranges):
value_range = {"range": range_, "majorDimension": major_dimension, "values": values[idx]}
data.append(value_range)
body = {
"valueInputOption": value_input_option,
"data": data,
"includeValuesInResponse": include_values_in_response,
"responseValueRenderOption": value_render_option,
"responseDateTimeRenderOption": date_time_render_option,
}
response = (
service.spreadsheets()
.values()
.batchUpdate(spreadsheetId=spreadsheet_id, body=body)
.execute(num_retries=self.num_retries)
)
return response
def append_values(
self,
spreadsheet_id: str,
range_: str,
values: List,
major_dimension: str = 'ROWS',
value_input_option: str = 'RAW',
insert_data_option: str = 'OVERWRITE',
include_values_in_response: bool = False,
value_render_option: str = 'FORMATTED_VALUE',
date_time_render_option: str = 'SERIAL_NUMBER',
) -> dict:
"""
Append values from Google Sheet from a single range
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param range_: The A1 notation of the values to retrieve.
:type range_: str
:param values: Data within a range of the spreadsheet.
:type values: List
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:type major_dimension: str
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:type value_input_option: str
:param insert_data_option: Determines how existing data is changed when new data is input.
OVERWRITE or INSERT_ROWS
:type insert_data_option: str
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:type include_values_in_response: bool
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:type value_render_option: str
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:type date_time_render_option: str
:return: Google Sheets API response.
:rtype: Dict
"""
service = self.get_conn()
body = {"range": range_, "majorDimension": major_dimension, "values": values}
response = (
service.spreadsheets()
.values()
.append(
spreadsheetId=spreadsheet_id,
range=range_,
valueInputOption=value_input_option,
insertDataOption=insert_data_option,
includeValuesInResponse=include_values_in_response,
responseValueRenderOption=value_render_option,
responseDateTimeRenderOption=date_time_render_option,
body=body,
)
.execute(num_retries=self.num_retries)
)
return response
def clear(self, spreadsheet_id: str, range_: str) -> dict:
"""
Clear values from Google Sheet from a single range
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param range_: The A1 notation of the values to retrieve.
:type range_: str
:return: Google Sheets API response.
:rtype: Dict
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.clear(spreadsheetId=spreadsheet_id, range=range_)
.execute(num_retries=self.num_retries)
)
return response
def batch_clear(self, spreadsheet_id: str, ranges: list) -> dict:
"""
Clear values from Google Sheet from a list of ranges
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchClear
:param spreadsheet_id: The Google Sheet ID to interact with
:type spreadsheet_id: str
:param ranges: The A1 notation of the values to retrieve.
:type ranges: List
:return: Google Sheets API response.
:rtype: Dict
"""
service = self.get_conn()
body = {"ranges": ranges}
response = (
service.spreadsheets()
.values()
.batchClear(spreadsheetId=spreadsheet_id, body=body)
.execute(num_retries=self.num_retries)
)
return response
def get_spreadsheet(self, spreadsheet_id: str):
"""
        Retrieves the spreadsheet matching the given id.
:param spreadsheet_id: The spreadsheet id.
:type spreadsheet_id: str
        :return: The spreadsheet that matches the given id.
"""
response = (
self.get_conn()
.spreadsheets()
.get(spreadsheetId=spreadsheet_id)
.execute(num_retries=self.num_retries)
)
return response
def get_sheet_titles(self, spreadsheet_id: str, sheet_filter: Optional[List[str]] = None):
"""
Retrieves the sheet titles from a spreadsheet matching the given id and sheet filter.
:param spreadsheet_id: The spreadsheet id.
:type spreadsheet_id: str
:param sheet_filter: List of sheet title to retrieve from sheet.
:type sheet_filter: List[str]
        :return: A list of sheet titles from the specified spreadsheet that match
            the sheet filter.
"""
response = self.get_spreadsheet(spreadsheet_id=spreadsheet_id)
if sheet_filter:
titles = [
sh['properties']['title']
for sh in response['sheets']
if sh['properties']['title'] in sheet_filter
]
else:
titles = [sh['properties']['title'] for sh in response['sheets']]
return titles
def create_spreadsheet(self, spreadsheet: Dict[str, Any]) -> Dict[str, Any]:
"""
Creates a spreadsheet, returning the newly created spreadsheet.
:param spreadsheet: an instance of Spreadsheet
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
:type spreadsheet: Dict[str, Any]
        :return: A spreadsheet object.
"""
self.log.info("Creating spreadsheet: %s", spreadsheet['properties']['title'])
response = (
self.get_conn().spreadsheets().create(body=spreadsheet).execute(num_retries=self.num_retries)
)
self.log.info("Spreadsheet: %s created", spreadsheet['properties']['title'])
return response
| [
"[email protected]"
] | |
594840a1230e989a0903678c4308812b815ff3e6 | 66f037cc0bf8683a814eb610d06edd3667f962e0 | /escpos/tests/test_epson_genericescpos.py | baf3acf4f890528e24d17c60035b042ca09644d7 | [
"Apache-2.0"
] | permissive | cemsbr/pyescpos | 6118e7fcf4b5e85b94639be42cfb6fe87f084ba9 | 58ebc1b544458803c4235f3fa80e8fa376b18ec2 | refs/heads/master | 2020-12-08T07:20:24.977694 | 2019-12-30T00:33:08 | 2019-12-30T00:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | # -*- coding: utf-8 -*-
#
# escpos/tests/test_epson_genericescpos.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from escpos.impl.epson import GenericESCPOS
from escpos import feature
@pytest.fixture(scope='module')
def printer():
return GenericESCPOS(pytest.FakeDevice())
def test_has_model_attr(printer):
assert hasattr(printer, 'model')
def test_has_feature_attribute(printer):
assert hasattr(printer, 'feature')
assert isinstance(printer.feature, feature.FeatureAttributes)
def test_feature_attribute_columns(printer):
assert hasattr(printer.feature, feature.COLUMNS)
assert printer.feature.columns.normal == 48
assert printer.feature.columns.expanded == 24
assert printer.feature.columns.condensed == 64
| [
"[email protected]"
] | |
be5306049fed1701ff84c2b725f19d30dc048bd9 | a36501f44a09ca03dd1167e1d7965f782e159097 | /admin/views/celery.py | 0045559263cc795693771411bf703a74286ad244 | [
"Apache-2.0"
] | permissive | ssfdust/full-stack-flask-smorest | 9429a2cdcaa3ff3538875cc74cff802765678d4b | 4f866b2264e224389c99bbbdb4521f4b0799b2a3 | refs/heads/master | 2023-08-05T08:48:03.474042 | 2023-05-07T01:08:20 | 2023-05-07T01:08:20 | 205,528,296 | 39 | 10 | Apache-2.0 | 2023-08-31T00:18:42 | 2019-08-31T10:12:25 | Python | UTF-8 | Python | false | false | 3,026 | py | # Copyright 2019 RedLotus <[email protected]>
# Author: RedLotus <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Management module for Celery
Scheduled-task management
Task-status management
"""
from ..formaters import json_formatter, line_formatter
from .bases import AuthMongView
from ..forms import TaskSelect2Field
class CeleryScheduleView(AuthMongView):
"""
    Scheduled-task management
    Used to create scheduled tasks. Two schedule formats are supported,
    Crontab and Interval, and only one of the two may be chosen per task.
"""
from app.extensions.mongobeat.models import PeriodicTask
can_create = True
can_edit = True
can_delete = True
can_view_details = True
    extra_js = ["/static/js/pages/celerybeat.js"]  # JS that extends the form
column_list = ["name", "task", "enabled", "schedule", "last_run_at"]
column_labels = {"schedule": "周期"}
column_editable_list = ["enabled", "run_immediately"]
column_default_sort = []
column_filters = ["name"]
can_view_details = True
form_overrides = {"task": TaskSelect2Field}
def _scheduleinfo(view, context, model, name):
"""调度信息展示"""
return str(model).split(":")[1]
column_formatters = {"schedule": _scheduleinfo}
class CeleryTaskView(AuthMongView):
"""任务查看"""
can_create = False
can_edit = False
can_delete = True
can_view_details = True
details_modal = False
column_default_sort = [("time_start", True)]
column_filters = ["time_start", "date_done"]
column_exclude_list = [
"id",
"delivery_info",
"result",
"children",
"args",
"acknowledged",
"traceback",
"kwargs",
"parent_id",
"type",
]
column_formatters = {
"delivery_info": json_formatter,
"result": json_formatter,
"traceback": line_formatter,
}
def get_list(
self,
page,
sort_column,
sort_desc,
search,
filters,
execute=True,
page_size=None,
):
"""将所有任务置为已读"""
count, query = super().get_list(
page=page,
sort_column=sort_column,
sort_desc=sort_desc,
search=search,
filters=filters,
execute=execute,
page_size=page_size,
)
for item in query:
if item.checked is False:
item.checked = True
item.save()
return count, query
| [
"[email protected]"
] | |
757c83c09d9a63c7e07a8ce62f3e008a7d1d516b | 1515be3015ad988278d5a095416c0a0066a02757 | /src/users/models/microsoftgraphentity.py | b0f43ecc2a37e01c3384617b3fbb889435ca08b5 | [
"MIT"
] | permissive | peombwa/Sample-Graph-Python-Client | 2ad494cc5b5fe026edd6ed7fee8cac2dd96aaa60 | 3396f531fbe6bb40a740767c4e31aee95a3b932e | refs/heads/master | 2020-12-29T09:50:38.941350 | 2020-02-05T22:45:28 | 2020-02-05T22:45:28 | 238,561,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Microsoftgraphentity(Model):
"""entity.
:param id:
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self, id=None):
super(Microsoftgraphentity, self).__init__()
self.id = id
| [
"[email protected]"
] | |
088740864d07a9ca84e89627a1bab44538e4be33 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_003_20180618134857.py | 2745c76be92765bcf40268fd8a5d34aac92e1b25 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | from random import randint
# Sudoku1 almost solved
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True:  # prints the Sudoku until it is solved
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
    # print(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
    numbers = " 0123456789"  # allowed characters when validating the input format
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
    sudoku1[int(x[0])-1][int(x[2])-1] = int(x[4])
    try:
        # every row of a solved grid sums to 45; remaining ' ' cells raise TypeError
        if all(sum(row) == 45 for row in sudoku1):
            print("YOU WIN")
            break
    except TypeError:
        print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
| [
"[email protected]"
] | |
b114f3d71ebdae2a74750a2f0d56ad7bd8da3155 | 27f6c33ad3f0240e64aad809d4bd57af3ecda498 | /Day06/8_Q10.py | 0ddb90dfa64678823f9b0d3c8f5988ce857c4809 | [] | no_license | bigdata202005/PythonProject | 26ce3c0ed3e47cd727606455e6ca95561907dbe4 | 4e0377fdb86db294483fb7a347429bf299e44ce5 | refs/heads/main | 2023-01-08T01:48:22.271143 | 2020-11-06T05:15:22 | 2020-11-06T05:15:22 | 310,498,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | """
Q10 Four-function calculator
Write a class Calculator that behaves as follows.
cal1 = Calculator([1,2,3,4,5])
cal1.sum() # total
15
cal1.avg() # average
3.0
cal2 = Calculator([6,7,8,9,10])
cal2.sum() # total
40
cal2.avg() # average
8.0
"""
class Calculator:
def __init__(self, data_list):
self.data_list = data_list
def sum(self):
print(sum(self.data_list))
def avg(self):
print(round(sum(self.data_list) / len(self.data_list), 1))
if __name__ == '__main__':
cal1 = Calculator([1, 2, 3, 4, 5])
    cal1.sum() # total
    cal1.avg() # average
cal2 = Calculator([6, 7, 8, 9, 10])
    cal2.sum() # total
    cal2.avg() # average | [
"[email protected]"
] | |
96e20d4131f5317dff89c74611053c3b8918cdf8 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/GLES2/NV/conservative_raster_pre_snap.py | df58883753676dd6158183e136d4f6c4516af278 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | '''OpenGL extension NV.conservative_raster_pre_snap
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.conservative_raster_pre_snap to provide a more
Python-friendly API
Overview (from the spec)
NV_conservative_raster_pre_snap_triangles provides a new mode to achieve
rasterization of triangles that is conservative w.r.t the triangle at
infinite precision i.e. before it is snapped to the sub-pixel grid. This
extension provides a new mode that expands this functionality to lines and
points.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/conservative_raster_pre_snap.txt
'''
from OpenGL.raw.GLES2.NV.conservative_raster_pre_snap import _EXTENSION_NAME
def glInitConservativeRasterPreSnapNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
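# A minimal usage sketch (illustrative; assumes a current GLES 2 context in
# which extension queries are valid):
#
#   from OpenGL.GLES2.NV.conservative_raster_pre_snap import (
#       glInitConservativeRasterPreSnapNV,
#   )
#   if glInitConservativeRasterPreSnapNV():
#       ...  # the extension's pre-snap raster mode tokens may be used here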
### END AUTOGENERATED SECTION | [
"[email protected]"
] | |
a4080a7404a99836de352ed8a0d32120f99c2fdc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /6YN2ww3B4cQZ6rTmN_4.py | 065864678599624588b72aab946bb9938cea33ba | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | """
Write a function that returns `True` if a year is a leap, otherwise it returns
`False`.
A year is a "leap year" if it lasts 366 days, instead of 365 in a typical
year. That extra day is added to the end of the shortest month, creating
February 29.
A leap year occurs every four years, and will take place if the year is a
multiple of four. The exception to this is a year at the beginning of a
century (for example, 1900 or 2000), where the year must be divisible by 400
to be a leap year.
Look at the examples, and if you need help, look at the resources panel.
### Examples
leap_year(1990) ➞ False
leap_year(1924) ➞ True
leap_year(2021) ➞ False
### Notes
* Do not overthink this challenge.
* You can solve the problem with a few lines of code.
"""
def leapYear(year):
return year%4==0
| [
"[email protected]"
] | |
a7a3c574f559605b827426b88383a43711079bb1 | 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | /python/paddle/fluid/tests/unittests/test_fleet_base.py | fe24c8838ec6c2ac41dde0d8f7ac9911509942b0 | [
"Apache-2.0"
] | permissive | Superjomn/Paddle | f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | refs/heads/develop | 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 | Apache-2.0 | 2023-04-14T02:29:52 | 2016-08-30T01:45:54 | C++ | UTF-8 | Python | false | false | 7,727 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.fluid as fluid
class TestFleetBase(unittest.TestCase):
def setUp(self):
os.environ["POD_IP"] = "127.0.0.1"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36000"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ[
"PADDLE_PSERVERS_IP_PORT_LIST"
] = "127.0.0.1:36001,127.0.0.2:36002"
def test_init(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
def test_is_first_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_first_worker():
print("test fleet first worker done.")
def test_worker_index(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_index())
def test_worker_num(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_num())
def test_is_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_worker():
print("test fleet is worker")
def test_worker_endpoints(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
self.assertEqual(
"127.0.0.1:36000", fleet.worker_endpoints(to_string=True)
)
self.assertEqual(["127.0.0.1:36000"], fleet.worker_endpoints())
def test_server_num(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
os.environ["PADDLE_TRAINERS_NUM"] = "2"
self.assertEqual(2, fleet.server_num())
def test_server_index(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
self.assertEqual(0, fleet.server_index())
def test_server_endpoints(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
if fleet.is_server():
self.assertEqual(
"127.0.0.1:36001,127.0.0.2:36002",
fleet.server_endpoints(to_string=True),
)
self.assertEqual(
["127.0.0.1:36001", "127.0.0.2:36002"], fleet.server_endpoints()
)
def test_is_server(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
self.assertTrue(fleet.is_server())
def test_util(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
self.assertIsNotNone(fleet.util)
def test_barrier_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_worker():
fleet.barrier_worker()
def test_init_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with self.assertRaises(ValueError):
if fleet.is_worker():
fleet.init_worker()
def test_stop_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with self.assertRaises(ValueError):
if fleet.is_worker():
fleet.stop_worker()
def test_distributed_optimizer(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer)
def test_exception(self):
import paddle.distributed.fleet as fleet
self.assertRaises(Exception, fleet.init_worker)
class TestFleetDygraph(unittest.TestCase):
def setUp(self):
os.environ[
"PADDLE_TRAINER_ENDPOINTS"
] = "127.0.0.1:36213,127.0.0.1:36214"
os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ["PADDLE_TRAINER_ID"] = "0"
def test_dygraph_method(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = fluid.dygraph.to_variable(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(
learning_rate=0.01, parameters=layer.parameters()
)
        # remove init because this UT cannot launch a distributed task
adam = fleet.distributed_optimizer(adam)
try:
dp_layer = fleet.distributed_model(layer)
except Exception as e:
# This is just for testing the interface,
# and will not actually be called. Therefore,
# use "try-except" to avoid errors.
lr = 0.001
adam.set_lr(lr)
cur_lr = adam.get_lr()
assert lr == cur_lr
state_dict = adam.state_dict()
adam.set_state_dict(state_dict)
final_strategy = fleet._final_strategy()
class TestFleetBaseSingleError(unittest.TestCase):
def setUp(self):
os.environ.pop("PADDLE_TRAINER_ENDPOINTS")
def gen_data(self):
return {
"x": np.random.random(size=(128, 32)).astype('float32'),
"y": np.random.randint(2, size=(128, 1)).astype('int64'),
}
def test_single_run_collective_minimize(self):
def test_single_error():
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
prediction = paddle.static.nn.fc(
x=fc_1, size=2, activation='softmax'
)
cost = paddle.nn.functional.cross_entropy(
input=prediction,
label=input_y,
reduction='none',
use_softmax=False,
)
avg_cost = paddle.mean(x=cost)
fleet.init(is_collective=True)
# in non_distributed mode(use `python` to launch), raise error if has multi cards
if (
fluid.core.is_compiled_with_cuda()
and fluid.core.get_cuda_device_count() > 1
):
self.assertRaises(ValueError, test_single_error)
else:
test_single_error()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
6d35ff8ff3c6737289e4c00e8be3360f52f6ca99 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02773/s910963404.py | d617638ace116c73c96cf5a65311b85b85f81d23 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import sys
N = int(input())
input = sys.stdin.readline
lis = {}
for i in range(N):
s = input().rstrip()
if s in lis:
lis[s] += 1
else:
lis[s] = 1
# sort by count, descending; the first entry carries the maximum count
lis2 = sorted(lis.items(), key=lambda x: x[1], reverse=True)
x = next(iter(lis2))
# collect every string tied for the maximum count
keys = []
for i in lis2:
if i[1] == x[1]:
keys.append(i[0])
ans = sorted(keys)
for i in ans:
print(i) | [
"[email protected]"
] | |
253aa95b971032804048120ff1cfdb28608f6cba | 16e266cf50a712ed29a4097e34504aac0281e6cb | /Functions/venv/lib/python3.6/site-packages/BaseExtensions/Logging.py | 99776019d50ef0f45a7210eaff3874df85494d98 | [] | no_license | felix-ogutu/PYTHON-PROJECTS | 9dd4fdcfff6957830587b64c5da3b5c3ade3a27e | 8c1297dbda495078509d06a46f47dc7ee60b6d4e | refs/heads/master | 2023-06-05T04:41:36.727376 | 2021-06-25T20:36:52 | 2021-06-25T20:36:52 | 380,348,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,704 | py | import logging
import logging.config
import os
import sys
import tarfile
from logging.handlers import RotatingFileHandler
from typing import *
__all__ = [
'logging',
'LoggingManager',
'Error_Handler',
'Info_Handler',
'Console_Error', 'Console_Debug', 'Console_Info',
'InfoFilter', 'ErrorFilter', 'DebugFilter', 'PngImagePlugin_Filter',
'LogPaths', 'Formats'
]
class Formats(object):
def __init__(self,
info="""%(name)s ---- %(message)s""",
simple="""[ %(levelname)-10s ] [ %(module)s.%(funcName)s @ Line#: %(lineno)d ] [ %(processName)s.%(threadName)s ]
%(message)s
""",
detailed="""
[ %(levelname)-10s %(asctime)s ]
[ FILE: "%(pathname)s" : %(module)s.%(funcName)s @ Line # %(lineno)d ]
[ Process ID: %(process)-7d | %(processName)s ]
[ Thread ID: %(thread)-7d | %(threadName)s ]
MESSAGE: %(message)s
"""):
self.INFO: Final[str] = info
self.SIMPLE: Final[str] = simple
self.DETAILED: Final[str] = detailed
class LogPaths(object):
def __init__(self, *processes: str, app_name: str, root_path: str, max_logs: int = 5, max_log_size: int = 10240):
self.MAX_LOGS: Final[int] = max_logs
self.MAX_LOG_SIZE: Final[int] = max_log_size
self.APP_NAME: Final[str] = app_name
self._root_path: Final[str] = root_path
self.__log_paths__ = { }
for proc in set(processes):
self.__log_paths__[proc] = os.path.join(root_path, self.GetFileName(proc))
self.__log_paths__[self.GetErrorName(proc)] = os.path.join(root_path, self.GetErrorFileName(proc))
self.__dict__.update(self.__log_paths__)
@property
def logs(self) -> List[str]:
        # each base log path plus the rotated backups RotatingFileHandler may create
        return [path if i == 0 else f'{path}.{i}' for i in range(self.MAX_LOGS + 1) for path in self.__log_paths__.values()]
def Zip_Log_Files(self, path: str):
with tarfile.open(path, "w:gz") as tar:
for file in self.logs:
tar.add(file, arcname=os.path.basename(file))
def Delete_Log_Files(self):
for file in self.logs:
if os.path.isfile(file): os.remove(file)
@staticmethod
def GetFileName(base: str): return f'{base}.log'
@staticmethod
def GetErrorFileName(base: str): return f'{base}_errors.log'
@staticmethod
def GetErrorName(base: str): return f'{base}_errors'
class DebugFilter(logging.Filter):
_allowed = (logging.DEBUG,)
def filter(self, rec):
return rec.levelno in self._allowed
class InfoFilter(logging.Filter):
_allowed = (logging.INFO,)
def filter(self, rec):
return rec.levelno in self._allowed
class ErrorFilter(logging.Filter):
_allowed = (logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL)
def filter(self, rec):
return rec.levelno in self._allowed
class PngImagePlugin_Filter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
if record.module == 'PngImagePlugin':
return False
return True
class Error_Handler(RotatingFileHandler):
_allowed = (logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL)
def __init__(self, *, file, path: LogPaths, fmt: Formats, **kwargs):
super().__init__(filename=file, maxBytes=path.MAX_LOG_SIZE, backupCount=path.MAX_LOGS, **kwargs)
self.formatter = logging.Formatter(fmt=fmt.DETAILED)
self.setLevel(logging.ERROR)
self.addFilter(ErrorFilter())
class Info_Handler(logging.FileHandler):
_allowed = (logging.DEBUG, logging.INFO)
def __init__(self, *, file, mode='w', fmt: Formats, **kwargs):
super().__init__(filename=file, mode=mode, **kwargs)
self.formatter = logging.Formatter(fmt=fmt.INFO)
self.setLevel(logging.DEBUG)
self.addFilter(InfoFilter())
class Console_Debug(logging.StreamHandler):
_allowed: tuple
def __init__(self, *, fmt: Formats, stream=sys.stdout):
super().__init__(stream)
self.setLevel(logging.DEBUG)
self.formatter = logging.Formatter(fmt=fmt.INFO)
self.addFilter(DebugFilter())
class Console_Info(logging.StreamHandler):
_allowed: tuple
def __init__(self, *, fmt: Formats, stream=sys.stdout):
super().__init__(stream)
self.setLevel(logging.DEBUG)
self.formatter = logging.Formatter(fmt=fmt.INFO)
self.addFilter(InfoFilter())
class Console_Error(logging.StreamHandler):
_allowed: tuple
# def __init__(self, *, allowed=(logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL), stream=sys.stderr):
def __init__(self, *, fmt: Formats, stream=sys.stderr):
super().__init__(stream)
# self._allowed = allowed
self.setLevel(logging.WARNING)
self.formatter = logging.Formatter(fmt=fmt.DETAILED)
self.addFilter(ErrorFilter())
class InstanceError(Exception): pass
class LoggingManager(object):
mapper: Dict[Type, str]
def __init__(self, *types: Type, mapper: Dict[Type, str] = None, paths: LogPaths, fmt: Formats = Formats()):
self.fmt = fmt
self.paths = paths
if not isinstance(mapper, dict):
mapper = { item: item.__name__ for item in set(types) }
self.mapper = mapper
logging.basicConfig(format=fmt.DETAILED, level=logging.DEBUG)
self._root_logger = logging.getLogger()
self._root_logger.handlers.clear()
self.app_logger = logging.getLogger(self.paths.APP_NAME)
logging.getLogger("PIL.PngImagePlugin").disabled = True
def CreateLogger(self, source, *, debug: bool = __debug__) -> logging.Logger:
for key, value in self.mapper.items():
# if issubclass(source, key): raise InstanceError('source is not identified')
if isinstance(source, key):
logger = self.app_logger.getChild(source.__class__.__name__)
logger.addHandler(Info_Handler(file=self.paths.__log_paths__[value], fmt=self.fmt))
logger.addHandler(Error_Handler(file=self.paths.__log_paths__[LogPaths.GetErrorName(value)], fmt=self.fmt, path=self.paths))
logger.addHandler(Console_Error(fmt=self.fmt))
logger.addHandler(Console_Debug(fmt=self.fmt))
logger.addHandler(Console_Info(fmt=self.fmt))
logger.addFilter(PngImagePlugin_Filter())
logger.setLevel(logging.DEBUG if debug else logging.ERROR)
return logger
        else:
            # for/else: reached only when no mapper entry matched the source
            raise ValueError('source is not identified')
@classmethod
def FromTypes(cls, *types: Type, app_name: str, root_path: str):
mapper = { item: item.__name__ for item in types }
return cls(mapper=mapper,
paths=LogPaths(*mapper.values(), app_name=app_name, root_path=root_path))
if __name__ == '__main__':
from PythonDebugTools import *
class Test(object): pass
class Other(object): pass
m = LoggingManager.FromTypes(Test, Other, app_name='app', root_path='.')
PrettyPrint(m.paths.Test)
PrettyPrint(m.paths.Test_errors)
PrettyPrint(m.paths.Other)
PrettyPrint(m.paths.Other_errors)
PrettyPrint('m.paths.logs', m.paths.logs)
Print(m.CreateLogger(Test(), debug=True))
Print(m.CreateLogger(Other(), debug=True))
try: m.CreateLogger(Other, debug=True)
except Exception as e: print_exception(e)
| [
"[email protected]"
] | |
89b5f996db03809d137daf450f8d3b3117aedc9b | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/base/covtype.py | 4eb475ea2ceced1c55668969e5b994243294547d | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 11,953 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 04 08:00:16 2014
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import lrange, lzip, range
import numpy as np
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
Warning: Some of the options and defaults in cov_kwds may be changed in a
future version.
The covariance keywords provide an option 'scaling_factor' to adjust the
scaling of the covariance matrix, that is the covariance is multiplied by
this factor if it is given and is not `None`. This allows the user to
adjust the scaling of the covariance matrix to match other statistical
packages.
For example, `scaling_factor=(nobs - 1.) / (nobs - k_params)` provides a
correction so that the robust covariance matrices match those of Stata in
some models like GLM and discrete Models.
The following covariance types and required or optional arguments are
currently available:
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
            If True the sandwich covariance is calculated with a small
            sample correction.
            If False the sandwich covariance is calculated without
            small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
    - 'nw-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
            If False the sandwich covariance is calculated without
            small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of 'covtype='cluster''
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
    - 'nw-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
        The data needs to be sorted in this case; the time series for
        each panel unit or cluster need to be stacked.
keywords
- `time` array_like (required) : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
            If False the sandwich covariance is calculated without
            small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
Reminder:
`use_correction` in "nw-groupsum" and "nw-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
import statsmodels.stats.sandwich_covariance as sw
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
# this doesn't work for most models, use raw instance instead from fit
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
            # user didn't explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
            raise ValueError('heteroscedasticity robust covariance ' +
'does not use keywords')
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'robust ' + '(' + cov_type + ')')
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper(), None)
if res.cov_params_default is None:
# results classes that don't have cov_HCx attribute
res.cov_params_default = sw.cov_white_simple(self,
use_correction=False)
elif cov_type == 'HAC':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'and autocorrelation robust (HAC) using %d lags and %s small ' +
'sample correction') % (maxlags, ['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
use_correction=use_correction)
elif cov_type == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
        res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-panel':
        # panel HAC (Newey-West) standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()
groupidx = lzip([0] + tt, tt + [len(time)])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
        res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(np.diff(time) < 0)[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = (
'Driscoll and Kraay Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
# generic optional factor to scale covariance
sc_factor = kwds.get('scaling_factor', None)
res.cov_kwds['scaling_factor'] = sc_factor
if sc_factor is not None:
res.cov_params_default *= sc_factor
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
| [
"[email protected]"
] | |
4411eff5cb2a890b6f57eef93094fb70a0a66faa | 297efd4afeb46c0b56d9a975d76665caef213acc | /src/multiplicity/migrations/0026_auto_20181208_0740.py | 82eca20d3e465894fae450ae2a44b7598e792bfd | [
"MIT"
] | permissive | metabolism-of-cities/metabolism-of-cities-platform-v3 | 67716c3daae86a0fe527c18aef26ce29e069cbcc | c754d3b1b401906a21640b8eacb6b724a448b31c | refs/heads/master | 2022-12-06T22:56:22.207853 | 2020-08-25T09:53:51 | 2020-08-25T09:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 2.1.3 on 2018-12-08 07:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('multiplicity', '0025_auto_20181208_0719'),
]
operations = [
migrations.AlterField(
model_name='referencespacetype',
name='process',
field=models.ForeignKey(blank=True, limit_choices_to={'slug__isnull': False}, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
),
]
| [
"[email protected]"
] | |
393df51822a4e25553dfc5130fbde0ab93e78e44 | e7efae2b83216d9621bd93390959d652de779c3d | /datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py | 20fd40589bb3d617df172095fcf04742cd2fc9a9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 12,459 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import abc
import json
import os
from typing import Dict # noqa: F401
import six
from ...datastructures import JSONDict
from ...git import git_show_file
from ...utils import get_metadata_file, has_logs, is_metric_in_metadata_file, read_metadata_rows
from ..constants import V1, V1_STRING, V2, V2_STRING
class ValidationResult(object):
def __init__(self):
self.failed = False
self.warning = False
self.fixed = False
self.messages = {'success': [], 'warning': [], 'failure': [], 'info': []}
def __str__(self):
return '\n'.join(['\n'.join(messages) for messages in self.messages.values()])
def __repr__(self):
return str(self)
@six.add_metaclass(abc.ABCMeta)
class BaseManifestValidator(object):
def __init__(
self,
is_extras=False,
is_marketplace=False,
check_in_extras=True,
check_in_marketplace=True,
ctx=None,
version=V1,
skip_if_errors=False,
):
self.result = ValidationResult()
self.is_extras = is_extras
self.is_marketplace = is_marketplace
self.check_in_extras = check_in_extras
self.check_in_markeplace = check_in_marketplace
self.ctx = ctx
self.version = version
self.skip_if_errors = skip_if_errors
def should_validate(self):
"""Determine if validator applicable given the current repo.
Logic will always validate integrations-core, but flags exist to
selectively include extras and marketplace
"""
if not self.is_extras and not self.is_marketplace:
return True
if self.is_extras and self.check_in_extras:
return True
if self.is_marketplace and self.check_in_markeplace:
return True
return False
def validate(self, check_name, manifest, should_fix):
# type: (str, Dict, bool) -> None
"""Validates the decoded manifest. Will perform inline changes if fix is true"""
raise NotImplementedError
def fail(self, error_message):
self.result.failed = True
self.result.messages['failure'].append(error_message)
def warning(self, warning_message):
self.result.warning = True
self.result.messages['warning'].append(warning_message)
def fix(self, problem, solution):
self.result.warning_msg = problem
self.result.success_msg = solution
self.result.fixed = True
self.result.failed = False
def __repr__(self):
return str(self.result)
class MaintainerValidator(BaseManifestValidator):
MAINTAINER_PATH = {V1: '/maintainer', V2: '/author/support_email'}
def validate(self, check_name, decoded, fix):
if not self.should_validate():
return
correct_maintainer = '[email protected]'
path = self.MAINTAINER_PATH[self.version]
maintainer = decoded.get_path(path)
if not maintainer.isascii():
self.fail(f' `maintainer` contains non-ascii character: {maintainer}')
return
if maintainer != correct_maintainer:
output = f' incorrect `maintainer`: {maintainer}'
if fix:
decoded.set_path(path, correct_maintainer)
self.fix(output, f' new `maintainer`: {correct_maintainer}')
else:
self.fail(output)
class MetricsMetadataValidator(BaseManifestValidator):
METADATA_PATH = {V1: "/assets/metrics_metadata", V2: "/assets/integration/metrics/metadata_path"}
def validate(self, check_name, decoded, fix):
# metrics_metadata
path = self.METADATA_PATH[self.version]
metadata_in_manifest = decoded.get_path(path)
metadata_file = get_metadata_file(check_name)
metadata_file_exists = os.path.isfile(metadata_file)
if not metadata_in_manifest and metadata_file_exists:
# There is a metadata.csv file but no entry in the manifest.json
self.fail(' metadata.csv exists but not defined in the manifest.json of {}'.format(check_name))
elif metadata_in_manifest and not metadata_file_exists:
# There is an entry in the manifest.json file but the referenced csv file does not exist.
self.fail(' metrics_metadata in manifest.json references a non-existing file: {}.'.format(metadata_file))
class MetricToCheckValidator(BaseManifestValidator):
CHECKS_EXCLUDE_LIST = {
'agent_metrics', # this (agent-internal) check doesn't guarantee a list of stable metrics for now
'moogsoft',
'snmp',
}
METRIC_TO_CHECK_EXCLUDE_LIST = {
'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file.
'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
METADATA_PATH = {V1: "/assets/metrics_metadata", V2: "/assets/integration/metrics/metadata_path"}
METRIC_PATH = {V1: "/metric_to_check", V2: "/assets/integration/metrics/check"}
PRICING_PATH = {V1: "/pricing", V2: "/pricing"}
def validate(self, check_name, decoded, _):
if not self.should_validate() or check_name in self.CHECKS_EXCLUDE_LIST:
return
metadata_path = self.METADATA_PATH[self.version]
metadata_in_manifest = decoded.get_path(metadata_path)
# metric_to_check
metric_path = self.METRIC_PATH[self.version]
metric_to_check = decoded.get_path(metric_path)
pricing_path = self.PRICING_PATH[self.version]
pricing = decoded.get_path(pricing_path) or []
if metric_to_check:
metrics_to_check = metric_to_check if isinstance(metric_to_check, list) else [metric_to_check]
for metric in metrics_to_check:
# if metric found in pricing, skip and continue evaluating other metrics_to_check
if any(p.get('metric') == metric for p in pricing):
continue
metric_integration_check_name = check_name
# snmp vendor specific integrations define metric_to_check
# with metrics from `snmp` integration
if check_name.startswith('snmp_') and not metadata_in_manifest:
metric_integration_check_name = 'snmp'
if (
not is_metric_in_metadata_file(metric, metric_integration_check_name)
and metric not in self.METRIC_TO_CHECK_EXCLUDE_LIST
):
self.fail(f' metric_to_check not in metadata.csv: {metric!r}')
elif metadata_in_manifest:
# if we have a metadata.csv file but no `metric_to_check` raise an error
metadata_file = get_metadata_file(check_name)
if os.path.isfile(metadata_file):
for _, row in read_metadata_rows(metadata_file):
# there are cases of metadata.csv files with just a header but no metrics
if row:
self.fail(' metric_to_check not included in manifest.json')
class ImmutableAttributesValidator(BaseManifestValidator):
"""
Ensure that immutable attributes haven't changed
Skip if the manifest is a new file (i.e. new integration) or if the manifest is being upgraded to V2
"""
MANIFEST_VERSION_PATH = "manifest_version"
IMMUTABLE_FIELD_PATHS = {
V1: ("integration_id", "display_name", "guid"),
V2: (
"app_id",
"app_uuid",
"assets/integration/id",
"assets/integration/source_type_name",
),
}
SHORT_NAME_PATHS = {
V1: (
"assets/dashboards",
"assets/monitors",
"assets/saved_views",
),
V2: (
"assets/dashboards",
"assets/monitors",
"assets/saved_views",
),
}
def validate(self, check_name, decoded, fix):
# Check if previous version of manifest exists
# If not, this is a new file so this validation is skipped
try:
previous = git_show_file(path=f"{check_name}/manifest.json", ref="origin/master")
previous_manifest = JSONDict(json.loads(previous))
except Exception:
self.result.messages['info'].append(
" skipping check for changed fields: integration not on default branch"
)
return
# Skip this validation if the manifest is being updated from 1.0.0 -> 2.0.0
current_manifest = decoded
if (
previous_manifest[self.MANIFEST_VERSION_PATH] == "1.0.0"
and current_manifest[self.MANIFEST_VERSION_PATH] == "2.0.0"
):
self.result.messages['info'].append(" skipping check for changed fields: manifest version was upgraded")
return
# Check for differences in immutable attributes
for key_path in self.IMMUTABLE_FIELD_PATHS[self.version]:
previous_value = previous_manifest.get_path(key_path)
current_value = current_manifest.get_path(key_path)
if previous_value != current_value:
                output = (
                    f'Attribute `{current_value}` at `{key_path}` is not allowed '
                    f'to be modified. Please revert it to the original value `{previous_value}`.'
                )
self.fail(output)
# Check for differences in `short_name` keys
for key_path in self.SHORT_NAME_PATHS[self.version]:
previous_short_name_dict = previous_manifest.get_path(key_path) or {}
current_short_name_dict = current_manifest.get_path(key_path) or {}
# Every `short_name` in the prior manifest must be in the current manifest
# The key cannot change and it cannot be removed
previous_short_names = previous_short_name_dict.keys()
current_short_names = set(current_short_name_dict.keys())
for short_name in previous_short_names:
if short_name not in current_short_names:
                    output = (
                        f'Short name `{short_name}` at `{key_path}` is not allowed '
                        'to be modified. Please revert to original value.'
                    )
self.fail(output)
class LogsCategoryValidator(BaseManifestValidator):
"""If an integration defines logs it should have the log collection category"""
LOG_COLLECTION_CATEGORY = {V1: "log collection", V2: "Category::Log Collection"}
CATEGORY_PATH = {V1: "/categories", V2: "/tile/classifier_tags"}
IGNORE_LIST = {
'databricks', # Logs are provided by Spark
'docker_daemon',
'ecs_fargate', # Logs are provided by FireLens or awslogs
'cassandra_nodetool', # Logs are provided by cassandra
'jmeter',
'kafka_consumer', # Logs are provided by kafka
'kubernetes',
'pan_firewall',
'altostra',
'hasura_cloud',
'sqreen',
'openai', # Logs are submitted to the logs intake API
}
def validate(self, check_name, decoded, fix):
path = self.CATEGORY_PATH[self.version]
categories = decoded.get_path(path) or []
check_has_logs = has_logs(check_name)
log_collection_category = self.LOG_COLLECTION_CATEGORY[self.version]
check_has_logs_category = log_collection_category in categories
if check_has_logs == check_has_logs_category or check_name in self.IGNORE_LIST:
return
if check_has_logs:
output = ' required category: ' + log_collection_category
if fix:
                correct_categories = sorted(categories + [log_collection_category])
decoded.set_path(path, correct_categories)
self.fix(output, f' new `categories`: {correct_categories}')
else:
self.fail(output)
else:
output = (
' This integration does not have logs, please remove the category: '
+ log_collection_category
+ ' or define the logs properly'
)
self.fail(output)
class VersionValidator(BaseManifestValidator):
def validate(self, check_name, decoded, fix):
if decoded.get('manifest_version', V2_STRING) == V1_STRING:
self.fail('Manifest version must be >= 2.0.0')
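# Driver sketch (illustrative; the validator constructor arguments are an
# assumption, since BaseManifestValidator.__init__ is defined elsewhere):
#
#     validators = [MaintainerValidator(), LogsCategoryValidator(), VersionValidator()]
#     for validator in validators:
#         validator.validate(check_name, decoded_manifest, should_fix)
#         print(validator.result)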
# ---- file: FlatData/ScenarioScriptGroup1ExcelTable.py ----
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ScenarioScriptGroup1ExcelTable(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ScenarioScriptGroup1ExcelTable()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsScenarioScriptGroup1ExcelTable(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# ScenarioScriptGroup1ExcelTable
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ScenarioScriptGroup1ExcelTable
def DataList(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from FlatData.ScenarioScriptGroup1Excel import ScenarioScriptGroup1Excel
obj = ScenarioScriptGroup1Excel()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ScenarioScriptGroup1ExcelTable
def DataListLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ScenarioScriptGroup1ExcelTable
def DataListIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def Start(builder): builder.StartObject(1)
def ScenarioScriptGroup1ExcelTableStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def ScenarioScriptGroup1ExcelTableAddDataList(builder, DataList):
"""This method is deprecated. Please switch to AddDataList."""
return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ScenarioScriptGroup1ExcelTableStartDataListVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def ScenarioScriptGroup1ExcelTableEnd(builder):
"""This method is deprecated. Please switch to End."""
    return End(builder)
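# Usage sketch (illustrative; not produced by the FlatBuffers compiler):
# given `buf`, a bytes object holding a finished buffer whose root is this
# table, walk the DataList vector of ScenarioScriptGroup1Excel rows.
def iter_scenario_rows(buf):
    table = ScenarioScriptGroup1ExcelTable.GetRootAs(buf, 0)
    for i in range(table.DataListLength()):
        yield table.DataList(i)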
"[email protected]"
] | |
2226d8e21d5a65eb4b18bd6bf4a8b2b930d5c84f | 929a816fc299959d0f8eb0dd51d064be2abd6b78 | /LeetCode/easy - Array/1304. Find N Unique Integers Sum up to Zero/.ipynb_checkpoints/solution-checkpoint.py | 038be3063eb5258cd1e9f8bdc9d7066cfb184c3e | [
"MIT"
] | permissive | vincent507cpu/Comprehensive-Algorithm-Solution | 27940da7bc0343921930a2eafbd649da93a5395d | 04e01e49622457f09af2e1133954f043c0c92cb9 | refs/heads/master | 2023-07-20T07:12:15.590313 | 2021-08-23T23:42:17 | 2021-08-23T23:42:17 | 258,644,691 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | class Solution:
# my solution
def sumZero(self, n: int) -> List[int]:
        # range(n-1) collides with its negated sum when n == 2; use 1..n-1
        return list(range(1, n)) + [-sum(range(1, n))]
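# Quick self-check (illustrative, not part of the original submission): the
# result must contain n unique integers that sum to zero.
if __name__ == "__main__":
    for n in range(1, 8):
        out = Solution().sumZero(n)
        assert len(out) == n == len(set(out)) and sum(out) == 0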
"[email protected]"
] | |
5032205c24261703355ad0399ea9138603b23d16 | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/tests/ap/CB_AP_Download_Image.py | 9e2f694d41a392e7f7a8b96829e4fd450cb80c4b | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,890 | py | # Copyright (C) 2010 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""Description:
Prerequisite (Assumptions about the state of the testbed/DUT):
Required components:
Test parameters:
Result type: PASS/FAIL
Results: PASS
FAIL otherwise
Messages:
- If PASS,
- If FAIL, prints out the reason for failure
Test procedure:
1. Config:
-
2. Test:
-
3. Cleanup:
-
How is it tested: (to be completed by test code developer)
"""
import logging
import os
import re
from RuckusAutoTest.models import Test
from contrib.download import image_resolver as imgres
class CB_AP_Download_Image(Test):
required_components = []
parameter_description = {'ap_build_stream':'build stream of Active Point',
'ap_bno':'build no of Active Point',
}
def config(self, conf):
self._init_test_params(conf)
self._retrive_carrier_bag()
def test(self):
self._download_ap_image()
if self.errmsg:
return self.returnResult('FAIL', self.errmsg)
else:
self._update_carrier_bag()
return self.returnResult('PASS', self.passmsg)
def cleanup(self):
pass
def _init_test_params(self, conf):
self.conf = {}
self.conf.update(conf)
self.errmsg = ''
self.passmsg = ''
def _retrive_carrier_bag(self):
if self.carrierbag.has_key('ap_fw_upgrade_cfg'):
self.conf['ap_fw_upgrade_cfg'] = self.carrierbag['ap_fw_upgrade_cfg']
def _update_carrier_bag(self):
self.carrierbag['image_file_path'] = self.image_file_path
def _download_ap_image(self):
'''
Download ap image from yanming server.
'''
try:
logging.info('Get the image information build server, build stream and build no')
if self.conf.has_key('ap_fw_upgrade_cfg'):
ap_fw_upgrade_cfg = self.conf['ap_fw_upgrade_cfg']
model = 'mf2211'
if self.conf.has_key('model'):
model = self.conf['model']
up_flag = True
if self.conf.has_key('up_flag'):
up_flag = self.conf['up_flag']
all_models_up_cfg = ap_fw_upgrade_cfg['up_cfg']
build_server = ap_fw_upgrade_cfg['build_server']
if all_models_up_cfg.has_key(model):
model_up_cfg = all_models_up_cfg[model]
if up_flag:
ap_build_stream = model_up_cfg['target_build_stream']
ap_bno = int(model_up_cfg['target_bno'])
else:
ap_build_stream = model_up_cfg['baseline_build_stream']
ap_bno = int(model_up_cfg['baseline_bno'])
else:
model_up_cfg = {}
self.errmsg = 'No upgrade config for specified model %s' % (model,)
else:
ap_build_stream = self.conf['ap_build_stream']
ap_bno = self.conf['ap_bno']
if self.conf.has_key('build_server'):
build_server = self.conf['build_server']
else:
build_server = None
self.image_file_path = self._download_image(build_server, ap_build_stream, ap_bno)
logging.info('Firmware config: %s' % self.image_file_path)
self.passmsg = "Download and get image files for %s: location[%s], Build stream[%s], Build no[%s]" % \
(model, os.getcwd(), ap_build_stream, ap_bno)
        except Exception as ex:
            self.errmsg = str(ex)
    def _escape(self, file_path):
        # Replace path separators (and stray '|' or '^') with escaped
        # backslashes, yielding a Windows-style path.
        expr = "[/|\\^\\\\]"
        return re.sub(expr, "\\\\", file_path)
def _download_image(self, build_server, build_stream, fw_bno):
'''
Download ap image from build server based on build stream and build no,
and save as <Build stream>.<Build no>.tar.gz
'''
chk_name_list = ["%s.%s.tar.gz" % (build_stream, fw_bno), #MM2225_mainline.85.tar.gz
"%s.%s.bl7" % (build_stream, fw_bno), #MM2225_mainline.85.bl7
]
exist, file_name = self._chk_img_file_local(chk_name_list)
if not exist:
logging.info('Download image from server: [%s:%s:%s]' % (build_server, build_stream, fw_bno))
if build_server:
fname = imgres.download_build(build_stream, fw_bno, build_server)
else:
fname = imgres.download_build(build_stream, fw_bno)
else:
logging.info('Image exist in local computer: %s' % (file_name))
fname = file_name
fw_tar_filename = self._escape(os.path.realpath(fname))
#filetype='(\d+\.){1,5}Bl7$' #'.+\.Bl7$',
#fw_img_full_path = imgres.get_image(fw_tar_filename, filetype = filetype)
#fw_img_filename = fw_img_full_path.split("/")[-1]
return fw_tar_filename
def _chk_img_file_local(self, chk_name_list):
result = False
file_name = ''
for chk_name in chk_name_list:
if os.path.exists(chk_name):
file_name = chk_name
result = True
break
return result, file_name
| [
"[email protected]"
] | |
0cdbc657dc62827a0cc554f6fa825a3bca944a6d | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /findLadders.py | bbee45ac5eb94f388fe2af8dfaa11679d7959a31 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | from collections import deque
from typing import List
# 不知道是哪个
class Solution1:
def findLadders(self, beginWord: str, endWord: str,
wordList: List[str]) -> int:
wordList.append(beginWord)
if endWord not in wordList:
return []
dic = dict()
levelLog = {word: 5000 for word in wordList}
before = {word: [] for word in wordList}
for word in wordList:
for i in range(len(word)):
key = word[:i] + '*' + word[i + 1:]
if key in dic.keys():
dic[key].append(word)
else:
dic[key] = [word]
queue = [(beginWord, 1)]
levelLog[beginWord] = 1
stopLevel = 5000
while len(queue) > 0:
item = queue[0]
del queue[0]
level = item[1] + 1
if level > stopLevel:
break
for i in range(len(item[0])):
key = item[0][:i] + '*' + item[0][i + 1:]
for neighbor in dic[key]:
if levelLog[neighbor] == 5000:
levelLog[neighbor] = level
before[neighbor].append(item[0])
if neighbor == endWord:
stopLevel = level
queue.append((neighbor, level))
elif levelLog[neighbor] == level and item[0] not in before[
neighbor]:
before[neighbor].append(item[0])
def getAll(word):
if word == beginWord:
return [[beginWord]]
out = []
for b in before[word]:
out += [path + [word] for path in getAll(b)]
return out
return getAll(endWord)
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# sl=Solution()
# print(sl.findLadders(beginWord,endWord,wordList))
class Solution:
def findLadders(self, beginWord: str, endWord: str,
wordList: List[str]) -> List[str]:
if endWord not in wordList:
return []
if beginWord not in wordList:
wordList.append(beginWord)
dic = dict()
for word in wordList:
for i in range(len(word)):
key = word[:i] + '*' + word[i + 1:]
if key in dic.keys():
dic[key].append(word)
else:
dic[key] = [word]
before = {k: "" for k in wordList}
queue = deque([(beginWord, 0)])
level = 0
find = False
while len(queue) > 0:
word, level = queue.popleft()
if level > 5000:
return []
for i in range(len(word)):
key = word[:i] + '*' + word[i + 1:]
for neighbor in dic[key]:
if neighbor == word:
continue
if before[neighbor] == "":
before[neighbor] = word
else:
continue
if neighbor == endWord:
find = True
break
queue.append((neighbor, level + 1))
if find:
break
if find:
break
if not find:
return []
res = []
tmp = endWord
while tmp != beginWord:
res.append(tmp)
tmp = before[tmp]
res.append(tmp)
return res[::-1]
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log", "cog"]
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log"]
beginWord = "hot"
endWord = "dog"
wordList = ["hot", "dog"]
print(Solution().findLadders(beginWord, endWord, wordList))
# ---- file: venv/lib/python3.7/site-packages/jwt/__init__.py ----
# -*- coding: utf-8 -*-
# flake8: noqa
"""
JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
__title__ = "pyjwt"
__version__ = "1.7.1"
__author__ = "José Padilla"
__license__ = "MIT"
__copyright__ = "Copyright 2015-2018 José Padilla"
from .api_jwt import (
encode,
decode,
register_algorithm,
unregister_algorithm,
get_unverified_header,
PyJWT,
)
from .api_jws import PyJWS
from .exceptions import (
InvalidTokenError,
DecodeError,
InvalidAlgorithmError,
InvalidAudienceError,
ExpiredSignatureError,
ImmatureSignatureError,
InvalidIssuedAtError,
InvalidIssuerError,
ExpiredSignature,
InvalidAudience,
InvalidIssuer,
MissingRequiredClaimError,
InvalidSignatureError,
PyJWTError,
)
# ---- file: climetlab/plotting/drivers/magics/__init__.py ----
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
from collections import defaultdict
import yaml
from climetlab.decorators import locked
MAGICS_KEYS = None
MAGICS_DEF = None
MAGICS_PARAMS = None
_inited = False
@locked
def init():
global _inited, MAGICS_KEYS, MAGICS_DEF, MAGICS_PARAMS
if not _inited:
MAGICS_KEYS = defaultdict(set)
MAGICS_PARAMS = defaultdict(dict)
with open(os.path.join(os.path.dirname(__file__), "magics.yaml")) as f:
MAGICS_DEF = yaml.load(f, Loader=yaml.SafeLoader)
for action, params in MAGICS_DEF.items():
for param in params:
name = param["name"]
MAGICS_KEYS[name].add(action)
MAGICS_PARAMS[action][name] = param
_inited = True
def magics_keys_to_actions():
init()
return MAGICS_KEYS
def magics_keys_definitions():
init()
return MAGICS_DEF
def magics_keys_parameters(name):
init()
return MAGICS_PARAMS[name]
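# Usage sketch (illustrative): the tables built by init() map Magics
# parameter names to the actions that accept them. The action name below is
# an assumption; real names come from the bundled magics.yaml.
#
#     actions = magics_keys_to_actions()        # {param_name: {action, ...}}
#     mcont_params = magics_keys_parameters("mcont")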
# ---- file: DbConnect/test.py ----
# 1) Show products
# 2) Create product
# 3) Delete product
# 1
# ...
import psycopg2
def change(sql):
connection = psycopg2.connect(
host="localhost",
port="5432",
user="kirito",
password="passanya",
dbname="crm"
)
cursor = connection.cursor()
cursor.execute(sql)
connection.commit()
def select(sql):
connection = psycopg2.connect(
host="localhost",
port="5432",
user="kirito",
password="passanya",
dbname="crm"
)
cursor = connection.cursor()
cursor.execute(sql)
data = cursor.fetchall()
return data
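# Minimal menu loop (sketch) wiring the helpers above to the menu described
# in the comments at the top of this file. The "products" table and its
# columns are illustrative assumptions; production code should prefer
# parameterized queries over string formatting.
def main():
    while True:
        choice = input("1) Show products 2) Create product 3) Delete product 0) Exit: ")
        if choice == "1":
            for row in select("SELECT * FROM products"):
                print(row)
        elif choice == "2":
            name = input("Product name: ")
            change("INSERT INTO products (name) VALUES ('%s')" % name)
        elif choice == "3":
            product_id = input("Product id: ")
            change("DELETE FROM products WHERE id = %s" % product_id)
        else:
            break
if __name__ == "__main__":
    main()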
| [
"[email protected]"
] | |
9f69bbac15e78ca0cdd3215052def11f8adb988e | 1a3eb334e9578e23f63e17b4ee8e51d69405d29f | /cluster_analysis/interogate_clusters_for_gene_of_interest.py | 6e995babf1c21610b93935f749d52b041519d792 | [] | no_license | peterthorpe5/public_scripts | 6f0ab79c7a748dbd183ee7173576f7bcf25d7f54 | a3c64198aad3709a5c4d969f48ae0af11fdc25db | refs/heads/master | 2023-02-08T02:34:18.109091 | 2023-01-25T13:04:23 | 2023-01-25T13:04:23 | 43,360,640 | 35 | 23 | null | 2016-09-15T10:01:11 | 2015-09-29T10:21:05 | Python | UTF-8 | Python | false | false | 6,271 | py | #!/usr/bin/env python
# Code to iterogate clustering with a list
#
# (c) The James Hutton Institute 2016-2017
# Author: Peter Thorpe
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
from collections import Counter
import collections
if "-v" in sys.argv or "--version" in sys.argv:
print("0.01 - get the clusters from a list of seq of interest")
sys.exit(os.system(cmd))
def parse_clusters(clusters):
"""funct to return list of cluserts"""
with open(clusters) as handle:
return handle.read().split("\n")
def get_set_of_interest(infile):
"""funtcion to load in a list of gene names"""
with open(infile) as handle:
data = handle.read().split()
outset = set([])
for entry in data:
outset.add(entry.rstrip())
return outset
##################################################################
usage = """Use as follows:
$ python unique_comprisons.py -i list_of_gene -c cluster_file -a allfile.wanted -o outfile
"""
parser = OptionParser(usage=usage)
parser.add_option("-i","--wanted", dest="infile",
default=None,
help="infile with the names of interest",
metavar="FILE")
parser.add_option("-c","--clusters", dest="clusters",
default="Orthofinder_OrthologousGroups_final.txt",
help="clusters file",
metavar="FILE")
parser.add_option("-a","--all", dest="all_file",
default="all_unique_v1.0.txt",
help="all unique gene names file",
metavar="FILE")
parser.add_option("-o", "--out", dest="out", default="result.out",
help="output filenames")
(options, args) = parser.parse_args()
infile = options.infile
clusters = options.clusters
all_file = options.all_file
out = options.out
################################################################
if __name__ == '__main__':
    if not os.path.isfile(clusters):
        print("sorry cannot find your %s file" % clusters)
        os._exit(0)
    if not os.path.isfile(infile):
        print("sorry cannot find your %s infile" % infile)
        os._exit(0)
working_dir = os.getcwd()
dest_dir = os.path.join(working_dir, 'results')
try:
os.makedirs(dest_dir)
except OSError:
print("folder already exists, I will write over what is in there!!")
cluster_data = parse_clusters(clusters)
wanted = get_set_of_interest(infile)
all_unique = get_set_of_interest(all_file)
    # track the interesting clusters so we don't get repeats
clusters_of_interest = set([])
outfile_path = os.path.join(working_dir, 'results', out)
f_out = open(outfile_path, "w")
allowed = ['Mpe', 'Mca', 'Api','Rpa', 'Dno']
print "starting wanted list = %d " % len(wanted)
# parser through the cluster file
wanted_tracked_count = 0
total_unique_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
Total_elements_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
Total_elements_matching_wanted_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
for line in cluster_data:
line = line.rstrip()
unique_counter_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
species_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
cluster_elements = line.split()
        # each entry separately
for gene in cluster_elements:
gene = gene.rstrip()
# only if the cluster contains a wanted gene
prefix = gene[:3]
Total_elements_counter[prefix] += 1
if gene in wanted:
# check to see if we have seen this line before
prefix = gene[:3]
Total_elements_matching_wanted_counter[prefix] += 1
if not line in clusters_of_interest:
clusters_of_interest.add(line.rstrip())
# count through th cluster again to see what speices are there
for gene in cluster_elements:
gene = gene.rstrip()
prefix = gene[:3]
if prefix in allowed:
# double check only allowed species are counted
species_counter[prefix] += 1
if gene in all_unique:
unique_counter_counter[prefix] += 1
total_unique_counter[prefix] += 1
if gene in wanted:
wanted_tracked_count = wanted_tracked_count + 1
#print len(line.split())
#print species_counter
#print unique_counter_counter
extra = "Cluster size = \t"
        species_counter_od = collections.OrderedDict(sorted(species_counter.items()))
        unique_counter_od = collections.OrderedDict(sorted(unique_counter_counter.items()))
        out_formatted = "%s%d\t\tSPECIES: %s\t\tUNIQUE:\t%s\n" % (extra,
                                                                  len(line.split()),
                                                                  species_counter_od,
                                                                  unique_counter_od)
f_out.write(out_formatted)
print "total found = %d" % wanted_tracked_count
print "total_unique_counter = ", collections.OrderedDict(sorted(total_unique_counter.items()))
print "Total_elements_counter = ", collections.OrderedDict(sorted(Total_elements_counter.items()))
print "Total_elements_matching_wanted_counter = ", collections.OrderedDict(sorted(Total_elements_matching_wanted_counter.items()))
    f_out.close()
# ---- file: config/wsgi.py ----
"""
WSGI config for Dani Braz project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# danibraz directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'danibraz'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# ---- file: working with relational databases in python/Pandas for more complex querying.py ----
# Using the function create_engine(), create an engine for the SQLite database Chinook.sqlite and assign it to the variable engine.
# Use the pandas function read_sql_query() to assign to the variable df the DataFrame of results from the following query:
# select all records from the Employee table where the EmployeeId is greater than or equal to 6 and ordered by BirthDate
# (make sure to use WHERE and ORDER BY in this precise order).
# Import packages
from sqlalchemy import create_engine
import pandas as pd
# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Execute query and store records in DataFrame: df
df = pd.read_sql_query(
'SELECT * FROM Employee WHERE EmployeeId >= 6 ORDER BY BirthDate', engine)
# Print head of DataFrame
print(df.head())
# ---- file: dist/restclient.app/Contents/Resources/pip/_vendor/requests/packages/chardet/sbcharsetprober.py ----
# (py2app alias file: the original file content is just the absolute path below)
/Users/devin/git/restclient/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py
# ---- file: zoom/fill.py ----
"""
fills templates
"""
import re
parts_re = (
r"""(\w+)\s*=\s*"([^"]*)"|"""
r"""(\w+)\s*=\s*'([^']*)'|"""
r"""(\w+)\s*=\s*([^\s]+)\s*|"""
r""""([^"]*)"|"""
r"""("")|"""
r"""(\w+)"""
)
tag_parts = re.compile(parts_re)
pattern_tpl = r'%s([a-z0-9_]+)\s*(.*?)%s'
patterns = {}
def _fill(tag_start, tag_end, text, callback):
"""do the actual work of filling in tags
>>> def filler(name, *args, **kwargs):
... if name == 'name':
... return 'Joe'
>>> _fill('<dz:', '>', 'Hello <dz:name>!', filler)
'Hello Joe!'
"""
def replace_tag(match):
"""replace a tag"""
name = match.groups(1)[0].lower()
rest = match.group(0)[len(name)+len(tag_start):-len(tag_end)]
parts = tag_parts.findall(rest)
keywords = dict(
a and (a, b) or c and (c, d) or e and (e, f)
for (a, b, c, d, e, f, g, h, i) in parts
if a or c or e
)
args = [
h or i or g or ""
for (_, _, _, _, _, _, g, h, i) in parts
if h or i or g
]
result = callback(name, *args, **keywords)
if result is None:
result = match.group(0)
return str(result)
tags = (tag_start, tag_end)
if tags not in patterns:
patterns[tags] = re.compile(
pattern_tpl % (tag_start, tag_end),
re.IGNORECASE
)
innerre = patterns[tags]
result = []
lastindex = 0
for outermatch in re.finditer("<!--.*?-->", text):
text_between = text[lastindex:outermatch.start()]
new_text = innerre.sub(replace_tag, text_between)
result.append(new_text)
lastindex = outermatch.end()
result.append(outermatch.group())
text_after = text[lastindex:]
result.append(innerre.sub(replace_tag, text_after))
return ''.join(x for x in result)
def fill(text, callback):
"""fill a tag in the double handlebars style
>>> def filler(name, *args, **kwargs):
... if name == 'name':
... name = kwargs.get('language')=='french' and 'Jacques' or 'James'
... if 'upper' in args:
... return name.upper()
... elif 'lower' in args:
... return name.lower()
... else:
... return name
>>> fill('Hello {{name}}!', filler)
'Hello James!'
>>> fill('Hello {{name language=\"french\"}}!', filler)
'Hello Jacques!'
>>> fill('Hello {{name upper}}!', filler)
'Hello JAMES!'
>>> fill('Hello {{name lower language=\"french\"}}!', filler)
'Hello jacques!'
>>> fill('Hello {{name lower language=french}}!', filler)
'Hello jacques!'
>>> fill('Hello {{name}}!', lambda a: None )
'Hello {{name}}!'
>>>
"""
return dzfill(_fill('{{', '}}', text, callback), callback)
def dzfill(text, callback):
"""fill a tag in the <dz: style"""
return _fill('<dz:', '>', text, callback)
# ---- file: year2020/d8.py ----
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Tuple
class OpCode(Enum):
acc = "acc"
jmp = "jmp"
nop = "nop"
@dataclass
class Emu:
program: List[Tuple[OpCode, int]]
acc: int = 0
pointer: int = 0
history: List[int] = field(default_factory=list)
@staticmethod
def from_iterable(iterable):
return Emu(
[(OpCode[line.split()[0]], int(line.split()[1])) for line in iterable]
)
def with_sub(self, idx: int, code: OpCode, value: int) -> "Emu":
new_program = self.program.copy()
new_program[idx] = code, value
return Emu(new_program)
def execute(self) -> Tuple[int, bool]:
while True:
if self.pointer in self.history:
return self.acc, False
if self.pointer >= len(self.program):
return self.acc, True
self.history.append(self.pointer)
code, val = self.program[self.pointer]
self._execute_code(code, val)
def _execute_code(self, code: OpCode, value: int):
if code == OpCode.acc:
self.acc += value
self.pointer += 1
elif code == OpCode.jmp:
self.pointer += value
elif code == OpCode.nop:
self.pointer += 1
def part1(inp: List[str]) -> int:
emulator = Emu.from_iterable(inp)
return emulator.execute()[0]
def part2(inp: List[str]) -> int:
base_emu = Emu.from_iterable(inp)
for i, (code, val) in enumerate(base_emu.program):
if code == OpCode.nop:
emu = base_emu.with_sub(i, OpCode.jmp, val)
elif code == OpCode.jmp:
emu = base_emu.with_sub(i, OpCode.nop, val)
else:
continue
val, terminated = emu.execute()
if terminated:
return val
def test_part1():
inp = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
assert part1(inp.splitlines()) == 5
def test_part2():
inp = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
assert part2(inp.splitlines()) == 8
| [
"[email protected]"
] | |
ce48e075832bfe8ad8472122d99f7481d3d2a0e4 | dfe2a52a1c36a28a8bf85af7efd42380d980b773 | /virtual/lib/python3.6/site-packages/django/db/models/sql/compiler.py | 279aa7a00fe58eb6d0feb049623f04707c8af9ec | [
"MIT"
] | permissive | virginiah894/Instagram-clone | 2c2a15d89fcdb25b22bd60428cf84a01f3bd553c | 4d8abe7bafefae06a0e462e6a47631c2f8a1d361 | refs/heads/master | 2022-12-10T06:56:21.105357 | 2020-01-07T14:14:50 | 2020-01-07T14:14:50 | 229,394,540 | 3 | 0 | MIT | 2022-12-08T03:23:40 | 2019-12-21T07:41:19 | Python | UTF-8 | Python | false | false | 66,354 | py | import collections
import functools
import re
import warnings
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.inspect import func_supports_parameter
FORCE = object()
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or getattr(expr, 'alias', None) not in pk_aliases
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
if self.query.combinator:
src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query.set_values(self.query.values_select)
parts += (compiler.as_sql(),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limits and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for select, _, alias in self.select:
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), sub_params + params
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions, one per selected concrete field,
        suitable for use in the SELECT clause of the query.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
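        # Sketch (hypothetical model): for a model with concrete fields
        # (id, name), this returns [Col(alias, id), Col(alias, name)], which
        # later compile to '"app_model"."id"' and '"app_model"."name"'.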
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
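        # Sketch (hypothetical fields): find_ordering_name('blog__name', opts)
        # sets up the 'blog' join and returns
        # [(OrderBy(Col(<blog alias>, name), descending=False), False)].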
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target columns
        for the same input, as the prefixes of their resolved paths must
        match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
def local_setter(obj, from_obj):
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(obj, from_obj):
setattr(from_obj, name, obj)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': remote_setter,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
convs = []
for conv in (backend_converters + field_converters):
if func_supports_parameter(conv, 'context'):
warnings.warn(
'Remove the context parameter from %s.%s(). Support for it '
'will be removed in Django 3.0.' % (
conv.__self__.__class__.__name__,
conv.__name__,
),
RemovedInDjango30Warning,
)
conv = functools.partial(conv, context={})
convs.append(conv)
converters[i] = (convs, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
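        # Sketch (hypothetical SQL): the clone compiles to something like
        #   SELECT (1) AS "a" FROM "app_entry" WHERE ... LIMIT 1
        # where the LIMIT is set by the caller (e.g. Query.exists()).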
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch and not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
return_id = False
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if value.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
if self.return_id and self.connection.features.can_return_id_from_insert:
if self.connection.features.can_return_ids_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_ids_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_id_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
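        # Sketch (hypothetical table): this compiles to something like
        #   DELETE FROM "app_entry" WHERE "app_entry"."id" IN (%s, %s)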
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if val.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
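# Usage sketch (hypothetical caller): stream rows in chunks while guaranteeing
# the cursor is closed even if iteration is abandoned:
#     for chunk in cursor_iter(cursor, sentinel=(), col_count=None, itersize=100):
#         for row in chunk:
#             process(row)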
| [
"[email protected]"
] | |
25b825d27e27e80bf0bf8ad227c956a257a4aeaf | c7a1c1ae40e9d95dfb92251dcfbf3c5010e6ba81 | /unicorn-hat-cpu-status-indicator.py | 76dbd1127ffdb1b660f19b2e71e99fbcaea3b998 | [] | no_license | pranavlathigara/Raspberry-Pi-DIY-Projects | efd18e2e5b9b8369bb1a5f5418782480cf9bc729 | 0c14c316898d4d06015912ac4a8cb7b71a3980c0 | refs/heads/master | 2021-04-06T09:14:28.088223 | 2018-02-19T00:15:22 | 2018-02-19T00:15:22 | 124,649,553 | 1 | 2 | null | 2018-03-10T11:30:59 | 2018-03-10T11:30:59 | null | UTF-8 | Python | false | false | 1,500 | py | #!/usr/bin/env python
# https://forums.pimoroni.com/t/unicorn-hat-cpu-status-indicator/6150
# Import the relevant modules
import unicornhat as uh
try:
import psutil
except ImportError:
exit("This script requires psutil.n\Install with: sudo pip install psutil")
# Set the brightness of the UnicornHAT - 1.0 is blindingly bright!
uh.brightness(0.5)
# Run in an infinite loop and display relevant colour on the UnicornHAT.
# Create your own 10 step gradient via http://www.perbang.dk/rgbgradient/
while True:
cpu_raw = psutil.cpu_percent(interval=1)
cpu = int(cpu_raw)
#print cpu # Uncomment out to show CPU usage in the terminal
    if cpu < 10:
        uh.set_all(0,255,0) # Green
        uh.show()
    elif cpu < 20:
        uh.set_all(56,255,0)
        uh.show()
    elif cpu < 30: # Lime
        uh.set_all(113,255,0)
        uh.show()
    elif cpu < 40:
        uh.set_all(170,255,0)
        uh.show()
    elif cpu < 50: # Yellow
        uh.set_all(226,255,0)
        uh.show()
    elif cpu < 60:
        uh.set_all(255,226,0)
        uh.show()
    elif cpu < 70: # Orange
        uh.set_all(255,170,0)
        uh.show()
    elif cpu < 80:
        uh.set_all(255,113,0)
        uh.show()
    elif cpu < 90:
        uh.set_all(255,56,0)
        uh.show()
    else:
        uh.set_all(255,0,0) # Red
        uh.show()
| [
"[email protected]"
] | |
1ad5e73bdc6bb2f1b25482eb24098fb40a19d746 | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/operators/dynamic_shape/test_dynamic_SIMD_v1.py | c742eb60dd2fa29b664aa45796b2afbb2b3fbc98 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 4,094 | py | import akg
import akg.topi as topi
import akg.tvm as tvm
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from akg import platform as cce
import numpy as np
dtype = "float16"
mapKey = {"add":"binary", "sub":"binary","div":"binary","mul":"binary","min":"binary","max":"binary",
"abs": "single", "exp": "single", "log": "single", "sqrt": "single",
"adds": "single", "muls": "single"}
insn = "adds"
insnType = mapKey[insn]
def gen_data(shape, dtype):
support_list = {"float16": np.float16, "float32": np.float32}
ma = random_gaussian(shape, miu=1, sigma=0.1)
mb = random_gaussian(shape, miu=1, sigma=0.1)
ma = ma.astype(support_list[dtype])
mb = mb.astype(support_list[dtype])
expect = ma
if insn == "add":
expect = ma + mb
elif insn == "sub":
expect = ma - mb
if insn == "mul":
expect = ma * mb
elif insn == "div":
expect = ma / mb
elif insn == "max":
expect = np.max(ma, mb)
elif insn == "min":
expect = np.min(ma, mb)
elif insn == "abs":
expect = np.abs(ma)
elif insn == "exp":
expect = np.exp(ma)
elif insn == "log":
expect = np.log(ma)
elif insn == "sqrt":
expect = np.sqrt(ma)
elif insn == "adds":
expect = ma + 2
elif insn == "muls":
expect = ma * 2
return ma, mb, expect
def gen_kernel():
kernel_name = "dynamic_1d_" + insn + "_" + dtype
attrs = {}
attrs['enable_multicore'] = False
attrs['enable_post_poly_loop_partition'] = False
attrs['enable_unroll_loop'] = False
attrs['enable_fix_loop_extent'] = False
attrs['enable_double_buffer'] = False
attrs['enable_dynamic'] = True
attrs['dim'] = "0 0 1024 1"
mod = my_dsl(dtype, kernel_name, attrs)
source_code = mod.imported_modules[0].get_source()
print(source_code)
save_cce(source_code)
return mod
def my_dsl(dtype, kernel_name, attrs):
m = tvm.var("M")
n = tvm.var("N")
A = tvm.placeholder((m,), name="A", dtype=dtype)
B = tvm.placeholder((m,), name="B", dtype=dtype)
if insn == "add":
C = topi.add(A, B)
elif insn == "sub":
C = topi.subtract(A, B)
if insn == "mul":
C = topi.multiply(A, B)
elif insn == "div":
C = topi.divide(A, B)
elif insn == "max":
C = topi.maximum(A, B)
elif insn == "min":
C = topi.minimum(A, B)
elif insn == "abs":
C = tvm.compute(A.shape, lambda *index: tvm.abs(A(*index)), name='C')
elif insn == "exp":
C = topi.exp(A)
elif insn == "log":
C = topi.log(A)
elif insn == "sqrt":
C = topi.sqrt(A)
elif insn == "adds":
C = A + tvm.const(2, dtype)
elif insn == "muls":
C = A * tvm.const(2, dtype)
# C = tvm.compute((m, ), lambda i: A[i] + B[i], name="C")
s = tvm.create_schedule([C.op])
with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
if insnType == "binary":
mod = akg.build(s, [A, B, C], "cce", name=kernel_name, attrs = attrs, polyhedral=True)
else:
mod = akg.build(s, [A, C], "cce", name=kernel_name, attrs = attrs, polyhedral=True)
return mod
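# Because M is a tvm.var, a single compiled kernel is expected to serve any
# 1-D length: utils.mod_launch in test_dsl below binds M from the shapes of
# the concrete numpy arrays at launch time.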
def save_cce(code):
with open("aaaa_code.cce", "w") as f:
f.write(code)
def test_dsl(shape):
print("\n\n\nshape:", shape, "\n\n")
mod = gen_kernel()
ma, mb, expect = gen_data(shape, dtype)
output = np.full(expect.shape, 0, dtype=dtype)
if insnType == "binary":
output = utils.mod_launch(mod, (ma, mb, output))
else:
output = utils.mod_launch(mod, (ma, output))
rtol = atol = 1e-04
    cpr_res_all = np.allclose(output, expect, rtol, atol, equal_nan=False)
    print("\noutput:", output)
    print("\nexpect:", expect)
    print("\nallclose:", cpr_res_all)
if __name__ == "__main__":
test_dsl((30000,))
#test_dsl((1999,))
# test_dsl((2001,))
| [
"[email protected]"
] | |
d0daf7126ac49ceaf9fe4fba467bdcc38254018b | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddPlaylistItemsRequest.py | 6eb4fbed96c893b1ed0d077e712f2afb6901d726 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,222 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddPlaylistItemsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddPlaylistItems','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProgramItems(self): # String
return self.get_query_params().get('ProgramItems')
def set_ProgramItems(self, ProgramItems): # String
self.add_query_param('ProgramItems', ProgramItems)
def get_ProgramId(self): # String
return self.get_query_params().get('ProgramId')
def set_ProgramId(self, ProgramId): # String
self.add_query_param('ProgramId', ProgramId)
def get_CasterId(self): # String
return self.get_query_params().get('CasterId')
def set_CasterId(self, CasterId): # String
self.add_query_param('CasterId', CasterId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ProgramConfig(self): # String
return self.get_query_params().get('ProgramConfig')
def set_ProgramConfig(self, ProgramConfig): # String
self.add_query_param('ProgramConfig', ProgramConfig)
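# Usage sketch (assuming an initialized AcsClient from aliyunsdkcore; the
# region, IDs, and payload below are placeholders):
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#     request = AddPlaylistItemsRequest()
#     request.set_CasterId('<caster-id>')
#     request.set_ProgramId('<program-id>')
#     request.set_ProgramItems('<program-items-json>')
#     response = client.do_action_with_exception(request)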
| [
"[email protected]"
] | |
9022cbf868abe8040133bf2c58b6759ba2bb8d2d | fe70774ff6898c5bdb0c941b4f335de576abfdb6 | /flopy/modflow/mfsip.py | 157d25044903290c48a1a97ca47478f9397ad1af | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | robinthibaut/flopy | 35af468415d1ba6e1de119a7cb335381304fada9 | 22ef330bcfb9259fc23735d6b174d27804b624a0 | refs/heads/develop | 2023-06-30T21:43:24.101593 | 2023-06-13T19:46:03 | 2023-06-13T19:46:03 | 255,560,877 | 0 | 0 | BSD-3-Clause | 2022-10-10T12:23:38 | 2020-04-14T09:05:42 | null | UTF-8 | Python | false | false | 7,748 | py | """
mfsip module. Contains the ModflowSip class. Note that the user can access
the ModflowSip class as `flopy.modflow.ModflowSip`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/sip.html>`_.
"""
from ..pakbase import Package
class ModflowSip(Package):
"""
MODFLOW Strongly Implicit Procedure Package Class.
Parameters
----------
model : model object
The model object (of type :class:flopy.modflow.mf.Modflow) to which
this package will be added.
mxiter : integer
The maximum number of times through the iteration loop in one time
step in an attempt to solve the system of finite-difference equations.
(default is 200)
nparm : integer
The number of iteration variables to be used.
Five variables are generally sufficient. (default is 5)
accl : float
The acceleration variable, which must be greater than zero
and is generally equal to one. If a zero is entered,
it is changed to one. (default is 1)
hclose : float > 0
The head change criterion for convergence. When the maximum absolute
value of head change from all nodes during an iteration is less than
or equal to hclose, iteration stops. (default is 1e-5)
ipcalc : 0 or 1
A flag indicating where the seed for calculating iteration variables
will come from. 0 is the seed entered by the user will be used.
1 is the seed will be calculated at the start of the simulation from
problem variables. (default is 0)
wseed : float > 0
The seed for calculating iteration variables. wseed is always read,
but is used only if ipcalc is equal to zero. (default is 0)
iprsip : integer > 0
the printout interval for sip. iprsip, if equal to zero, is changed
to 999. The maximum head change (positive or negative) is printed for
each iteration of a time step whenever the time step is an even
multiple of iprsip. This printout also occurs at the end of each
stress period regardless of the value of iprsip. (default is 0)
extension : string
Filename extension (default is 'sip')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> sip = flopy.modflow.ModflowSip(ml, mxiter=100, hclose=0.0001)
"""
def __init__(
self,
model,
mxiter=200,
nparm=5,
accl=1,
hclose=1e-5,
ipcalc=1,
wseed=0,
iprsip=0,
extension="sip",
unitnumber=None,
filenames=None,
):
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowSip._defaultunit()
# call base package constructor
super().__init__(
model,
extension=extension,
name=self._ftype(),
unit_number=unitnumber,
filenames=self._prepare_filenames(filenames),
)
# check if a valid model version has been specified
if model.version == "mfusg":
raise Exception(
f"Error: cannot use {self.name} package "
f"with model version {model.version}"
)
self._generate_heading()
self.url = "sip.html"
self.mxiter = mxiter
self.nparm = nparm
self.accl = accl
self.hclose = hclose
self.ipcalc = ipcalc
self.wseed = wseed
self.iprsip = iprsip
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
"""
# Open file for writing
f = open(self.fn_path, "w")
f.write(f"{self.heading}\n")
ifrfm = self.parent.get_ifrefm()
if ifrfm:
f.write(f"{self.mxiter} {self.nparm}\n")
f.write(
f"{self.accl} {self.hclose} {self.ipcalc} {self.wseed} {self.iprsip}\n"
)
else:
f.write(f"{self.mxiter:10d}{self.nparm:10d}\n")
f.write(
"{:10.3f}{:10.3g}{:10d}{:10.3f}{:10d}\n".format(
self.accl,
self.hclose,
self.ipcalc,
self.wseed,
self.iprsip,
)
)
f.close()
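        # Resulting file (free format, sketch with the default arguments):
        #     # <heading comment>
        #     200 5
        #     1 1e-05 1 0 0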
@classmethod
def load(cls, f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
sip : ModflowSip object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> sip = flopy.modflow.ModflowSip.load('test.sip', m)
"""
if model.verbose:
print("loading sip package file...")
openfile = not hasattr(f, "read")
if openfile:
filename = f
f = open(filename, "r")
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != "#":
break
ifrfm = model.get_ifrefm()
# dataset 1
if ifrfm:
t = line.strip().split()
mxiter = int(t[0])
nparm = int(t[1])
else:
mxiter = int(line[0:10].strip())
nparm = int(line[10:20].strip())
# dataset 2
line = f.readline()
if ifrfm:
t = line.strip().split()
accl = float(t[0])
hclose = float(t[1])
ipcalc = int(t[2])
wseed = float(t[3])
iprsip = int(t[4])
else:
accl = float(line[0:10].strip())
hclose = float(line[10:20].strip())
ipcalc = int(line[20:30].strip())
wseed = float(line[30:40].strip())
iprsip = int(line[40:50].strip())
if openfile:
f.close()
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = model.get_ext_dict_attr(
ext_unit_dict, filetype=ModflowSip._ftype()
)
return cls(
model,
mxiter=mxiter,
nparm=nparm,
accl=accl,
hclose=hclose,
ipcalc=ipcalc,
wseed=wseed,
iprsip=iprsip,
unitnumber=unitnumber,
filenames=filenames,
)
@staticmethod
def _ftype():
return "SIP"
@staticmethod
def _defaultunit():
return 25
| [
"[email protected]"
] | |
850abcd24fff64af7d6051b944820f5ad1239db0 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DGS-3120-24TC-L3MGMT-MIB.py | 84a5c9f901272856a6dd84805ff24aa82447b20b | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 27,904 | py | #
# PySNMP MIB module DGS-3120-24TC-L3MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DGS-3120-24TC-L3MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:43:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
Ipv6Address, = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, Unsigned32, Integer32, IpAddress, iso, Counter64, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, MibIdentifier, ObjectIdentity, Counter32, ModuleIdentity, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "Integer32", "IpAddress", "iso", "Counter64", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "MibIdentifier", "ObjectIdentity", "Counter32", "ModuleIdentity", "Bits")
TextualConvention, RowStatus, TruthValue, PhysAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "TruthValue", "PhysAddress", "DisplayString")
dlink_Dgs3120Proj_Dgs3120_24TC, = mibBuilder.importSymbols("SWDGS3120PRIMGMT-MIB", "dlink-Dgs3120Proj-Dgs3120-24TC")
swL3MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3))
if mibBuilder.loadTexts: swL3MgmtMIB.setLastUpdated('1211160000Z')
if mibBuilder.loadTexts: swL3MgmtMIB.setOrganization(' ')
if mibBuilder.loadTexts: swL3MgmtMIB.setContactInfo(' ')
if mibBuilder.loadTexts: swL3MgmtMIB.setDescription('The Structure of Layer 3 Network Management Information for the proprietary enterprise.')
class NodeAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
fixedLength = 6
class NetAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
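# Usage sketch (assuming this compiled module is on the MIB builder's search
# path under its module name):
#     from pysnmp.smi import builder
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.loadModules('DGS-3120-24TC-L3MGMT-MIB')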
swL3IpMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2))
swL3IpCtrlMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1))
swL3IpFdbMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2))
swL3IpCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3), )
if mibBuilder.loadTexts: swL3IpCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlTable.setDescription('This table contain IP interface information.')
swL3IpCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1), ).setIndexNames((0, "DGS-3120-24TC-L3MGMT-MIB", "swL3IpCtrlInterfaceName"))
if mibBuilder.loadTexts: swL3IpCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlEntry.setDescription('A list of information about a specific IP interface.')
swL3IpCtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlInterfaceName.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlInterfaceName.setDescription('This object indicates the name of the IP interface.')
swL3IpCtrlIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIfIndex.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIfIndex.setDescription('This object uniquely identifies the IP interface number in the swL3IpCtrlTable.')
swL3IpCtrlIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpAddr.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpAddr.setDescription('The IP address of the interface. This object only can take the value of the unicast IP address.')
swL3IpCtrlIpSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpSubnetMask.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpSubnetMask.setDescription('The IP net mask for this interface.')
swL3IpCtrlVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlVlanName.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlVlanName.setDescription("This object indicates the IP control entry's VLAN name. The VLAN name in each entry must be unique in the IP Control Table.")
swL3IpCtrlProxyArp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlProxyArp.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlProxyArp.setDescription('This object indicates enable/disable of the proxy ARP function for IPv4.')
swL3IpCtrlSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 7), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlSecondary.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlSecondary.setDescription('When this is true(1), the IP address is the secondary IP. When false(2), the IP address is the primary IP.')
swL3IpCtrlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("bootp", 3), ("dhcp", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlMode.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlMode.setDescription('This object indicates the IP operation mode. other(1) - This entry is currently in use but the conditions under which it will remain are determined by each of the following values. bootp(3) - The IP address will be set automatically from a BOOTP server. dhcp(4) - The IP address will be set automatically from a DHCP server.')
swL3IpCtrlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlAdminState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlAdminState.setDescription('The state of the IP interface.')
swL3IpCtrlIpv4AdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv4AdminState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpv4AdminState.setDescription('The IPv4 admin state of the IP interface. The default state is determined by project. This state will only be effective when the swL3IpCtrlAdminState is enabled.')
swL3IpCtrlIpv6AdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv6AdminState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpv6AdminState.setDescription('The IPv6 admin state of the IP interface. The default state is determined by project. This state will only be effective when the swL3IpCtrlAdminState is enabled.')
swL3IpCtrlIpv6LinkLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 14), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAddress.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAddress.setDescription('The IPv6 link local address for this interface.')
swL3IpCtrlIpv6LinkLocalPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalPrefixLen.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalPrefixLen.setDescription('The IPv6 prefix length for this IPv6 link local address.')
swL3IpCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 16), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpCtrlState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlState.setDescription('This variable displays the status of the entry. The status is used for creating, modifying, and deleting instances of the objects in this table.')
swL3IpCtrlIpv6LinkLocalAutoState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enabled", 2), ("disabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAutoState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAutoState.setDescription('The state of the IPv6 link local auto.')
swL3IpCtrlLocalProxyArp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlLocalProxyArp.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlLocalProxyArp.setDescription('This object indicates enable/disable of the local proxy ARP function for IPv4.')
swL3IpCtrlDhcpv6ClientState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlDhcpv6ClientState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlDhcpv6ClientState.setDescription('The state of the Dhcpv6 Client.')
swL3IpCtrlIpDhcpOption12State = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12State.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12State.setDescription('Enable or disable insertion of option 12 in the DHCPDISCOVER and DHCPREQUEST message.')
swL3IpCtrlIpDhcpOption12HostName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 3, 1, 23), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12HostName.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12HostName.setDescription('Specify the host name to be inserted in the DHCPDISCOVER and DHCPREQUEST message. The specified host name must start with a letter, end with a letter or digit, and have only letters, digits, and hyphen as interior characters; the maximal length is 63. By default, the host name is empty. When set an empty host name, means to clear the host name setting and use the default value to encode option 12.')
swL3Ipv6CtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 4), )
if mibBuilder.loadTexts: swL3Ipv6CtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6CtrlTable.setDescription('This table contains IPv6 information of an IP interface.')
swL3Ipv6CtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 4, 1), ).setIndexNames((0, "DGS-3120-24TC-L3MGMT-MIB", "swL3Ipv6CtrlInterfaceName"))
if mibBuilder.loadTexts: swL3Ipv6CtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6CtrlEntry.setDescription('A list of IPv6 information about a specific IP interface.')
swL3Ipv6CtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6CtrlInterfaceName.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6CtrlInterfaceName.setDescription('This object indicates the name of the IP interface.')
swL3Ipv6CtrlMaxReassmblySize = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6CtrlMaxReassmblySize.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6CtrlMaxReassmblySize.setDescription('Maximum Reassembly Size of the IP interface.')
swL3Ipv6CtrlNsRetransTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3Ipv6CtrlNsRetransTimer.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6CtrlNsRetransTimer.setDescription("Neighbor solicitation's retransmit timer. The unit is set in milliseconds.")
swL3Ipv6AddressCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5), )
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlTable.setDescription('This table contains IPv6 address information for each IP interface.')
swL3Ipv6AddressCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1), ).setIndexNames((0, "DGS-3120-24TC-L3MGMT-MIB", "swL3Ipv6AddressCtrlInterfaceName"), (0, "DGS-3120-24TC-L3MGMT-MIB", "swL3Ipv6Address"), (0, "DGS-3120-24TC-L3MGMT-MIB", "swL3Ipv6AddressCtrlPrefixLen"))
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlEntry.setDescription('A list of information about a specific IPv6 address.')
swL3Ipv6AddressCtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlInterfaceName.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlInterfaceName.setDescription('This object indicates the name of the IP interface. ')
swL3Ipv6Address = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1, 2), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6Address.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6Address.setDescription('Specify the IPv6 address.')
swL3Ipv6AddressCtrlPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlPrefixLen.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlPrefixLen.setDescription('Indicates the prefix length of this IPv6 address.')
swL3Ipv6AddressCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlState.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlState.setDescription('This variable displays the status of the entry. The status is used for creating, modifying, and deleting instances of the objects in this table.')
swL3Ipv6AddressCtrlAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("manual", 1), ("dhcpv6", 2), ("stateless", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlAddressType.setStatus('current')
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlAddressType.setDescription('This object indicates the type of the IPv6 address. manual(1): the IPv6 address is configured by user. dhcpv6(2): the IPv6 address is assigned by DHCPv6 server. stateless(3): the IPv6 address is assigned by router advertisement.')
swL3IpCtrlAllIpIfState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enabled", 2), ("disabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlAllIpIfState.setStatus('current')
if mibBuilder.loadTexts: swL3IpCtrlAllIpIfState.setDescription('This object indicates all interface function state of the device.')
swL3IpFdbInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1), )
if mibBuilder.loadTexts: swL3IpFdbInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoTable.setDescription('A table that contains forwarding and/or filtering information. This information is used by the switch in determining how to propagate the received IP packets.')
swL3IpFdbInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1, 1), ).setIndexNames((0, "DGS-3120-24TC-L3MGMT-MIB", "swL3IpFdbInfoIpAddr"))
if mibBuilder.loadTexts: swL3IpFdbInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoEntry.setDescription('Information about a specific IP address for which the bridge has some forwarding and/or filtering information.')
swL3IpFdbInfoIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoIpAddr.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoIpAddr.setDescription('A IP address for which switch has forwarding and/or filtering information.')
swL3IpFdbInfoIpSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoIpSubnetMask.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoIpSubnetMask.setDescription('A IP net mask for this interface for which the switch has forwarding and/or filtering information.')
swL3IpFdbInfoPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoPort.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoPort.setDescription("Either the value '0', or the port number of the port on which packets having an IP address equal to the value of the corresponding instance of swL3IpFdbInfoIpAddr has been seen. A value of '0' indicates that the port number has not been learned but that switch does have some forwarding/filtering information about this address.")
swL3IpFdbInfoType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("static", 2), ("dynamic", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoType.setStatus('current')
if mibBuilder.loadTexts: swL3IpFdbInfoType.setDescription('The status of this entry.')
swL3IpArpAgingTime = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpArpAgingTime.setStatus('current')
if mibBuilder.loadTexts: swL3IpArpAgingTime.setDescription('The timeout period in minutes for aging out dynamically learned arp information.')
swL3IpStaticRouteTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5), )
if mibBuilder.loadTexts: swL3IpStaticRouteTable.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteTable.setDescription("This entity's IP static Routing table.")
swL3IpStaticRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1), ).setIndexNames((0, "DGS-3120-24TC-L3MGMT-MIB", "swL3IpStaticRouteDest"), (0, "DGS-3120-24TC-L3MGMT-MIB", "swL3IpStaticRouteMask"), (0, "DGS-3120-24TC-L3MGMT-MIB", "swL3IpStaticRouteNextHop"))
if mibBuilder.loadTexts: swL3IpStaticRouteEntry.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteEntry.setDescription("A particular route to a particular destination, under a particular policy. Once an entry is built, it should not be modified; that is, only create and delete actions are supported.")
swL3IpStaticRouteDest = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteDest.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteDest.setDescription('The destination IP address of this route. This object may not take a Multicast (Class D) address value. Any assignment (implicit or otherwise) of an instance of this object to a value x must be rejected if the bitwise logical-AND of x with the value of the corresponding instance of the swL3IpStaticRouteMask object is not equal to x.')
swL3IpStaticRouteMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteMask.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteMask.setDescription('Indicates the mask to be logical-ANDed with the destination address before being compared to the value in the swL3IpStaticRouteDest field. For those systems that do not support arbitrary subnet masks, an agent constructs the value of the swL3IpStaticRouteMask by reference to the IP Address Class. Any assignment (implicit or otherwise) of an instance of this object to a value x must be rejected if the bitwise logical-AND of x with the value of the corresponding instance of the swL3IpStaticRouteDest object is not equal to swL3IpStaticRouteDest.')
swL3IpStaticRouteBkupState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("primary", 1), ("backup", 2), ("none", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteBkupState.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteBkupState.setDescription('The routing state for this route.The value SHOULD be primary(1) or backup(2).')
swL3IpStaticRouteNextHop = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteNextHop.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteNextHop.setDescription('On remote routes, the address of the next system en route; otherwise, 0.0.0.0.')
swL3IpStaticRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteMetric.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteMetric.setDescription('The routing metric for this route.')
swL3IpStaticRouteStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("valid", 3), ("active", 4), ("inActive", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteStatus.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteStatus.setDescription('This object indicates the status of this entry. other(1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. invalid(2) - writing this value to the object removes the corresponding entry from the table. valid(3) - this entry resides in the table. active(4) - the nextHop of this entry exists in the ARP table. inActive(5) - the next hop of this entry does not exist in the ARP table.')
swL3IpStaticRouteWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteWeight.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteWeight.setDescription('Specifies the weight value. Used for the weighted multipath.')
swL3IpStaticRouteInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 1, 3, 2, 5, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteInterfaceName.setStatus('current')
if mibBuilder.loadTexts: swL3IpStaticRouteInterfaceName.setDescription('Specifies the name of the IP interface.')
mibBuilder.exportSymbols("DGS-3120-24TC-L3MGMT-MIB", PYSNMP_MODULE_ID=swL3MgmtMIB, swL3Ipv6CtrlTable=swL3Ipv6CtrlTable, swL3IpFdbInfoTable=swL3IpFdbInfoTable, swL3Ipv6AddressCtrlAddressType=swL3Ipv6AddressCtrlAddressType, swL3IpStaticRouteDest=swL3IpStaticRouteDest, swL3Ipv6CtrlEntry=swL3Ipv6CtrlEntry, swL3IpCtrlIpAddr=swL3IpCtrlIpAddr, swL3IpStaticRouteNextHop=swL3IpStaticRouteNextHop, swL3IpStaticRouteTable=swL3IpStaticRouteTable, swL3IpCtrlProxyArp=swL3IpCtrlProxyArp, swL3IpFdbInfoEntry=swL3IpFdbInfoEntry, swL3IpCtrlIpv6LinkLocalAddress=swL3IpCtrlIpv6LinkLocalAddress, swL3IpCtrlLocalProxyArp=swL3IpCtrlLocalProxyArp, swL3IpStaticRouteBkupState=swL3IpStaticRouteBkupState, NodeAddress=NodeAddress, swL3IpFdbMgmt=swL3IpFdbMgmt, swL3Ipv6AddressCtrlTable=swL3Ipv6AddressCtrlTable, swL3Ipv6AddressCtrlInterfaceName=swL3Ipv6AddressCtrlInterfaceName, swL3Ipv6AddressCtrlPrefixLen=swL3Ipv6AddressCtrlPrefixLen, swL3IpMgmt=swL3IpMgmt, NetAddress=NetAddress, swL3IpCtrlMode=swL3IpCtrlMode, swL3Ipv6AddressCtrlEntry=swL3Ipv6AddressCtrlEntry, swL3IpCtrlSecondary=swL3IpCtrlSecondary, swL3IpCtrlIpSubnetMask=swL3IpCtrlIpSubnetMask, swL3IpStaticRouteMask=swL3IpStaticRouteMask, swL3IpArpAgingTime=swL3IpArpAgingTime, swL3IpCtrlIfIndex=swL3IpCtrlIfIndex, swL3IpStaticRouteMetric=swL3IpStaticRouteMetric, swL3IpCtrlIpv6LinkLocalAutoState=swL3IpCtrlIpv6LinkLocalAutoState, swL3Ipv6CtrlMaxReassmblySize=swL3Ipv6CtrlMaxReassmblySize, swL3IpFdbInfoIpSubnetMask=swL3IpFdbInfoIpSubnetMask, swL3IpCtrlAdminState=swL3IpCtrlAdminState, swL3IpCtrlIpv4AdminState=swL3IpCtrlIpv4AdminState, swL3Ipv6CtrlNsRetransTimer=swL3Ipv6CtrlNsRetransTimer, swL3IpCtrlAllIpIfState=swL3IpCtrlAllIpIfState, swL3IpStaticRouteEntry=swL3IpStaticRouteEntry, swL3IpCtrlEntry=swL3IpCtrlEntry, swL3IpCtrlInterfaceName=swL3IpCtrlInterfaceName, swL3IpCtrlIpv6LinkLocalPrefixLen=swL3IpCtrlIpv6LinkLocalPrefixLen, swL3IpStaticRouteStatus=swL3IpStaticRouteStatus, swL3IpCtrlIpDhcpOption12State=swL3IpCtrlIpDhcpOption12State, swL3IpCtrlTable=swL3IpCtrlTable, swL3IpFdbInfoType=swL3IpFdbInfoType, swL3MgmtMIB=swL3MgmtMIB, swL3IpStaticRouteWeight=swL3IpStaticRouteWeight, swL3IpCtrlMgmt=swL3IpCtrlMgmt, swL3IpCtrlState=swL3IpCtrlState, swL3Ipv6AddressCtrlState=swL3Ipv6AddressCtrlState, swL3IpStaticRouteInterfaceName=swL3IpStaticRouteInterfaceName, swL3IpCtrlIpv6AdminState=swL3IpCtrlIpv6AdminState, swL3Ipv6Address=swL3Ipv6Address, swL3Ipv6CtrlInterfaceName=swL3Ipv6CtrlInterfaceName, swL3IpCtrlIpDhcpOption12HostName=swL3IpCtrlIpDhcpOption12HostName, swL3IpFdbInfoIpAddr=swL3IpFdbInfoIpAddr, swL3IpFdbInfoPort=swL3IpFdbInfoPort, swL3IpCtrlVlanName=swL3IpCtrlVlanName, swL3IpCtrlDhcpv6ClientState=swL3IpCtrlDhcpv6ClientState)
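# Illustrative query against this compiled MIB using pysnmp's high-level API.
# A sketch only -- the agent address, community string and instance index
# below are assumptions, not part of the MIB:
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('DGS-3120-24TC-L3MGMT-MIB',
#                                 'swL3IpArpAgingTime', 0))))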
| [
"[email protected]"
] | |
4d8c07a70b0749f8547c953b05392c9470433b4b | f53ceb369fe3ed0e57004510d5836abbac7ce2e1 | /src/embeddingdb/web/wsgi.py | 23375a9b9580bc64f7264ca7c79562d6fd97ef3b | [
"MIT"
] | permissive | aarek-eng/embeddingdb | e3b21a1b8c3ed54a10fdce647e51c1bd560d117c | e6c67e92e540c4315045a0b4de5b31490331c177 | refs/heads/master | 2022-01-10T17:38:38.526503 | 2019-06-26T21:37:52 | 2019-06-26T21:37:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | # -*- coding: utf-8 -*-
"""A WSGI formulation of the web application.
Also allows the web application to be run with ``python -m embeddingdb.web.wsgi``.
"""
from embeddingdb.web.app import get_app
app = get_app()
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
89aa9e1612eed3d6d31f549bbee33b18d54bdb6f | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/c18d50d76b304b2ba3289047cce00533.py | f94f1b404b123b82294be4a70207d24c3db05ca6 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,199 | py | import re
import nltk
import string
import sys
try:
SENT_DETECTOR = nltk.data.load("tokenizers/punkt/english.pickle")
## nltk data must be installed for nltk.data.load to work
except LookupError:
if nltk.download():
SENT_DETECTOR = nltk.data.load("tokenizers/punkt/english.pickle")
else:
sys.exit("nltk download did not successfully complete")
def hey(s):
## if not a string, attempt to cast to a string
if not isinstance(s, basestring):
try:
s = str(s)
except:
return "Whatever."
if is_yell(s):
return "Woah, chill out!"
elif is_question(s):
return "Sure."
elif is_silence(s):
return "Fine. Be that way!"
else:
return "Whatever."
def is_yell(s):
    ## check that the string is unchanged by upper() but changed by lower()
return s == string.upper(s) != string.lower(s)
def is_question(s):
## check for sentence ending with a non white
## space character followed by a question mark
return any([re.search("\S\?$", x) for x in SENT_DETECTOR.tokenize(s)])
def is_silence(s):
## check if any non-whitespace characters are present
return not s.strip()
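# Illustrative responses, following the rules above:
#
#   hey('WATCH OUT!')        -> 'Woah, chill out!'
#   hey('Is this a shout?')  -> 'Sure.'
#   hey('   ')               -> 'Fine. Be that way!'
#   hey('Tom-ay-to.')        -> 'Whatever.'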
| [
"[email protected]"
] | |
f8ce4049fa17b2f77c746a564018e80505b3ad57 | 44a6e88da453a2e368b014e403843b0c955f21f4 | /utils/test/test_cmpdirs.py | a6803725d5400b4ac3619b4f18d0768c8355442c | [
"Artistic-2.0"
] | permissive | golharam/genomics | a26b1f9366203ec059cc2e49281909bfc16e6ab4 | ca0c7c239b0f04353e2f2fa897db9c24a1211596 | refs/heads/master | 2020-08-06T10:28:21.604129 | 2019-09-27T07:51:41 | 2019-09-27T07:51:41 | 212,943,378 | 0 | 0 | Artistic-2.0 | 2019-10-05T04:25:24 | 2019-10-05T04:25:23 | null | UTF-8 | Python | false | false | 6,468 | py | #######################################################################
# Tests for cmpdirs.py
#######################################################################
import unittest
import os
import tempfile
import shutil
from bcftbx.Md5sum import Md5Checker
from bcftbx.test.mock_data import TestUtils,ExampleDirLanguages
from cmpdirs import yield_filepairs
from cmpdirs import cmp_filepair
from cmpdirs import cmp_dirs
class TestYieldFilepairs(unittest.TestCase):
def setUp(self):
# Create example directory structure which
# includes files and links
self.d = ExampleDirLanguages()
self.d.create_directory()
def tearDown(self):
# Delete example directory structure
self.d.delete_directory()
def test_yield_filepairs(self):
"""yield_filepairs returns correct set of files and links
"""
# Get all files, links and directories in the example directory
expected = self.d.filelist(include_links=True,include_dirs=True)
# Remove any (non-link) directories from the expected list
expected = filter(lambda x: os.path.islink(x) or not os.path.isdir(x),
expected)
print("Expected = %s" % expected)
# Get all file pairs from the example dir and a
# dummy target directory name
for pair in yield_filepairs(self.d.dirn,'/dummy/dir'):
p1,p2 = pair
self.assertTrue(p1 in expected,"%s not expected" % p1)
# Remove from the list
expected.remove(p1)
# Check target file is as expected
p2_expected = os.path.join('/dummy/dir',
os.path.relpath(p1,self.d.dirn))
self.assertEqual(p2,p2_expected,
"Expected '%s', got '%s'" % (p2,p2_expected))
# List should be empty at the end
self.assertEqual(len(expected),0,
"Some paths not returned: %s" % expected)
class TestCmpFilepair(unittest.TestCase):
def setUp(self):
# Create working directory for test files etc
self.wd = TestUtils.make_dir()
def tearDown(self):
# Remove the container dir
TestUtils.remove_dir(self.wd)
def test_cmp_filepair_identical_files(self):
"""cmp_filepair matches identical files
"""
# Make two identical files and compare them
f1 = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
f2 = TestUtils.make_file('test_file2',"Lorum ipsum",basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.MD5_OK)
def test_cmp_filepair_different_files(self):
"""cmp_filepair flags mismatch between differing files
"""
# Make two different files and compare them
f1 = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
f2 = TestUtils.make_file('test_file2',"lorum ipsum",basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.MD5_FAILED)
def test_cmp_filepair_identical_links(self):
"""cmp_filepair matches identical links
"""
# Make two identical symlinks and compare them
f1 = TestUtils.make_sym_link('/dummy/file',link_name='test_link1',basedir=self.wd)
f2 = TestUtils.make_sym_link('/dummy/file',link_name='test_link2',basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.LINKS_SAME)
def test_cmp_filepair_different_links(self):
"""cmp_filepair flags mismatch between differing links
"""
# Make two identical symlinks and compare them
f1 = TestUtils.make_sym_link('/dummy/file1',link_name='test_link1',basedir=self.wd)
f2 = TestUtils.make_sym_link('/dummy/file2',link_name='test_link2',basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.LINKS_DIFFER)
def test_cmp_filepair_file_to_link(self):
"""cmp_file flags mismatch between file and link
"""
# Make file and link
f1 = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
f2 = TestUtils.make_sym_link('/dummy/file',link_name='test_link2',basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.MD5_ERROR)
def test_cmp_filepair_link_to_file(self):
"""cmp_file flags mismatch between link and file
"""
# Make file and link
f1 = TestUtils.make_sym_link('/dummy/file',link_name='test_link1',basedir=self.wd)
f2 = TestUtils.make_file('test_file2',"Lorum ipsum",basedir=self.wd)
result = cmp_filepair((f1,f2))
self.assertEqual(result.status,Md5Checker.TYPES_DIFFER)
class TestCmpDirs(unittest.TestCase):
def setUp(self):
# Create reference example directory structure which
# includes files and links
self.dref = ExampleDirLanguages()
self.dref.create_directory()
# Create copy of reference dir
self.dcpy = ExampleDirLanguages()
self.dcpy.create_directory()
def tearDown(self):
# Delete example directory structures
self.dref.delete_directory()
self.dcpy.delete_directory()
def test_cmp_dirs_identical_dirs(self):
"""cmp_dirs works for identical directories
"""
# Compare dirs
count = cmp_dirs(self.dref.dirn,self.dcpy.dirn)
self.assertEqual(count[Md5Checker.MD5_OK],7)
self.assertEqual(count[Md5Checker.LINKS_SAME],6)
def test_cmp_dirs_different_dirs(self):
"""cmp_dirs works for different directories
"""
# Add more files and links to reference
self.dref.add_file("extra","Additional file")
self.dref.add_link("destination","place/you/want/to/go")
# Add differing files and links
self.dref.add_file("more","Yet another file")
self.dcpy.add_file("more","Yet another file, again")
self.dref.add_link("where_to","somewhere")
self.dcpy.add_link("where_to","somewhere/else")
# Compare dirs
count = cmp_dirs(self.dref.dirn,self.dcpy.dirn)
self.assertEqual(count[Md5Checker.MD5_OK],7)
self.assertEqual(count[Md5Checker.LINKS_SAME],6)
self.assertEqual(count[Md5Checker.MD5_FAILED],1)
self.assertEqual(count[Md5Checker.LINKS_DIFFER],1)
| [
"[email protected]"
] | |
a35539e60d21ee5e4cff7b291d6e310a1f7c4738 | d6952f048727add5b54a521d04f6c9b5889bcd35 | /test/test_plugin_package.py | d5c894862466254490283a1ebf7fe54be81aa9cf | [] | no_license | TfedUD/python-sdk | bf719644041c2ab7b741af9c7fb8e5acfe085922 | 7ddc34611de44d2f9c5b217cf9b9e7cec27b2a27 | refs/heads/master | 2023-08-10T21:13:45.270193 | 2021-06-21T14:48:36 | 2021-06-21T14:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.13.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pollination_sdk
from pollination_sdk.models.plugin_package import PluginPackage # noqa: E501
from pollination_sdk.rest import ApiException
class TestPluginPackage(unittest.TestCase):
"""PluginPackage unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test PluginPackage
            include_optional is a boolean; when False only required
            params are included, when True both required and
            optional params are included """
# model = pollination_sdk.models.plugin_package.PluginPackage() # noqa: E501
if include_optional :
return PluginPackage(
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
description = '0',
digest = '0',
icon = '0',
keywords = [
'0'
],
manifest = pollination_sdk.models.plugin.Plugin(
annotations = {
'key' : '0'
},
api_version = 'v1beta1',
                    config = None,
functions = [
pollination_sdk.models.function.Function(
annotations = {
'key' : '0'
},
command = '0',
description = '0',
inputs = [
                                None
],
name = '0',
outputs = [
                                None
],
type = 'Function', )
],
                    metadata = None,
                    type = 'Plugin', ),
                readme = '# Daylight Factor\nThis recipe runs a daylight factor simulation.',
tag = '0'
)
else :
return PluginPackage(
digest = '0',
manifest = pollination_sdk.models.plugin.Plugin(
annotations = {
'key' : '0'
},
api_version = 'v1beta1',
                    config = None,
functions = [
pollination_sdk.models.function.Function(
annotations = {
'key' : '0'
},
command = '0',
description = '0',
inputs = [
                                None
],
name = '0',
outputs = [
                                None
],
type = 'Function', )
],
                    metadata = None,
type = 'Plugin', ),
tag = '0',
)
def testPluginPackage(self):
"""Test PluginPackage"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b0c4412b89a2539c8dbd8859bd81a94448288edd | 0669d94428c972da19346e356861bf11bd668bc9 | /test/test_subaccount_response_etat_etat.py | 91fdf4f9bc18db35924042db2eafccbbc67173c0 | [] | no_license | mlemee/iSendProPython | e9a0f8351e33ae7598bd1380a26c2fe0a1dacd22 | 3add878dbcd682aa41f2bd07f98d8b56c8e5f9f3 | refs/heads/master | 2022-06-10T02:27:12.368498 | 2020-05-04T15:48:13 | 2020-05-04T15:48:13 | 261,206,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | # coding: utf-8
"""
API iSendPro
    [1] Feature list: - send SMS to one or more recipients, - HLR lookup, - retrieve campaign summaries, - manage directories, - add numbers to the blacklist. - count the number of characters in an SMS [2] To use this API you must: - Create an iSendPro account on https://isendpro.com/ - Credit your account - Note: a test credit can be obtained under certain conditions - Record your account key (keyid) - It is required for every call to the API - It can be found in the \"my account\" section, sub-section \"my API\" - Configure the IP control - The IP control can be configured in the \"my account\" section, sub-section \"my API\" - It is a whitelist system: you must enter the IPs used to call the API - The IP control can also be disabled entirely  # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.subaccount_response_etat_etat import SubaccountResponseEtatEtat # noqa: E501
from swagger_client.rest import ApiException
class TestSubaccountResponseEtatEtat(unittest.TestCase):
"""SubaccountResponseEtatEtat unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSubaccountResponseEtatEtat(self):
"""Test SubaccountResponseEtatEtat"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.subaccount_response_etat_etat.SubaccountResponseEtatEtat() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0e942ea860ba6f547b090dd3e9d362fd8a956a09 | fb124e51024917d6479fa626d9607ff10f7a3aba | /storm-control/storm_control/sc_hardware/none/noneZStageModule.py | 0367129a52211e72f02bf6d98b88d170fab8f5e2 | [
"MIT"
] | permissive | BehnamAbaie/storm-control | 054bd7bbd903ed9635e4d1121c30544f58473c4f | 0c686321142eccad62ce3365eae22c3b69229b0d | refs/heads/main | 2023-06-18T08:04:01.108874 | 2021-07-14T00:51:15 | 2021-07-14T00:51:15 | 342,049,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | #!/usr/bin/env python
"""
Emulated Z stage functionality
Hazen 04/17
"""
from PyQt5 import QtCore
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.sc_hardware.baseClasses.hardwareModule as hardwareModule
import storm_control.sc_hardware.baseClasses.lockModule as lockModule
class NoneZStageFunctionality(hardwareModule.HardwareFunctionality, lockModule.ZStageFunctionalityMixin):
zStagePosition = QtCore.pyqtSignal(float)
def __init__(self, **kwds):
super().__init__(**kwds)
self.maximum = self.getParameter("maximum")
self.minimum = self.getParameter("minimum")
self.z_position = 0.5 * (self.maximum - self.minimum)
def goAbsolute(self, z_pos):
if (z_pos < self.minimum):
z_pos = self.minimum
if (z_pos > self.maximum):
z_pos = self.maximum
self.z_position = z_pos
self.zStagePosition.emit(self.z_position)
def goRelative(self, z_delta):
z_pos = self.z_position + z_delta
self.goAbsolute(z_pos)
class NoneZStageModule(hardwareModule.HardwareModule):
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.z_stage_functionality = None
configuration = module_params.get("configuration")
self.z_stage_functionality = NoneZStageFunctionality(parameters = configuration.get("parameters"))
def getFunctionality(self, message):
if (message.getData()["name"] == self.module_name):
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"functionality" : self.z_stage_functionality}))
def processMessage(self, message):
if message.isType("get functionality"):
self.getFunctionality(message)
| [
"[email protected]"
] | |
dd672e22f6b9460e05c57c72103c761fb7ba5b13 | 1bcbc4666a59cfc1eeec93152a6f4d8ea3103e11 | /mdx_strike.py | c1e2185ea2dda0da5a31eefea5848413e5f0defe | [] | no_license | xsren/my_blog | a005c991b1f819dbbc1041b95e2f4ce7691bf56a | 5a7d87b7cc4e70b37b90d292cbb68b949ab2a51f | refs/heads/master | 2021-03-27T15:25:55.806403 | 2017-11-11T14:10:09 | 2017-11-11T14:10:09 | 90,592,822 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | import markdown
STRIKE_RE = r'(-{2})(.+?)\2'
class StrikeExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add('strike', markdown.inlinepatterns.SimpleTagPattern(STRIKE_RE, 'strike'), '>strong')
def makeExtension(configs=None):
return StrikeExtension(configs=configs)
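# Usage sketch (Python-Markdown 2.x API, as assumed by extendMarkdown above):
#
#   import markdown
#   markdown.markdown('strike --this-- out', extensions=[makeExtension()])
#   # -> '<p>strike <strike>this</strike> out</p>'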
| [
"[email protected]"
] | |
eecacb5d8e8eca0109b6d65ddd7b8e54a3d4d9c5 | ea459bc6571b254f04fedb9262e297038773afe2 | /70_712A.py | e5e1ad11940e899dbf0adc2cfd0cced787fc77d2 | [] | no_license | ywtail/codeforces | 47da2564858e0c906aa715b3b8b76e6d41b76dd8 | 5c000124ff5ef1172494bc5c5dc252bcf8515ce1 | refs/heads/master | 2020-12-24T08:00:47.738455 | 2018-04-21T15:27:48 | 2018-04-21T15:27:48 | 59,407,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | #coding=utf-8
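# Codeforces 712A "Memory and Crow": the hidden sequence satisfies
# a_i = b_i + b_(i+1) (with b_(n+1) = 0), so print neighbouring sums.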
n=int(raw_input())
ai=map(int,raw_input().split())
for i in range(n-1):
print ai[i]+ai[i+1],
print ai[n-1] | [
"[email protected]"
] | |
2de5dd69d801120764e6e605e0cf8998621e6788 | 9a4e19711d72c66eba92d2ce8b9d7a062e748071 | /pykit/strutil/strutil.py | 09232997a86ac11648beabd1b0b55f090dd048bb | [
"MIT"
] | permissive | wenbobuaa/mysql-devops | dc567e252d2f46bf18c07813daf419f733cb2ded | cc60d1d2e0477702c4c78d87edbc8589ed54ea76 | refs/heads/master | 2020-03-09T05:19:57.715968 | 2018-01-21T05:05:42 | 2018-01-21T05:05:42 | 128,610,649 | 0 | 0 | null | 2018-04-08T07:11:19 | 2018-04-08T07:11:19 | null | UTF-8 | Python | false | false | 17,169 | py | #!/usr/bin/env python2
# coding: utf-8
import re
import string
import sys
import types
listtype = (types.TupleType, types.ListType)
def _findquote(line, quote):
if len(quote) == 0:
return -1, -1, []
i = 0
n = len(line)
escape = []
while i < n:
if line[i] == '\\':
escape.append(i)
i += 2
continue
if line[i] in quote:
quote_s = i - len(escape)
j = i
i += 1
while i < n and line[i] != line[j]:
if line[i] == '\\':
escape.append(i)
i += 2
continue
i += 1
if i < n:
quote_e = i - len(escape)
return quote_s, quote_e, escape
else:
return quote_s, -1, escape
i += 1
return -1, -1, escape
def tokenize(line, sep=None, quote='"\'', preserve=False):
if sep == quote:
        raise ValueError, 'sep and quote must be different'
if sep is None:
if len(line) == 0:
return []
line = line.strip()
rst = ['']
n = len(line)
i = 0
while i < n:
quote_s, quote_e, escape = _findquote(line[i:], quote)
if len(escape) > 0:
lines = []
x = 0
for e in escape:
lines.append(line[x:i + e])
x = i + e + 1
lines.append(line[x:])
line = ''.join(lines)
n = len(line)
if quote_s < 0:
sub = n
else:
sub = i + quote_s
if i < sub:
sub_rst = line[i:sub].split(sep)
if sep is None:
if line[sub - 1] in string.whitespace:
sub_rst.append('')
if line[i] in string.whitespace:
sub_rst.insert(0, '')
head = rst.pop()
sub_rst[0] = head + sub_rst[0]
rst += sub_rst
if quote_s < 0:
break
# discard incomplete
# 'a b"c' -> ['a']
if quote_e < 0:
rst.pop()
break
head = rst.pop()
if preserve:
head += line[i + quote_s:i + quote_e + 1]
else:
head += line[i + quote_s + 1:i + quote_e]
rst.append(head)
i += quote_e + 1
return rst
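# Illustrative behaviour of tokenize (worked out from the logic above):
#
#   tokenize('a "b c" d')                  -> ['a', 'b c', 'd']
#   tokenize('a "b c" d', preserve=True)   -> ['a', '"b c"', 'd']
#   tokenize('a,"b,c"', sep=',')           -> ['a', 'b,c']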
def line_pad(linestr, padding=''):
lines = linestr.split("\n")
if type(padding) in types.StringTypes:
lines = [padding + x for x in lines]
elif callable(padding):
lines = [padding(x) + x for x in lines]
lines = "\n".join(lines)
return lines
def format_line(items, sep=' ', aligns=''):
'''
format a line with multi-row columns.
items = [ 'name:',
[ 'John',
'j is my nick'
],
[ 'age:' ],
[ 26, ],
[ 'experience:' ],
[ '2000 THU',
'2006 sina',
'2010 other'
],
]
format_line(items, sep=' | ', aligns = 'llllll')
outputs:
name: | John | age: | 26 | experience: | 2000 THU
| j is my nick | | | | 2006 sina
| | | | | 2010 other
'''
aligns = [x for x in aligns] + [''] * len(items)
aligns = aligns[:len(items)]
aligns = ['r' if x == 'r' else x for x in aligns]
items = [(x if type(x) in listtype else [x])
for x in items]
items = [[_to_str(y)
for y in x]
for x in items]
maxHeight = max([len(x) for x in items] + [0])
max_width = lambda x: max([y.__len__()
for y in x] + [0])
widths = [max_width(x) for x in items]
items = [(x + [''] * maxHeight)[:maxHeight]
for x in items]
lines = []
for i in range(maxHeight):
line = []
for j in range(len(items)):
width = widths[j]
elt = items[j][i]
actualWidth = elt.__len__()
elt = utf8str(elt)
if actualWidth < width:
padding = ' ' * (width - actualWidth)
if aligns[j] == 'l':
elt = elt + padding
else:
elt = padding + elt
line.append(elt)
line = sep.join(line)
lines.append(line)
return "\n".join(lines)
def struct_repr(data, key=None):
'''
Render a data to a multi-line structural(yaml-like) representation.
a = {
1: 3,
'x': {1:4, 2:5},
'l': [1, 2, 3],
}
for l in struct_repr(a):
print l
Output:
1 : 3
l : - 1
- 2
- 3
x : 1 : 4
2 : 5
'''
if type(data) in listtype:
if len(data) == 0:
return ['[]']
max_width = 0
elt_lines = []
for elt in data:
sublines = struct_repr(elt)
sublines_max_width = max([len(x) for x in sublines])
if max_width < sublines_max_width:
max_width = sublines_max_width
elt_lines.append(sublines)
lines = []
for sublines in elt_lines:
# - subline[0]
# subline[1]
# ...
lines.append('- ' + sublines[0].ljust(max_width))
for l in sublines[1:]:
lines.append(' ' + l.ljust(max_width))
return lines
elif type(data) == types.DictType:
if len(data) == 0:
return ['{}']
max_k_width = 0
max_v_width = 0
kvs = []
for k, v in data.items():
k = utf8str(k)
sublines = struct_repr(v)
sublines_max_width = max([len(x) for x in sublines])
if max_k_width < len(k):
max_k_width = len(k)
if max_v_width < sublines_max_width:
max_v_width = sublines_max_width
kvs.append((k, sublines))
kvs.sort(key=key)
lines = []
for k, sublines in kvs:
# foo : sub-0
# sub-1
# b : sub-0
# sub-0
lines.append(k.rjust(max_k_width) + ' : ' +
sublines[0].ljust(max_v_width))
for l in sublines[1:]:
lines.append(' '.rjust(max_k_width) +
' ' + l.ljust(max_v_width))
return lines
else:
return [utf8str(data)]
def _get_key_and_headers(keys, rows):
if keys is None:
if len(rows) == 0:
keys = []
else:
r0 = rows[0]
if type(r0) == types.DictType:
keys = r0.keys()
keys.sort()
elif type(r0) in listtype:
keys = [i for i in range(len(r0))]
else:
keys = ['']
_keys = []
column_headers = []
for k in keys:
if type(k) not in listtype:
k = [k, k]
_keys.append(k[0])
column_headers.append(str(k[1]))
return _keys, column_headers
def _get_colors(colors, col_n):
if colors is None:
colors = []
colors = colors or ([None] * col_n)
while len(colors) < col_n:
colors.extend(colors)
colors = colors[:col_n]
return colors
def format_table(rows,
keys=None,
colors=None,
sep=' | ',
row_sep=None):
keys, column_headers = _get_key_and_headers(keys, rows)
colors = _get_colors(colors, len(keys))
# element of lns is a mulit-column line
# lns = [
# # line 1
# [
# # column 1 of line 1
# ['name:', # row 1 of column 1 of line 1
# 'foo', # row 2 of column 1 of line 1
# ],
#
# # column 2 of line 1
# ['school:',
# 'foo',
# 'bar',
# ],
# ],
# ]
# headers
lns = [
[[a + ': ']
for a in column_headers]
]
for row in rows:
if row_sep is not None:
lns.append([[None] for k in keys])
if type(row) == types.DictType:
ln = [struct_repr(row.get(k, ''))
for k in keys]
elif type(row) in listtype:
ln = [struct_repr(row[int(k)])
if len(row) > int(k) else ''
for k in keys]
else:
ln = [struct_repr(row)]
lns.append(ln)
get_max_width = lambda cols: max([len(utf8str(c[0]))
for c in cols] + [0])
max_widths = [get_max_width(cols) for cols in zip(*lns)]
rows = []
for row in lns:
ln = []
for i in range(len(max_widths)):
color = colors[i]
w = max_widths[i]
ln.append([ColoredString(x.ljust(w), color)
if x is not None else row_sep * w
for x in row[i]])
rows.append(format_line(ln, sep=sep))
return rows
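# Minimal illustration (hypothetical rows); each row may be a dict, a
# list/tuple or a scalar, and `keys` selects and orders the columns:
#
#   rows = [{'name': 'foo', 'n': 1}, {'name': 'bar', 'n': 2}]
#   for ln in format_table(rows, keys=['name', 'n']):
#       print ln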
def _to_str(y):
if isinstance(y, ColoredString):
pass
elif type(y) in (type(0), type(0L)):
y = str(y)
elif type(y) in (type([]), type(()), type({})):
y = str(y)
return y
def utf8str(s):
if type(s) == type(u''):
return s.encode('utf8')
else:
return str(s)
def common_prefix(a, *others):
for b in others:
if type(a) != type(b):
            raise TypeError('a and b have different types: ' + repr((a, b)))
a = _common_prefix(a, b)
return a
def _common_prefix(a, b):
rst = []
for i, elt in enumerate(a):
if i == len(b):
break
if type(elt) != type(b[i]):
            raise TypeError('a and b have different types: ' + repr((elt, b[i])))
if elt == b[i]:
rst.append(elt)
else:
break
# Find common prefix of the last different element.
#
# string does not support nesting level reduction. It infinitely recurses
# down.
# And non-iterable element is skipped, such as int.
i = len(rst)
if i < len(a) and i < len(b) and not isinstance(a, basestring) and hasattr(a[i], '__len__'):
last_prefix = _common_prefix(a[i], b[i])
# discard empty tuple, list or string
if len(last_prefix) > 0:
rst.append(last_prefix)
if isinstance(a, tuple):
return tuple(rst)
elif isinstance(a, list):
return rst
else:
return ''.join(rst)
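# Examples: the comparison recurses into the first differing element, so a
# nested container can contribute a partial prefix of its own:
#
#   common_prefix('abcd', 'abxy')                  -> 'ab'
#   common_prefix([1, 2, [3, 4]], [1, 2, [3, 5]])  -> [1, 2, [3]]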
def colorize(percent, total=100, ptn='{0}'):
if total > 0:
color = fading_color(percent, total)
else:
color = fading_color(-total - percent, -total)
return ColoredString(ptn.format(percent), color)
class ColoredString(object):
def __init__(self, v, color=None, prompt=True):
if type(color) in types.StringTypes:
color = _named_colors[color]
if isinstance(v, ColoredString):
vs = ''.join([x[0] for x in v.elts])
self.elts = [(vs, color)]
else:
self.elts = [(str(v), color)]
self._prompt = prompt
def __str__(self):
rst = []
for e in self.elts:
if len(e[0]) == 0:
continue
if e[1] is None:
val = e[0]
else:
_clr = '\033[38;5;' + str(e[1]) + 'm'
_rst = '\033[0m'
if self._prompt:
_clr = '\001' + _clr + '\002'
_rst = '\001' + _rst + '\002'
val = _clr + str(e[0]) + _rst
rst.append(val)
return ''.join(rst)
def __len__(self):
return sum([len(x[0])
for x in self.elts])
def __add__(self, other):
prompt = self._prompt
if isinstance(other, ColoredString):
prompt = prompt or other._prompt
c = ColoredString('', prompt=prompt)
if isinstance(other, ColoredString):
c.elts = self.elts + other.elts
else:
c.elts = self.elts[:] + [(str(other), None)]
return c
def __mul__(self, num):
c = ColoredString('', prompt=self._prompt)
c.elts = self.elts * num
return c
def __eq__(self, other):
if not isinstance(other, ColoredString):
return False
return str(self) == str(other) and self._prompt == other._prompt
def _find_sep(self, line, sep):
ma = re.search(sep, line)
if ma is None:
return -1, 0
return ma.span()
def _recover_colored_str(self, colored_chars):
rst = ColoredString('')
n = len(colored_chars)
if n == 0:
return rst
head = list(colored_chars[0])
for ch in colored_chars[1:]:
if head[1] == ch[1]:
head[0] += ch[0]
else:
rst += ColoredString(head[0], head[1])
head = list(ch)
rst += ColoredString(head[0], head[1])
return rst
def _split(self, line, colored_chars, sep, maxsplit, keep_sep, keep_empty):
rst = []
n = len(line)
i = 0
while i < n:
if maxsplit == 0:
break
s, e = self._find_sep(line[i:], sep)
if s < 0:
break
edge = s
if keep_sep:
edge = e
rst.append(self._recover_colored_str(colored_chars[i:i+edge]))
maxsplit -= 1
i += e
if i < n:
rst.append(self._recover_colored_str(colored_chars[i:]))
# sep in the end
# 'a b ' -> ['a', 'b', '']
elif keep_empty:
rst.append(ColoredString(''))
return rst
def _separate_str_and_colors(self):
colored_char = []
line = ''
for elt in self.elts:
for c in elt[0]:
colored_char.append((c, elt[1]))
line += elt[0]
return line, colored_char
def splitlines(self, *args):
# to verify arguments
''.splitlines(*args)
sep = '\r(\n)?|\n'
maxsplit = -1
keep_empty = False
keep_sep = False
if len(args) > 0:
keep_sep = args[0]
line, colored_chars = self._separate_str_and_colors()
return self._split(line, colored_chars, sep, maxsplit, keep_sep, keep_empty)
def split(self, *args):
# to verify arguments
''.split(*args)
sep, maxsplit = (list(args) + [None, None])[:2]
if maxsplit is None:
maxsplit = -1
keep_empty = True
keep_sep = False
line, colored_chars = self._separate_str_and_colors()
i = 0
if sep is None:
sep = '\s+'
keep_empty = False
# to skip whitespaces at the beginning
# ' a b'.split() -> ['a', 'b']
n = len(line)
while i < n and line[i] in string.whitespace:
i += 1
return self._split(line[i:], colored_chars[i:], sep, maxsplit, keep_sep, keep_empty)
def join(self, iterable):
rst = ColoredString('')
for i in iterable:
if len(rst) == 0:
rst += i
else:
rst += self + i
return rst
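# Illustrative ColoredString usage (rendered with 256-colour ANSI escapes):
#
#   s = ColoredString('error', 'red') + ' at ' + ColoredString('line 3', 'yellow')
#   str(s)   # the coloured text, escape sequences included
#   len(s)   # 15 -- only the visible characters are counted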
def fading_color(v, total):
return _clrs[_fading_idx(v, total)]
def _fading_idx(v, total=100):
l = len(_clrs)
pos = int(v * l / (total + 0.0001) + 0.5)
pos = min(pos, l - 1)
pos = max(pos, 0)
return pos
_clrs = [63, 67, 37, 36, 41, 46, 82, 118,
154, 190, 226, 220, 214, 208, 202, 196]
_named_colors = {
# by emergence levels
'danger': _clrs[_fading_idx(100)],
'warn': 3,
'loaded': _clrs[_fading_idx(30)],
'normal': 7,
'optimal': _clrs[_fading_idx(0)],
'dark': _clrs[1],
# for human
'blue': 67,
'cyan': 37,
'green': 46,
'yellow': 226,
'red': 196,
'purple': 128,
'white': 255,
}
def _make_colored_function(name):
def _colored(v):
return ColoredString(v, name)
_colored.__name__ = name
return _colored
for _func_name in _named_colors:
setattr(sys.modules[__name__],
_func_name, _make_colored_function(_func_name))
def break_line(linestr, width):
lines = linestr.splitlines()
rst = []
space = ' '
if isinstance(linestr, ColoredString):
space = ColoredString(' ')
for line in lines:
words = line.split(' ')
buf = words[0]
for word in words[1:]:
if len(word) + len(buf) + 1 > width:
rst.append(buf)
buf = word
else:
buf += space + word
if buf != '':
rst.append(buf)
return rst
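# Example: words are packed greedily into lines no wider than `width`; a
# single over-long word is kept intact rather than split:
#
#   break_line('aaa bb cc', 5)  -> ['aaa', 'bb cc']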
| [
"[email protected]"
] | |
9bd05785e015991afd33124377bb512fc14e9e8b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02889/s191736717.py | 3fcc479cdce9831954afefa641e5c33340ad8eee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | n, m, L = map(int, input().split())
abc = [list(map(int, input().split())) for _ in range(m)]
q = int(input())
st = [list(map(int, input().split())) for _ in range(q)]
d = [[float('inf') for _ in range(n)] for _ in range(n)]
for a, b, c in abc:
if c > L:
continue
    # keep the cheapest road when the same pair appears more than once
    d[a-1][b-1] = min(d[a-1][b-1], c)
    d[b-1][a-1] = min(d[b-1][a-1], c)
def warshall_floyd(d, limit):
    # standard Floyd-Warshall; paths through a node farther than `limit`
    # can never yield a useful (<= limit) distance, so skip them early
    for k in range(n):
        for i in range(n):
            if i == k or d[i][k] > limit:
                continue
            for j in range(n):
                if i == j:
                    continue
                d[i][j] = min(d[i][j], d[i][k] + d[k][j])
    return d

# pass 1: shortest fuel distance between every pair, roads longer than L excluded
warshall_floyd(d, L)
for i in range(n):
for j in range(n):
if i == j:
continue
elif d[i][j] <= L:
d[i][j] = 1
else:
d[i][j] = float('inf')
# pass 2: minimum number of "one tank" hops; refuels needed = hops - 1
warshall_floyd(d, float('inf'))
for s, t in st:
if d[s-1][t-1] == float('inf'):
print(-1)
else:
print(d[s-1][t-1] - 1)
| [
"[email protected]"
] | |
b87550bfd0f03a7059f92fcc41a2d2146199003b | ecdf9256853e11d6105e2b9ad92ba912602d97d7 | /hackerrank/contest/project_euler/prime_pair_connection.py | 1c15c254a9bb9448f4002f98039f487eeddce0eb | [] | no_license | rgsriram/Algorithms | 364fda568356834e32ec247438d21202bebc838d | d4f9acb1a60bd098a601d8173dfdad447a02fd74 | refs/heads/master | 2021-01-10T05:11:05.688731 | 2019-03-20T04:59:10 | 2019-03-20T04:59:10 | 49,176,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | __author__ = 'sriram'
def get_prime_numbers(p1, p2):
primes = []
for num in range(p1, p2):
if num > 1:
for i in range(2, num):
if (num % i) == 0:
break
else:
primes.append(num)
return primes
def find_next_prime(a, b):
for p in range(a, b):
for i in range(2, p):
if p % i == 0:
break
else:
return p
def find_prime_connection(primes):
    # Project Euler 134 "Prime pair connections": for each pair of
    # consecutive primes (prime1, prime2), the connection is the smallest
    # number that ends with the digits of prime1 and is divisible by
    # prime2. Assumes prime2 is coprime to 10 (true for primes above 5).
    total = 0
    for i in range(1, len(primes)):
        prime1, prime2 = primes[i - 1], primes[i]
        mod = 10 ** len(str(prime1))  # preserves the trailing digits of prime1
        # solve k*mod + prime1 == 0 (mod prime2); inverse via Fermat's little theorem
        k = (-prime1 * pow(mod, prime2 - 2, prime2)) % prime2
        total += k * mod + prime1
    return total
def prime_pair_connection(p1, p2):
    primes = get_prime_numbers(p1, p2)
    primes.append(find_next_prime(primes[len(primes)-1]+1, 2*(primes[len(primes)-1]+1)))
    print find_prime_connection(primes)
if __name__ == '__main__':
n = int(raw_input().strip())
(p1, p2) = map(int, raw_input().strip().split())
prime_pair_connection(p1, p2) | [
"[email protected]"
] | |
b8a736a22cb410537485ef80ae260fcc2f764f8c | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/tensorflow/python/profiler/tfprof_logger.py | 2c7113ed5af0951210c044217a30d85aaf6955aa | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a6c4e234622b91b3aca7f80f19569dcc2f356d4cce446bc9c17942a4daa80264
size 6919
| [
"[email protected]"
] | |
5f2592c9076e6555e32ed9558c6b602aa7dfde6f | dca0bd2e04dda3801d395c2a6ab2f9d95be79551 | /Python/SmallProject/Strings.py | 2cdec5809358ac5554ee50b3888cf51d6a055909 | [] | no_license | A-khateeb/Full-Stack-Development-Path | ab8c86abea2f983fb8e0046a65b99772416c754c | 5a5eaa198367cc95a6b5638e9740f4ad564dec23 | refs/heads/master | 2021-06-01T23:52:04.965494 | 2020-05-01T22:59:20 | 2020-05-01T22:59:20 | 89,286,943 | 2 | 0 | null | 2017-12-22T22:21:52 | 2017-04-24T21:04:07 | Shell | UTF-8 | Python | false | false | 854 | py | p="hello\n world"
print(p)
c = '20\u20AC'
print(c)
t = """f
Hello World of Champion!!!!
Are you ready???
Yes Perfect!
"""
print(t)
print("""
Welcome to the GPA calculator
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.
""")
points = {'A+': 4.0 ,'A': 4.0 ,'A-': 3.67,
'B+':3.33,'B':3.0,'B-':2.67,
'C+':2.33,'C':2.0,'C-':1.67,
'D+':1.33,'D':1.0,'F':0.0}
num_courses = 0
total_points = 0
done = False
while not done:
grade = input()
if grade =='':
done = True
elif grade not in points:
print("Unknown grade '{0}' being ignored".format(grade))
elif grade is None:
print("No value entered")
else:
num_courses+=1
total_points+=points[grade]
if num_courses >0:
print('Your GPA is {0: .3}'.format(total_points/num_courses)) | [
"[email protected]"
] | |
5f9d814e165bbff7341f8adaed112bfee113391c | de4da7c45581f72adaf8e328a89cb3d57fe3613f | /fundamentos/iteraveis/mediana.py | b2b5ee777270724f4ba0c08725826002c1e9e3b3 | [] | no_license | ramalho/propython | 2469be7492554762d05f9b0ce5c0dc3a51bd3a18 | 76c2b52755e08d49929cdc2a523db72735240e72 | refs/heads/master | 2022-06-01T22:51:07.659074 | 2022-05-22T18:22:21 | 2022-05-22T18:22:21 | 140,458 | 39 | 13 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/env python
# coding: utf-8
def mediana(lista):
'''
    mediana: the value of the middle item of the sorted list, or
    the mean of the two middle items
>>> mediana([1,2,3,4,5])
3
>>> mediana([1,2,3,4])
2.5
>>> mediana([3,2,1,4])
2.5
'''
centro = len(lista)/2
ordem = sorted(lista)
if len(ordem) % 2:
return ordem[centro]
else:
return float(ordem[centro-1]+ordem[centro])/2
if __name__=='__main__':
import doctest
doctest.testmod() | [
"[email protected]"
] | |
b5288b3d7ae6d135746fcdc7073b43f639a2df12 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2799/60632/282233.py | eeb1cc91f7d91a25920924cfabbd0123c28bc4dc | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # def is_pow_2(x) -> bool:
# if x % 1 != 0:
# return False
# for i in range(10):
# if x == int(pow(2, i)):
# return True
# return False
#
#
# def is_pow_3(x) -> bool:
# if x % 1 != 0:
# return False
# for i in range(10):
# if x == int(pow(3, i)):
# return True
# return False
#
n = int(input())
data = list(map(int, input().split(' ')))
# The values can all be made equal by multiplying by 2s and 3s exactly
# when they agree after stripping every factor of 2 and 3 from each one.
def strip_2_and_3(x):
    while x % 2 == 0:
        x //= 2
    while x % 3 == 0:
        x //= 3
    return x
base = strip_2_and_3(data[0])
print('Yes' if all(strip_2_and_3(x) == base for x in data) else 'No')
| [
"[email protected]"
] | |
b76ace0fdb73f5dd8108600d29b2065e9b696af4 | 7b76dfd66ee462b3edae4d9485e2d50585e8a8ac | /bin/conda/clang/link_flags.py | fc8b9b9ada03f537a3495a301fb691f0cadc684d | [
"Apache-2.0"
] | permissive | asmeurer/ClangLite | 5f4ce8855efb3dd8da42adc0bacba2695fe44cb2 | b6220069f4c73645197a2042555b20097842babd | refs/heads/master | 2021-01-24T03:13:02.544366 | 2018-01-30T14:10:30 | 2018-01-30T14:10:30 | 122,881,656 | 0 | 0 | null | 2018-02-25T22:02:28 | 2018-02-25T22:02:28 | null | UTF-8 | Python | false | false | 526 | py | import os
with open('config.txt', 'r') as filehandler:
output = filehandler.readlines()
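# Assumed input format: config.txt holds the compiler driver's verbose
# output, which includes a line such as (hypothetical paths):
#
#   LIBRARY_PATH=$PREFIX/lib:/usr/lib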
LIBRARY_PATH = list()
for line in output:
    if line.startswith('LIBRARY_PATH='):
        # slice the prefix off; str.lstrip() strips a *set of characters*,
        # not a prefix, so it could eat leading path characters as well
        line = line[len('LIBRARY_PATH='):].strip()
        LIBRARY_PATH.extend(line.split(':'))
LIBRARY_PATH = {os.path.realpath(library_path).replace(os.environ.get('PREFIX', '$PREFIX'), '$PREFIX') for library_path in LIBRARY_PATH}
print(" ".join(["-Wl,-rpath," + library_path + " -L" + library_path for library_path in LIBRARY_PATH])) | [
"[email protected]"
] | |
7fc4ba920a5745dd12c78d172be1368a17c2bc22 | f777d6cc5c713cb983119687fd6a6403355adfb4 | /YouPBX/xadmin/plugins/topnav.py | cca473bdd92c92dbaa2d63947faaf15d6b36f361 | [] | no_license | maozhiqiang/callcenter | 71304bb451482ec61ceafbcfc017472ac2de4dac | 65678718b9beadf61aa6786b43d7192f63b2cfee | refs/heads/master | 2021-09-05T15:14:05.250642 | 2018-01-12T07:33:37 | 2018-01-12T07:33:37 | 119,644,546 | 1 | 1 | null | 2018-01-31T06:24:55 | 2018-01-31T06:24:55 | null | UTF-8 | Python | false | false | 2,574 | py |
from django.template import loader
from django.utils.text import capfirst
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.defs import SEARCH_VAR
from xadmin.views import BasePlugin, SiteView
class TopNavPlugin(BasePlugin):
global_search_models = None
global_add_models = None
def get_context(self, context):
return context
# Block Views
def block_top_navbar(self, context, nodes):
search_models = []
site_name = self.admin_site.name
if self.global_search_models == None:
models = self.admin_site._registry.keys()
else:
models = self.global_search_models
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "view"):
info = (app_label, model._meta.module_name)
if getattr(self.admin_site._registry[model], 'search_fields', None):
try:
search_models.append({
'title': _('Search %s') % capfirst(model._meta.verbose_name_plural),
'url': reverse('xadmin:%s_%s_changelist' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'search_models': search_models, 'search_name': SEARCH_VAR}))
def block_top_navmenu(self, context, nodes):
add_models = []
site_name = self.admin_site.name
if self.global_add_models == None:
models = self.admin_site._registry.keys()
else:
models = self.global_add_models
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "add"):
info = (app_label, model._meta.module_name)
try:
add_models.append({
'title': _('Add %s') % capfirst(model._meta.verbose_name),
'url': reverse('xadmin:%s_%s_add' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
nodes.append(
loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'add_models': add_models}))
site.register_plugin(TopNavPlugin, SiteView)
| [
"[email protected]"
] | |
b3215ee1f2ce82df254ea7f29be62181624a3b00 | 1bca4fc0734aa41bb1c6da461ec3b84ff3dd99bc | /test/functional/feature_logging.py | 2b2166ba56ba7a058700cb28020188083005abdf | [
"MIT"
] | permissive | wolfoxonly/qqc | 12f892c9030c5232d403b609decf5b297cd8ceaf | 807e67ba65b555ab38a655ae4823fa9af2ae3bc4 | refs/heads/master | 2020-03-10T20:46:31.603204 | 2018-04-15T14:33:17 | 2018-04-15T14:33:17 | 129,576,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The QQcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import QQcoinTestFramework
class LoggingTest(QQcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
# test default log file name
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
if __name__ == '__main__':
LoggingTest().main()
| [
"[email protected]"
] | |
554d40d90484290f36d0abf7985bdc61cfba1da1 | 89b2f5b08c441d4af0a63ed2ec1a5889bc92f0f7 | /Python OOP 2020/Excersises/excersise3/document_management/project/storage.py | 65a61d8b7ca5ec69ca44292ec291178ae92af399 | [] | no_license | KoliosterNikolayIliev/Softuni_education | 68d7ded9564861f2bbf1bef0dab9ba4a788aa8dd | 18f1572d81ad9eb7edd04300deb8c81bde05d76b | refs/heads/master | 2023-07-18T09:29:36.139360 | 2021-08-27T15:04:38 | 2021-08-27T15:04:38 | 291,744,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | class Storage:
def __init__(self):
self.categories = []
self.topics = []
self.documents = []
def add_category(self, category):
if category not in self.categories:
self.categories.append(category)
def add_topic(self, topic):
if topic not in self.topics:
self.topics.append(topic)
def add_document(self, document):
if document not in self.documents:
self.documents.append(document)
def edit_category(self, category_id, new_name):
category = [x for x in self.categories if x.id == category_id][0]
category.name = new_name
def edit_topic(self, topic_id, new_topic, new_storage_folder):
topic = [x for x in self.topics if x.id == topic_id][0]
topic.topic = new_topic
topic.storage_folder = new_storage_folder
def edit_document(self, document_id, new_file_name):
document = [x for x in self.documents if x.id == document_id][0]
document.file_name = new_file_name
def delete_category(self, category_id):
current_category = [x for x in self.categories if x.id == category_id][0]
self.categories.remove(current_category)
def delete_topic(self, topic_id):
current_topic = [x for x in self.topics if x.id == topic_id][0]
self.topics.remove(current_topic)
def delete_document(self, document_id):
current_doc = [x for x in self.documents if x.id == document_id][0]
self.documents.remove(current_doc)
def get_document(self, document_id):
doc = [x for x in self.documents if x.id == document_id][0]
return doc
def __repr__(self):
        result = "\n".join(repr(x) for x in self.documents)
return result
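# Minimal usage sketch (assumes the stored objects expose an `id` attribute;
# `doc` below is illustrative, not from this module):
#
#   storage = Storage()
#   storage.add_document(doc)
#   storage.edit_document(doc.id, 'renamed.txt')
#   print(storage.get_document(doc.id))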
| [
"[email protected]"
] | |
01126cedb026a5884412b02d62b108c94bdae6f1 | 525fe8d3869ae9a34c294286120d098be6655253 | /timetable/migrations/0004_auto_20150410_1941.py | 2d5086aef916924977bb3fa603467293349b99b1 | [] | no_license | AlexGnatuyk/Datium | ce7aa8e9e5ef5b159f6fd42c1fb7cb1567339aa9 | 3ecced6d8d7fc8c2a08a9c6ee1ebc45f13214bbe | refs/heads/master | 2020-07-15T10:33:35.232663 | 2016-03-25T09:39:55 | 2016-03-25T09:39:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('timetable', '0003_auto_20150409_2105'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='day',
field=models.IntegerField(choices=[(1, b'\xd0\x9f\xd0\xbe\xd0\xbd\xd0\xb5\xd0\xb4\xd0\xb5\xd0\xbb\xd1\x8c\xd0\xbd\xd0\xb8\xd0\xba'), (2, b'\xd0\x92\xd1\x82\xd0\xbe\xd1\x80\xd0\xbd\xd0\xb8\xd0\xba'), (3, b'\xd0\xa1\xd1\x80\xd0\xb5\xd0\xb4\xd0\xb0'), (4, b'\xd0\xa7\xd0\xb5\xd1\x82\xd0\xb2\xd0\xb5\xd1\x80\xd0\xb3'), (5, b'\xd0\x9f\xd1\x8f\xd1\x82\xd0\xbd\xd0\xb8\xd1\x86\xd0\xb0'), (6, b'\xd0\xa1\xd1\x83\xd0\xb1\xd0\xb1\xd0\xbe\xd1\x82\xd0\xb0')]),
),
migrations.AlterField(
model_name='lesson',
name='lesson_type',
field=models.IntegerField(choices=[(0, b''), (1, b'\xd0\x9f\xd1\x80\xd0\xb0\xd0\xba\xd1\x82\xd0\xb8\xd0\xba\xd0\xb0'), (2, b'\xd0\x9b\xd0\xb5\xd0\xba\xd1\x86\xd0\xb8\xd1\x8f'), (3, b'\xd0\xa1\xd0\xb5\xd0\xbc\xd0\xb8\xd0\xbd\xd0\xb0\xd1\x80')]),
),
]
| [
"[email protected]"
] | |
de23afb24ca253f8d28cb75f21bfd441877cb203 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-oroas/huaweicloudsdkoroas/v1/model/list_task_response.py | 65795932472c67e2bcd889f5964105f0d963fbd1 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,944 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListTaskResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'tasks': 'list[TaskListItemVo]'
}
attribute_map = {
'count': 'count',
'tasks': 'tasks'
}
def __init__(self, count=None, tasks=None):
"""ListTaskResponse
The model defined in huaweicloud sdk
        :param count: Count
        :type count: int
        :param tasks: Task list item views
        :type tasks: list[:class:`huaweicloudsdkoroas.v1.TaskListItemVo`]
"""
super(ListTaskResponse, self).__init__()
self._count = None
self._tasks = None
self.discriminator = None
if count is not None:
self.count = count
if tasks is not None:
self.tasks = tasks
@property
def count(self):
"""Gets the count of this ListTaskResponse.
        Count
:return: The count of this ListTaskResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ListTaskResponse.
        Count
:param count: The count of this ListTaskResponse.
:type count: int
"""
self._count = count
@property
def tasks(self):
"""Gets the tasks of this ListTaskResponse.
        Task list item views
:return: The tasks of this ListTaskResponse.
:rtype: list[:class:`huaweicloudsdkoroas.v1.TaskListItemVo`]
"""
return self._tasks
@tasks.setter
def tasks(self, tasks):
"""Sets the tasks of this ListTaskResponse.
        Task list item views
:param tasks: The tasks of this ListTaskResponse.
:type tasks: list[:class:`huaweicloudsdkoroas.v1.TaskListItemVo`]
"""
self._tasks = tasks
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListTaskResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
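# Illustrative usage sketch (hypothetical values; in practice instances are
# built from the service response):
#
#   resp = ListTaskResponse(count=0, tasks=[])
#   print(resp.to_str())  # JSON string via sanitize_for_serialization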
| [
"[email protected]"
] | |
b69617242432947ad6c22d33267cf33e6ed5ad0f | 674a48ab817fb841417d72de7197a84b2d2b71b7 | /CLOVER/nature2017/tm_blob_chris.py | b2b7862be222928b21cf0ef4c42bf48dfcc00071 | [] | no_license | cornkle/proj_CEH | 0d1556bbb5992e663257841ada333f32e6da3e22 | 790ad1aa7e7a8c6593a21ee53b2c946b2f7a356b | refs/heads/master | 2023-09-01T01:26:58.669089 | 2023-08-23T14:22:47 | 2023-08-23T14:22:47 | 55,054,763 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,514 | py | import numpy as np
import pickle as pkl
import datetime as dt
from eod import trmm, msg, tm_utils
import ipdb
from collections import defaultdict
import matplotlib.pyplot as plt
from utils import u_arrays as ua
import cartopy.crs as ccrs
YRANGE=range(2004,2014)
#AREA=[-16,4,16,20]
AREA=None
def tm_overlap_blobs():
trmm_folder = "/users/global/cornkle/data/OBS/TRMM/trmm_swaths_WA/"
msg_folder = '/users/global/cornkle/data/OBS/meteosat_WA30'
tObj = trmm.ReadWA(trmm_folder, area=AREA, yrange=YRANGE)
mObj = msg.ReadMsg(msg_folder)
files = tObj.fpaths
dates = tObj.dates
mdic = defaultdict(list)
mdic_f = defaultdict(list)
mlon = mObj.lon
mlat = mObj.lat
mll = tm_utils.ll_toMSG(mlon, mlat)
mxy = ua.unique_of_pair(mll['x'], mll['y'])
cnt = 0
datess = []
# cycle through TRMM dates - only dates tat have a certain number of pixels in llbox are considered
for _y, _m, _d, _h, _mi in zip(dates.y, dates.m, dates.d, dates.h, dates.mi):
# set zero shift time for msg
date = dt.datetime(_y, _m, _d, _h, _mi)
dt0 = tm_utils.minute_delta(_mi, 30)
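        # (minute_delta presumably returns the signed offset, in minutes, from
        # the TRMM minute to the nearest 30-minute MSG time slot)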
print('TRMM', date, 'dt', dt0, 'MSG', date + dt.timedelta(minutes=int(dt0)) )
#time difference max
# if abs(dt0) > 4:
# continue
ndate = date + dt.timedelta(minutes=int(dt0))
print('TRMM', date, 'MSG', ndate)
mObj.set_date(ndate.year, ndate.month, ndate.day , ndate.hour, ndate.minute)
if not (mObj.tpath or mObj.bpath):
print('No table or blob file, continue')
continue
dff = mObj.get_table()
dstring = str(ndate.year) + '-' + str(ndate.month).zfill(2) + '-' + str(ndate.day).zfill(2) + ' ' + str(ndate.hour).zfill(2) + ':' + str(
ndate.minute).zfill(2) + ':' + str(00).zfill(2)
if not dstring in dff['Date'].as_matrix():
continue
sel = dff.loc[dff['Date'] == dstring]
big = sel.loc[sel['Area'] >= 25000] # only mcs over 25.000km2
print('big area', big['Area'].values)
if big.empty:
continue
td = tObj.get_ddata(_y, _m, _d, _h, _mi, cut=[0,22])
        # td may be None (no data) or an array-like whose truth value is
        # ambiguous, so guard the emptiness test.
        try:
            if not td:
                print('TRMM problem')
                continue
        except Exception:
            pass
md = mObj.get_data(llbox=AREA)
md_blob = mObj.get_blob(llbox=AREA)
blobs = md_blob.values
blat = big['Lat'].values.tolist()
blon = big['Lon'].values.tolist()
barea = big['Area'].values.tolist()
btemp = big['Temp'].values.tolist()
for lon, lat, bt, ba in zip(blon, blat, btemp, barea):
mcs = tm_utils.ll_toMSG(lon, lat)
point = np.where((mll['x'] == mcs['x']) & (mll['y'] == mcs['y']))
# if not all(point):
# if mcs['x'] > mll['x'].max() or mcs['x'] < mll['x'].min() or mcs['y'] > mll['y'].max() or mcs['y'] < mll['y'].min():
# continue
# else:
# print('Point not found but should be in!')
# continue
# blob number
nb = blobs[point]
# if we find a 0 instead of a blob, continue
if not nb[0]:
continue
isblob = np.where(blobs == nb)
if isblob[0].size < 2500:
print('Ooops blob too small? This should not happen')
continue
# lat lons of complete blob
blats = md['lat'].values[isblob]
blons = md['lon'].values[isblob]
# msg indices of complete blob
my = mll['y'][isblob]
mx = mll['x'][isblob]
blatmin, blatmax = blats.min(), blats.max()
blonmin, blonmax = blons.min(), blons.max()
# whole blob must be inside TRMM. Attention: This draws a rectangle.
# There is still a chance that blob is not in TRMM. Checked later!
if not (td['lon'].values.min() < blonmin) & (td['lon'].values.max() > blonmax):
continue
if not (td['lat'].values.min() < blatmin) & (td['lat'].values.max() > blatmax):
continue
ll_trmm = tm_utils.ll_toMSG(td['lon'].values, td['lat'].values)
tx = ll_trmm['x']
ty = ll_trmm['y']
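            # unique_of_pair presumably encodes each (x, y) index pair as a
            # single scalar, so np.in1d can intersect the TRMM and MSG pixel
            # sets directly.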
mpair = ua.unique_of_pair(mx, my)
tpair = ua.unique_of_pair(tx, ty)
#Do we need to do it that way?
inter = np.in1d(tpair, mpair) # returns false and true, whole grid
inter_rev = np.in1d(mpair, tpair.flat[inter]) # Attention: this leaves out meteosat cells where no closest TRMM cell (since TRMM is coarser!)
# have at least 500 pixels shared for MCS between TRMM and MSG
if sum(inter) < 500:
continue
print(_y, _m, _d, _h, _mi)
bprcp = td['p'].values.flat[inter]
bflags = td['flags'].values.flat[inter]
mtt = md['t'].values[isblob].flat[inter_rev]
# we need same number of TRMM and MSG per plot to do the masking
if not bprcp.size == mtt.size:
print('Tprcp and MSGT not same, someting wrong!')
continue
# rtest = np.copy(td['p'].values) # check the TRMM pixels identified
# rtest.flat[inter] = 1500 # np.where(inter)
#
#
# maskr = np.zeros_like(md['t'].values)
# maskr[isblob] = 1000
# # np.where(maskr>999)
#
# mxinter = np.in1d(mxy, mpair[inter_rev])
# maskrr = np.zeros_like(md['t'].values)
# maskrr.flat[mxinter] = 1100
#
# plt.figure()
# ax = plt.axes(projection=ccrs.PlateCarree())
#
# plt.contourf(mlon, mlat, maskr,
# transform=ccrs.PlateCarree()) # green, MSG blob
# plt.contourf(td['lon'].values, td['lat'].values, rtest, levels=np.arange(1300, 1600, 100),
# transform=ccrs.PlateCarree()) # identified TRMM pixel
# #Identified MSG temperatures, problem: only nearest to TRMM, omits MSG pixels
# plt.contourf(mlon, mlat, maskrr, levels=np.arange(1097, 1099, 1),
# transform=ccrs.PlateCarree()) # green, MSG blob
# ax.coastlines()
if np.count_nonzero(bprcp) < 50:
continue
mask = tm_utils.getTRMMconv(bflags) # filter for convective rain
mask = np.array(mask)
smask = tm_utils.getTRMMstrat(bflags) # filter for convective rain
smask = np.array(smask)
nz_bprcp = np.sum(bprcp>0.1)
tall = np.nanmean(mtt[np.isfinite(bprcp)])
# remove all these zero rainfall from blob
bprcpNZ = bprcp[bprcp>0.1]
mttNZ = mtt[bprcp>0.1]
flagsNZ = bflags[bprcp>0.1]
maskNZ = tm_utils.getTRMMconv(flagsNZ) # list of 0 and 1, flattened!
smaskNZ = tm_utils.getTRMMstrat(flagsNZ) # list of 0 and 1, flattened!
if sum(maskNZ) < 2:
continue
datess.append((_y, _m, _d, _h, _mi, ba, td['lon'].values.min(), td['lon'].values.max(),
td['lat'].values.min(), td['lat'].values.max(), blonmin, blonmax, blatmin, blatmax))
pm = np.nanmean(bprcpNZ)
tm = np.nanmean(mttNZ)
ppm = np.percentile(bprcpNZ, 98)
pmax = np.nanmax(bprcp)
pi = float(np.sum(bprcpNZ>30)) / float(bprcpNZ.size)
mdic['p'].append(pm) # prcp mean of every MCS (no zero)
mdic['pp'].append(ppm) # value of 98 percentile of MCS (no zero)
mdic['rain'].append(bprcpNZ) # whole rainfall field, no sum
mdic['pmax'].append(pmax) # maximum pcp in MCS
mdic['pi'].append(pi) # share of > 30mmh pixel of > 0 pixel
mdic['t'].append(tm) # T where PCP > 0 and overlap
mdic['tall'].append(tall) # T where cloud and TRMM valid (incl 0 rain)
mdic['hod'].append(_h) # hour of day for image
mdic['yr'].append(_y) # year for image
mdic['mon'].append(_m) # month for image
mdic['lat'].append(lat)
mdic['lon'].append(lon)
mdic['tpixel_nzero'].append(nz_bprcp) # nb pixel of MCS for PCP > 0
mdic['tpixel'].append(bprcp.size) # nb pixel of MCS including 0
mdic['tpixel_conv'].append(sum(mask)) # number convective pixel
mdic['tpixel_strat'].append(sum(smask)) # number stratiform pixel
            mdic['tpixel_zero'].append(np.size(bprcp) - nz_bprcp)  # number of zero-rain pixels (nz_bprcp is already a count)
mdic['twhole'].append(bt)
mdic['area'].append(isblob[0].size)
print('Passed flag filter')
# check for at least 500 TRMM pixels in MSG above 0 rain
# if np.count_nonzero(bprcp) < 500:
# continue
pc = np.nanmean(bprcpNZ.flat[np.where(maskNZ)])
tc = np.nanmean(mttNZ.flat[np.where(maskNZ)])
pic = float(np.greater(bprcpNZ.flat[np.where(maskNZ)], 30.).sum()) / float(sum(maskNZ))
ppc = np.percentile(bprcpNZ.flat[np.where(maskNZ)], 98)
pmaxc = bprcpNZ.flat[np.where(maskNZ)].max()
# print 'Nb', nb
mdic_f['pconv'].append(pc)
mdic_f['piconv'].append(pic)
mdic_f['ppconv'].append(ppc)
mdic_f['pmaxconv'].append(pmaxc)
mdic_f['tconv'].append(tc)
mdic_f['tnfconv'].append(tm)
mdic_f['hod'].append(_h)
mdic_f['yr'].append(_y)
mdic_f['mon'].append(_m)
mdic_f['lat'].append(lat)
mdic_f['lon'].append(lon)
mdic_f['tpixel_convNZ'].append(sum(maskNZ))
mdic_f['tpixel_stratNZ'].append(sum(smaskNZ))
cnt = cnt + 1
print(cnt)
myDicts = [mdic, mdic_f]
for d in datess: print(d)
pkl.dump(myDicts, open('/users/global/cornkle/data/OBS/test/c_paper_rainfield.p',
'wb')) # MSG_TRMM_temp_pcp_300px'+str(yrange[0])+'-'+str(yrange[-1])+'_new.p', 'wb'))
print(
'Saved ' + 'MSG_TRMM_temp_pcp_' + str(YRANGE[0]) + '-' + str(YRANGE[-1]) + '_new.p with ' + str(
cnt) + ' MCSs')
if __name__ == "__main__":
tm_overlap_blobs()
| [
"[email protected]"
] | |
49e1af1b05b53db53a3db3d9db8e5646ec7493d4 | 6fdddf7ba514cb3191786a61a06c9f12d6182890 | /spyder.py | 6310df5561aa21229735e54be0e4efa029dfe339 | [] | no_license | ash018/Scrap | 64615c9127fa6ecc2e633e6e276e34badf8725ab | 6f45ca9fdc1700686d88cf2aa1403fb9c0b6b05a | refs/heads/master | 2020-03-31T06:28:56.519456 | 2020-01-23T11:07:24 | 2020-01-23T11:07:24 | 151,984,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 12:49:01 2018
@author: smakash
"""
import json
import scrapy
import urllib.request
class SpidyQuotesSpider(scrapy.Spider):
name = 'spidyquotes'
quotes_base_url = 'http://spidyquotes.herokuapp.com/api/quotes?page=%s'
start_urls = [quotes_base_url % 1]
download_delay = 1.5
def parse(self, response):
data = json.loads(response.body)
for item in data.get('quotes', []):
yield {
'text': item.get('text'),
'author': item.get('author', {}).get('name'),
'tags': item.get('tags'),
}
if data['has_next']:
next_page = data['page'] + 1
yield scrapy.Request(self.quotes_base_url % next_page)
if __name__ == '__main__':
    # Instantiating the spider alone does not start a crawl; run it through
    # Scrapy's CrawlerProcess instead.
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(SpidyQuotesSpider)
    process.start()
| [
"[email protected]"
] | |
8a6a8e8e93fd8928ad317a39ef306e002e825d8a | 254ef44b90485767a3aea8cbe77dc6bf77dddaeb | /441排列硬币.py | a5322fb823433368efc0bf8352f3bf9d19f11c17 | [] | no_license | XinZhaoFu/leetcode_moyu | fae00d52a52c090901021717df87b78d78192bdb | e80489923c60ed716d54c1bdeaaf52133d4e1209 | refs/heads/main | 2023-06-19T02:50:05.256149 | 2021-07-09T00:50:41 | 2021-07-09T00:50:41 | 331,243,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | """
You have n coins in total and need to arrange them in a staircase shape,
where the k-th row must contain exactly k coins.
Given a number n, find the total number of complete staircase rows.
n is a non-negative integer within the range of a 32-bit signed integer.
Example 1:
n = 5
The coins can be arranged as:
¤
¤ ¤
¤ ¤
Because the third row is incomplete, return 2.
"""
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
if n < 2:
return n
return int((2**0.5) * ((n+0.125)**0.5) - 0.5)
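# The closed form above solves k*(k+1)/2 <= n for the largest integer k:
# k = floor((sqrt(8n + 1) - 1) / 2) = floor(sqrt(2) * sqrt(n + 0.125) - 0.5).
# Quick sanity check (illustrative):
#   Solution().arrangeCoins(5) == 2
#   Solution().arrangeCoins(8) == 3   # 1 + 2 + 3 = 6 <= 8 < 10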
| [
"[email protected]"
] | |
5b3caafcc9c1351e9b98f454767c5a65c3531167 | 3be42b83a15d022f5863c96ec26e21bac0f7c27e | /tensorflow_probability/python/stats/moving_stats.py | 5c4e9da9174eb6bd64c78376c1c2fdf995e7ed54 | [
"Apache-2.0"
] | permissive | ogrisel/probability | 846f5c13cddee5cf167b215e651b7479003f15d2 | 8f67456798615f9bf60ced2ce6db5d3dba3515fe | refs/heads/master | 2022-11-09T10:53:23.000918 | 2020-07-01T23:16:03 | 2020-07-01T23:17:25 | 276,580,359 | 2 | 1 | Apache-2.0 | 2020-07-02T07:37:58 | 2020-07-02T07:37:57 | null | UTF-8 | Python | false | false | 17,525 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for computing moving statistics of a value stream."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
__all__ = [
'assign_log_moving_mean_exp',
'assign_moving_mean_variance',
'moving_mean_variance_zero_debiased',
]
def assign_moving_mean_variance(value, moving_mean, moving_variance=None,
zero_debias_count=None, decay=0.99, axis=(),
name=None):
"""Compute one update to the exponentially weighted moving mean and variance.
The `value` updated exponentially weighted moving `moving_mean` and
`moving_variance` are conceptually given by the following recurrence
relations ([Welford (1962)][1]):
```python
new_mean = old_mean + (1 - decay) * (value - old_mean)
new_var = old_var + (1 - decay) * (value - old_mean) * (value - new_mean)
```
This function implements the above recurrences in a numerically stable manner
and also uses the `assign_add` op to allow concurrent lockless updates to the
supplied variables.
For additional references see [this John D. Cook blog post][
https://www.johndcook.com/blog/standard_deviation/]
(whereas we use `1 - decay = 1 / k`) and
[Finch (2009; Eq. 143)][2] (whereas we use `1 - decay = alpha`).
Since variables that are initialized to a `0` value will be `0` biased,
providing `zero_debias_count` triggers scaling the `moving_mean` and
`moving_variance` by the factor of `1 - decay ** (zero_debias_count + 1)`.
For more details, see `tfp.stats.moving_mean_variance_zero_debiased`.
Args:
value: `float`-like `Tensor` representing one or more streaming
observations. When `axis` is non-empty `value ` is reduced (by mean) for
updated `moving_mean` and `moving-variance`. Presumed to have same shape
as `moving_mean` and `moving_variance`.
moving_mean: `float`-like `tf.Variable` representing the exponentially
weighted moving mean. Same shape as `moving_variance` and `value`. This
function presumes the `tf.Variable` was created with all zero initial
value(s).
moving_variance: `float`-like `tf.Variable` representing the exponentially
weighted moving variance. Same shape as `moving_mean` and `value`. This
function presumes the `tf.Variable` was created with all zero initial
value(s).
Default value: `None` (i.e., no moving variance is computed).
zero_debias_count: `int`-like `tf.Variable` representing the number of times
this function has been called on streaming input (*not* the number of
reduced values used in this functions computation). When not `None` (the
default) the returned values for `moving_mean` and `moving_variance` are
"zero debiased", i.e., corrected for their presumed all zeros
intialization. Note: the `tf.Variable`s `moving_mean` and
`moving_variance` *always* store the unbiased calculation, regardless of
setting this argument. To obtain unbiased calculations from these
`tf.Variable`s, see `tfp.stats.moving_mean_variance_zero_debiased`.
Default value: `None` (i.e., no zero debiasing calculation is made).
decay: A `float`-like `Tensor` representing the moving mean decay. Typically
close to `1.`, e.g., `0.99`.
Default value: `0.99`.
axis: The dimensions to reduce. If `()` (the default) no dimensions are
reduced. If `None` all dimensions are reduced. Must be in the range
`[-rank(value), rank(value))`.
Default value: `()` (i.e., no reduction is made).
name: Python `str` prepended to op names created by this function.
Default value: `None` (i.e., 'assign_moving_mean_variance').
Returns:
moving_mean: The `value`-updated exponentially weighted moving mean.
Debiased if `zero_debias_count is not None`.
moving_variance: The `value`-updated exponentially weighted moving variance.
Debiased if `zero_debias_count is not None`.
Raises:
TypeError: if `moving_mean` does not have float type `dtype`.
TypeError: if `moving_mean`, `moving_variance`, `value`, `decay` have
different `base_dtype`.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
d = tfd.MultivariateNormalTriL(
loc=[-1., 1.],
scale_tril=tf.linalg.cholesky([[0.75, 0.05],
[0.05, 0.5]]))
d.mean()
# ==> [-1., 1.]
d.variance()
# ==> [0.75, 0.5]
moving_mean = tf.Variable(tf.zeros(2))
moving_variance = tf.Variable(tf.zeros(2))
zero_debias_count = tf.Variable(0)
for _ in range(100):
m, v = tfp.stats.assign_moving_mean_variance(
value=d.sample(3),
moving_mean=moving_mean,
moving_variance=moving_variance,
zero_debias_count=zero_debias_count,
decay=0.99,
axis=-2)
print(m.numpy(), v.numpy())
# ==> [-1.0334632 0.9545268] [0.8126194 0.5118788]
# ==> [-1.0293456 0.96070296] [0.8115873 0.50947404]
# ...
# ==> [-1.025172 0.96351 ] [0.7142789 0.48570773]
m1, v1 = tfp.stats.moving_mean_variance_zero_debiased(
moving_mean,
moving_variance,
zero_debias_count,
decay=0.99)
  print(m1.numpy(), v1.numpy())
# ==> [-1.025172 0.96351 ] [0.7142789 0.48570773]
assert(all(m == m1))
assert(all(v == v1))
```
#### References
[1] B. P. Welford. Note on a Method for Calculating Corrected Sums of
Squares and Products. Technometrics, Vol. 4, No. 3 (Aug., 1962), p419-20.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.302.7503&rep=rep1&type=pdf
http://www.jstor.org/stable/1266577
[2]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with tf.name_scope(name or 'assign_moving_mean_variance'):
base_dtype = dtype_util.base_dtype(moving_mean.dtype)
if not dtype_util.is_floating(base_dtype):
raise TypeError(
'Argument `moving_mean` is not float type (saw {}).'.format(
dtype_util.name(moving_mean.dtype)))
value = tf.convert_to_tensor(value, dtype=base_dtype, name='value')
decay = tf.convert_to_tensor(decay, dtype=base_dtype, name='decay')
# Force a read of `moving_mean` as we'll need it twice.
old_mean = tf.convert_to_tensor(
moving_mean, dtype=base_dtype, name='old_mean')
updated_mean = moving_mean.assign_add(
(1. - decay) * (tf.reduce_mean(value, axis=axis) - old_mean))
if zero_debias_count is not None:
t = tf.cast(zero_debias_count.assign_add(1), base_dtype)
# Could have used:
# bias_correction = -tf.math.expm1(t * tf.math.log(decay))
# however since we expect decay to be nearly 1, we don't expect this to
# bear a significant improvement, yet would incur higher computational
# cost.
bias_correction = 1. - decay**t
with tf.control_dependencies([updated_mean]):
updated_mean = updated_mean / bias_correction
if moving_variance is None:
return updated_mean
if base_dtype != dtype_util.base_dtype(moving_variance.dtype):
raise TypeError('Arguments `moving_mean` and `moving_variance` do not '
'have same base `dtype` (saw {}, {}).'.format(
dtype_util.name(moving_mean.dtype),
dtype_util.name(moving_variance.dtype)))
if zero_debias_count is not None:
old_t = tf.where(t > 1., t - 1., tf.constant(np.inf, base_dtype))
old_bias_correction = 1. - decay**old_t
old_mean = old_mean / old_bias_correction
mean_sq_diff = tf.reduce_mean(
tf.math.squared_difference(value, old_mean),
axis=axis)
updated_variance = moving_variance.assign_add(
(1. - decay) * (decay * mean_sq_diff - moving_variance))
if zero_debias_count is not None:
with tf.control_dependencies([updated_variance]):
updated_variance = updated_variance / bias_correction
return updated_mean, updated_variance
def moving_mean_variance_zero_debiased(moving_mean, moving_variance=None,
zero_debias_count=None, decay=0.99,
name=None):
"""Compute zero debiased versions of `moving_mean` and `moving_variance`.
Since `moving_*` variables initialized with `0`s will be biased (toward `0`),
this function rescales the `moving_mean` and `moving_variance` by the factor
`1 - decay**zero_debias_count`, i.e., such that the `moving_mean` is unbiased.
For more details, see [Kingma (2014)][1].
Args:
moving_mean: `float`-like `tf.Variable` representing the exponentially
weighted moving mean. Same shape as `moving_variance` and `value`. This
function presumes the `tf.Variable` was created with all zero initial
value(s).
moving_variance: `float`-like `tf.Variable` representing the exponentially
weighted moving variance. Same shape as `moving_mean` and `value`. This
function presumes the `tf.Variable` was created with all zero initial
value(s).
Default value: `None` (i.e., no moving variance is computed).
zero_debias_count: `int`-like `tf.Variable` representing the number of times
this function has been called on streaming input (*not* the number of
reduced values used in this functions computation). When not `None` (the
default) the returned values for `moving_mean` and `moving_variance` are
"zero debiased", i.e., corrected for their presumed all zeros
intialization. Note: the `tf.Variable`s `moving_mean` and
`moving_variance` *always* store the unbiased calculation, regardless of
setting this argument. To obtain unbiased calculations from these
`tf.Variable`s, see `tfp.stats.moving_mean_variance_zero_debiased`.
Default value: `None` (i.e., no zero debiasing calculation is made).
decay: A `float`-like `Tensor` representing the moving mean decay. Typically
close to `1.`, e.g., `0.99`.
Default value: `0.99`.
name: Python `str` prepended to op names created by this function.
Default value: `None` (i.e., 'moving_mean_variance_zero_debiased').
Returns:
moving_mean: The zero debiased exponentially weighted moving mean.
moving_variance: The zero debiased exponentially weighted moving variance.
Raises:
TypeError: if `moving_mean` does not have float type `dtype`.
TypeError: if `moving_mean`, `moving_variance`, `decay` have different
`base_dtype`.
#### References
[1]: Diederik P. Kingma, Jimmy Ba. Adam: A Method for Stochastic Optimization.
_arXiv preprint arXiv:1412.6980_, 2014.
https://arxiv.org/abs/1412.6980
"""
  with tf.name_scope(name or 'moving_mean_variance_zero_debiased'):
if zero_debias_count is None:
      raise ValueError('Argument `zero_debias_count` must not be `None`.')
base_dtype = dtype_util.base_dtype(moving_mean.dtype)
if not dtype_util.is_floating(base_dtype):
raise TypeError(
'Argument `moving_mean` is not float type (saw {}).'.format(
dtype_util.name(moving_mean.dtype)))
t = tf.cast(zero_debias_count, dtype=base_dtype)
# Could have used:
# bias_correction = -tf.math.expm1(t * tf.math.log(decay))
# however since we expect decay to be nearly 1, we don't expect this to bear
# a significant improvement, yet would incur higher computational cost.
t = tf.where(t > 0., t, tf.constant(np.inf, base_dtype))
bias_correction = 1. - decay**t
unbiased_mean = moving_mean / bias_correction
if moving_variance is None:
return unbiased_mean
if base_dtype != dtype_util.base_dtype(moving_variance.dtype):
raise TypeError('Arguments `moving_mean` and `moving_variance` do not '
'have same base `dtype` (saw {}, {}).'.format(
dtype_util.name(moving_mean.dtype),
dtype_util.name(moving_variance.dtype)))
unbiased_variance = moving_variance / bias_correction
return unbiased_mean, unbiased_variance
def assign_log_moving_mean_exp(log_value, moving_log_mean_exp,
zero_debias_count=None, decay=0.99, name=None):
"""Compute the log of the exponentially weighted moving mean of the exp.
If `log_value` is a draw from a stationary random variable, this function
approximates `log(E[exp(log_value)])`, i.e., a weighted log-sum-exp. More
precisely, a `tf.Variable`, `moving_log_mean_exp`, is updated by `log_value`
using the following identity:
```none
moving_log_mean_exp =
= log(decay exp(moving_log_mean_exp) + (1 - decay) exp(log_value))
= log(exp(moving_log_mean_exp + log(decay)) + exp(log_value + log1p(-decay)))
= moving_log_mean_exp
+ log( exp(moving_log_mean_exp - moving_log_mean_exp + log(decay))
+ exp(log_value - moving_log_mean_exp + log1p(-decay)))
= moving_log_mean_exp
+ log_sum_exp([log(decay), log_value - moving_log_mean_exp +
log1p(-decay)]).
```
In addition to numerical stability, this formulation is advantageous because
`moving_log_mean_exp` can be updated in a lock-free manner, i.e., using
`assign_add`. (Note: the updates are not thread-safe; it's just that the
update to the tf.Variable is presumed efficient due to being lock-free.)
Args:
log_value: `float`-like `Tensor` representing a new (streaming) observation.
Same shape as `moving_log_mean_exp`.
moving_log_mean_exp: `float`-like `Variable` representing the log of the
exponentially weighted moving mean of the exp. Same shape as `log_value`.
zero_debias_count: `int`-like `tf.Variable` representing the number of times
this function has been called on streaming input (*not* the number of
reduced values used in this functions computation). When not `None` (the
default) the returned values for `moving_mean` and `moving_variance` are
"zero debiased", i.e., corrected for their presumed all zeros
intialization. Note: the `tf.Variable`s `moving_mean` and
`moving_variance` *always* store the unbiased calculation, regardless of
setting this argument. To obtain unbiased calculations from these
`tf.Variable`s, see `tfp.stats.moving_mean_variance_zero_debiased`.
Default value: `None` (i.e., no zero debiasing calculation is made).
decay: A `float`-like `Tensor` representing the moving mean decay. Typically
close to `1.`, e.g., `0.99`.
Default value: `0.99`.
name: Python `str` prepended to op names created by this function.
Default value: `None` (i.e., 'assign_log_moving_mean_exp').
Returns:
moving_log_mean_exp: A reference to the input 'Variable' tensor with the
`log_value`-updated log of the exponentially weighted moving mean of exp.
Raises:
TypeError: if `moving_log_mean_exp` does not have float type `dtype`.
TypeError: if `moving_log_mean_exp`, `log_value`, `decay` have different
`base_dtype`.
"""
if zero_debias_count is not None:
raise NotImplementedError(
'Argument `zero_debias_count` is not yet supported. If you require '
'this feature please create a new issue on '
'`https://github.com/tensorflow/probability` or email '
'`[email protected]`.')
with tf.name_scope(name or 'assign_log_moving_mean_exp'):
# We want to update the variable in a numerically stable and lock-free way.
# To do this, observe that variable `x` updated by `v` is:
# x = log(w exp(x) + (1-w) exp(v))
# = log(exp(x + log(w)) + exp(v + log1p(-w)))
# = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w)))
# = x + lse([log(w), v - x + log1p(-w)])
base_dtype = dtype_util.base_dtype(moving_log_mean_exp.dtype)
if not dtype_util.is_floating(base_dtype):
raise TypeError(
'Argument `moving_log_mean_exp` is not float type (saw {}).'.format(
dtype_util.name(moving_log_mean_exp.dtype)))
log_value = tf.convert_to_tensor(
log_value, dtype=base_dtype, name='log_value')
decay = tf.convert_to_tensor(decay, dtype=base_dtype, name='decay')
delta = (log_value - moving_log_mean_exp)[tf.newaxis, ...]
x = tf.concat([
tf.broadcast_to(
tf.math.log(decay),
prefer_static.broadcast_shape(prefer_static.shape(decay),
prefer_static.shape(delta))),
delta + tf.math.log1p(-decay)
], axis=0)
update = tf.reduce_logsumexp(x, axis=0)
return moving_log_mean_exp.assign_add(update)
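# Hedged sanity-check sketch (assumes TF2 eager mode; not part of the module):
# the lock-free log-space update should equal the direct formula
# log(decay * exp(x) + (1 - decay) * exp(v)).
#
#   x = tf.Variable(0.)
#   v = tf.constant(2.)
#   direct = tf.math.log(0.99 * tf.exp(0.) + 0.01 * tf.exp(v))
#   updated = assign_log_moving_mean_exp(v, x, decay=0.99)
#   # abs(updated - direct) < 1e-6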
| [
"[email protected]"
] | |
5fd0d3f0eb9110b6f09d8eaa9ed47d574cfdf370 | 1dd72195bc08460df7e5bb82d3b7bac7a6673f49 | /api/alembic/versions/4ac7d9f38f85_allows_null_dewpoint_values_for_hourly_.py | 3d54107fefa7c5c3568f7d9fb1ecf0a6655968e6 | [
"Apache-2.0",
"MIT"
] | permissive | bcgov/wps | c4347c39cadfad6711502d47776abc8d03895593 | 0ba707b0eddc280240964efa481988df92046e6a | refs/heads/main | 2023-08-19T00:56:39.286460 | 2023-08-16T18:03:06 | 2023-08-16T18:03:06 | 235,861,506 | 35 | 9 | Apache-2.0 | 2023-09-11T21:35:07 | 2020-01-23T18:42:10 | Python | UTF-8 | Python | false | false | 891 | py | """Allows null dewpoint values for hourly actuals
Revision ID: 4ac7d9f38f85
Revises: aa82757b1084
Create Date: 2021-06-01 14:29:49.951368
"""
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4ac7d9f38f85'
down_revision = 'aa82757b1084'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('hourly_actuals', 'dewpoint',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('hourly_actuals', 'dewpoint',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=False)
# ### end Alembic commands ###
| [
"[email protected]"
] | |
c845e20cb49ffa17704614605c775f62174f35f2 | 215f4260f3bc746ea038febbe27e177c73e8781d | /Koudai/Server/src/ZyGames.Tianjiexing.Web/Script/PyScript/Action/action4408.py | 1745ae928052a268c7191948572bcc21935d790f | [] | no_license | cosim/Scut-samples | c7baf863300111846358fb016896736420ec0058 | 86286c4b083fdb8ac6244ad122b5facb7592eabd | refs/heads/master | 2021-01-18T03:54:16.358346 | 2015-10-16T09:07:36 | 2015-10-16T09:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | import clr, sys
import random
import time
import datetime
import ReferenceLib
from lang import Lang
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.BLL.Combat import *
from ZyGames.Tianjiexing.Model.Enum import *
# 4408 - ShengJiTa attribute exchange action
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self);
self.propertyType = 0;
self.starNum = 0;
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self);
def getUrlElement(httpGet, parent):
urlParam = UrlParam();
if httpGet.Contains("PropertyType")\
and httpGet.Contains("StarNum"):
urlParam.propertyType = httpGet.GetEnum[PropertyType]("PropertyType");
urlParam.starNum = httpGet.GetIntValue("StarNum");
else:
urlParam.Result = False;
return urlParam;
def takeAction(urlParam, parent):
actionResult = ActionResult();
userId = str(parent.Current.UserId)
contextUser = PersonalCacheStruct.Get[GameUser](userId)
def loadError():
parent.ErrorCode = Lang.getLang("ErrorCode");
parent.ErrorInfo = Lang.getLang("LoadError");
actionResult.Result = False;
return actionResult;
    # Apply the attribute bonus: each star converts to 1/percent (0.01)
    # points of the chosen stat.
    percent = 100.0;
userSJTInfo = PersonalCacheStruct[UserShengJiTa]().FindKey(userId);
    # Check that the player has enough stars to exchange
if userSJTInfo.LastScoreStar < urlParam.starNum:
return loadError();
if urlParam.propertyType == PropertyType.Life:
userSJTInfo.LifeNum = userSJTInfo.LifeNum + (urlParam.starNum / percent);
elif urlParam.propertyType == PropertyType.WuLi:
userSJTInfo.WuLiNum = userSJTInfo.WuLiNum + (urlParam.starNum / percent);
elif urlParam.propertyType == PropertyType.Mofa:
userSJTInfo.MofaNum = userSJTInfo.MofaNum + (urlParam.starNum / percent);
elif urlParam.propertyType == PropertyType.FunJi:
userSJTInfo.FunJiNum = userSJTInfo.FunJiNum + (urlParam.starNum / percent);
else:
return loadError();
    # Deduct the exchanged stars
userSJTInfo.LastScoreStar -= urlParam.starNum;
return actionResult;
def buildPacket(writer, urlParam, actionResult):
return True; | [
"[email protected]"
] | |
d56f05af3c27de535dd87df53be5bd34660d448d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_worsts.py | 238bd14921209f33ca8ce96c77e28f95ec46020d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.adverbs._worst import _WORST
#calss header
class _WORSTS(_WORST, ):
def __init__(self,):
_WORST.__init__(self)
self.name = "WORSTS"
self.specie = 'adverbs'
self.basic = "worst"
self.jsondata = {}
| [
"[email protected]"
] | |
0f1183e97da07bd5f49d57e484c565872ba37049 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/twisted/manhole/ui/test/test_gtk2manhole.py | 463190f37a1020dc41ec1587e34d4d45d1896c90 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 1,324 | py | # Copyright (c) 2009 Twisted Matrix Laboratories.
"""
Tests for GTK2 GUI manhole.
"""
skip = False
try:
import pygtk
pygtk.require("2.0")
except:
skip = "GTK 2.0 not available"
else:
try:
import gtk
except ImportError:
skip = "GTK 2.0 not available"
except RuntimeError:
skip = "Old version of GTK 2.0 requires DISPLAY, and we don't have one."
else:
if gtk.gtk_version[0] == 1:
skip = "Requested GTK 2.0, but 1.0 was already imported."
else:
from twisted.manhole.ui.gtk2manhole import ConsoleInput
from twisted.trial.unittest import TestCase
from twisted.python.reflect import prefixedMethodNames
class ConsoleInputTests(TestCase):
"""
Tests for L{ConsoleInput}.
"""
def test_reverseKeymap(self):
"""
Verify that a L{ConsoleInput} has a reverse mapping of the keysym names
it needs for event handling to their corresponding keysym.
"""
ci = ConsoleInput(None)
for eventName in prefixedMethodNames(ConsoleInput, 'key_'):
keysymName = eventName.split("_")[-1]
keysymValue = getattr(gtk.keysyms, keysymName)
self.assertEqual(ci.rkeymap[keysymValue], keysymName)
skip = skip
| [
"l”[email protected]“"
] | |
7fd1dbe3e06dc453bc514ba3d58dd5b19f88d100 | 3669cd260bdab697376feca747d1635d35f42c83 | /security/py-fail2ban/files/patch-actions.py | 91bfc987942691a41c98c25baa79292d35497788 | [] | no_license | tuxillo/DPorts | 58072bc88887c7a53a51988c76a70366bef44a93 | f523fb13a9d3ecc5ce9a8045fdf146ae05de5399 | refs/heads/master | 2020-04-03T08:02:44.297511 | 2013-03-04T07:56:00 | 2013-03-04T07:56:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | --- server/actions.py.orig 2012-11-27 18:16:18.000000000 +0100
+++ server/actions.py 2012-11-27 18:17:04.000000000 +0100
@@ -206 +206 @@
- logSys.warn("[%s] Unban %s" % (self.jail.getName(), aInfo["ip"]))
+ logSys.warn("[%s] Unban %s" % (self.jail.getName(), str(aInfo["ip"])))
| [
"[email protected]"
] | |
d1203e82570e4d5912a7947b18befac137bde579 | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/pipeline/components/data_preprocessing/data_preprocessing_categorical.py | d6705e83ea43154d12a39641896c87ce43decbe9 | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,322 | py | from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, BasePipeline
from autotabular.pipeline.components.data_preprocessing.categorical_encoding import OHEChoice
from autotabular.pipeline.components.data_preprocessing.categorical_encoding.encoding import OrdinalEncoding
from autotabular.pipeline.components.data_preprocessing.category_shift.category_shift import CategoryShift
from autotabular.pipeline.components.data_preprocessing.imputation.categorical_imputation import CategoricalImputation
from autotabular.pipeline.components.data_preprocessing.minority_coalescense import CoalescenseChoice
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
from sklearn.base import BaseEstimator
class CategoricalPreprocessingPipeline(BasePipeline):
"""This class implements a pipeline for data preprocessing of categorical
features. It assumes that the data to be transformed is made only of
categorical features. The steps of this pipeline are:
1 - Category shift: Adds 3 to every category value
2 - Imputation: Assign category 2 to missing values (NaN).
3 - Minority coalescence: Assign category 1 to all categories whose occurrence
don't sum-up to a certain minimum fraction
4 - One hot encoding: usual sklearn one hot encoding
Parameters
----------
config : ConfigSpace.configuration_space.Configuration
The configuration to evaluate.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance
used by `np.random`.
"""
def __init__(self,
config: Optional[Configuration] = None,
steps: Optional[List[Tuple[str, BaseEstimator]]] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
random_state: Optional[np.random.RandomState] = None,
init_params: Optional[Dict[str, Any]] = None):
self._output_dtype = np.int32
super().__init__(config, steps, dataset_properties, include, exclude,
random_state, init_params)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': 'cat_datapreproc',
'name': 'categorical data preprocessing',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
# TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
'preferred_dtype': None
}
def _get_hyperparameter_search_space(
self,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
"""Create the hyperparameter configuration space.
Returns
-------
cs : ConfigSpace.configuration_space.Configuration
The configuration space describing the SimpleRegressionClassifier.
"""
cs = ConfigurationSpace()
if dataset_properties is None or not isinstance(
dataset_properties, dict):
dataset_properties = dict()
cs = self._get_base_search_space(
cs=cs,
dataset_properties=dataset_properties,
exclude=exclude,
include=include,
pipeline=self.steps)
return cs
def _get_pipeline_steps(
self,
dataset_properties: Optional[Dict[str, str]] = None,
) -> List[Tuple[str, BaseEstimator]]:
steps = []
default_dataset_properties = {}
if dataset_properties is not None and isinstance(
dataset_properties, dict):
default_dataset_properties.update(dataset_properties)
steps.extend([
('imputation', CategoricalImputation()),
('encoding', OrdinalEncoding()),
('category_shift', CategoryShift()),
('category_coalescence',
CoalescenseChoice(default_dataset_properties)),
('categorical_encoding', OHEChoice(default_dataset_properties)),
])
return steps
def _get_estimator_hyperparameter_name(self) -> str:
return 'categorical data preprocessing'
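# Hypothetical usage sketch (fitting/transforming behaviour comes from
# BasePipeline and is assumed here):
#
#   pipeline = CategoricalPreprocessingPipeline(dataset_properties={})
#   cs = pipeline._get_hyperparameter_search_space()
#   config = cs.sample_configuration()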
| [
"[email protected]"
] | |
f57310bed2a1c58aed8958d2ec2afcb9b866e397 | 7cc0ef2d1ad8e9a1542e52bc6bc8897606639452 | /account/migrations/0007_auto_20160505_1827.py | 3f70979a040e12f0e39942e7f66d9c20adccf8e3 | [] | no_license | htl1126/pathjump | 1e87c6127bbfebc8519379c9352440d3a98359f6 | c1235c3fbb13af31ac7b8523e7a83b69f0da95b7 | refs/heads/master | 2021-01-15T15:36:47.723753 | 2016-08-26T20:35:32 | 2016-08-26T20:35:32 | 53,075,772 | 2 | 2 | null | 2016-05-06T17:43:22 | 2016-03-03T19:07:26 | HTML | UTF-8 | Python | false | false | 1,476 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-05 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('account', '0006_auto_20160505_1447'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='birthday',
field=models.DateField(blank=True, default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='userprofile',
name='gpa_1',
field=models.CharField(blank=True, max_length=10),
),
migrations.AlterField(
model_name='userprofile',
name='major_1',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='userprofile',
name='university_grad_date_1',
field=models.DateField(blank=True, default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='userprofile',
name='university_grad_date_2',
field=models.DateField(blank=True, default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='userprofile',
name='university_grad_date_3',
field=models.DateField(blank=True, default=django.utils.timezone.now),
),
]
| [
"[email protected]"
] | |
91edc3f2e33b2bbca1ee98e8b76dfe875cf3c247 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_PolyTrend_Seasonal_Second_AR.py | 64f7646780a6a43000ce6df0f0cadc56dde92ebd | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 156 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['PolyTrend'] , ['Seasonal_Second'] , ['AR'] ); | [
"[email protected]"
] | |
0c15131e30a6b94938e18b671638e273934f7357 | 3b5ee9aa584bfca56dabc19d75717f6104c0dc95 | /sky_pattern/sky_template_taskfarmer/compute_sky_templates-old.py | 5bd31c824ec6a8bf57995b138cdd253acf97d011 | [] | no_license | rongpu/desi-misc | 95690ca99962940fd4a793d523edf4d2ce68b4c3 | c700344ebf8f74391fcce69a47e4ca57fc4b34f8 | refs/heads/master | 2023-09-01T00:49:07.399914 | 2023-08-11T17:10:40 | 2023-08-11T17:10:40 | 173,173,912 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,228 | py | from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table, vstack, hstack
import fitsio
from scipy.interpolate import interp2d
from scipy.ndimage.filters import gaussian_filter
from multiprocessing import Pool
import argparse
from pathlib import Path
params = {'legend.fontsize': 'large',
'axes.labelsize': 'large',
'axes.titlesize':'large',
'xtick.labelsize':'large',
'ytick.labelsize':'large',
'figure.facecolor':'w'}
plt.rcParams.update(params)
n_processes = 12
parser = argparse.ArgumentParser()
parser.add_argument('n_task')
parser.add_argument('task_id')
args = parser.parse_args()
n_task = int(args.n_task)
task_id = int(args.task_id)
nmad = lambda x: 1.4826 * np.median(np.abs(x-np.median(x)))
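# (The 1.4826 factor scales the median absolute deviation so that, for
# Gaussian noise, NMAD approximates the standard deviation.)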
ccdnamenumdict = {'S1': 25, 'S2': 26, 'S3': 27, 'S4':28,
'S5': 29, 'S6': 30, 'S7': 31,
'S8': 19, 'S9': 20, 'S10': 21, 'S11': 22, 'S12': 23,
'S13': 24,
'S14': 13, 'S15': 14, 'S16': 15, 'S17': 16, 'S18': 17,
'S19': 18,
'S20': 8, 'S21': 9, 'S22': 10, 'S23': 11, 'S24': 12,
'S25': 4, 'S26': 5, 'S27': 6, 'S28': 7,
'S29': 1, 'S30': 2, 'S31': 3,
'N1': 32, 'N2': 33, 'N3': 34, 'N4': 35,
'N5': 36, 'N6': 37, 'N7': 38,
'N8': 39, 'N9': 40, 'N10': 41, 'N11': 42, 'N12': 43,
'N13': 44,
'N14': 45, 'N15': 46, 'N16': 47, 'N17': 48, 'N18': 49,
'N19': 50,
'N20': 51, 'N21': 52, 'N22': 53, 'N23': 54, 'N24': 55,
'N25': 56, 'N26': 57, 'N27': 58, 'N28': 59,
'N29': 60, 'N30': 61, 'N31': 62,
}
ccdnamenumdict_inv = {aa: bb for bb, aa in ccdnamenumdict.items()}
ccdnum_list = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 62]
# Shape of the DECam CP image
img_shape = (4094, 2046)
max_exposure = 50
smoothing_scale = 120 # in pixels
expnum_blacklist = [243224, 243233, 243250, 243261, 247512, 247519, 247524, 247535,
247536, 247543, 247544, 247546, 251698, 261222, 261236, 261245,
261264, 261289, 263153, 263682, 269550, 269553, 269556, 269573,
269575, 269583, 269584, 269662, 269669, 269719, 270231, 270260,
276448, 276450, 276454, 449966, 463823, 569657, 600963, 600966,
611450, 690302, 718586, 718600, 718608, 718626, 718636, 720061,
720100, 754068, 754083, 768677, 803349, 803356, 807350, 808254,
808338, 808339, 808547, 808560, 808570, 808650, 863383]
halfed_n10_run_list = [376, 377, 378, 384, 385, 386, 798, 799, 800, 806, 807, 808, 1197, 1198, 1199, 1200, 1206, 1207]
#######################################################################################################################
# blob_dir = '/global/cscratch1/sd/rongpu/fringe/decam_ccd_blob_mask'
blob_dir = '/global/cfs/cdirs/desi/users/rongpu/dr9/decam_ccd_blob_mask'
image_dir = '/global/project/projectdirs/cosmo/staging'
surveyccd_path = '/global/project/projectdirs/cosmo/work/legacysurvey/dr9/survey-ccds-decam-dr9.fits.gz'
output_dir = '/global/cscratch1/sd/rongpu/dr9dev/sky_pattern/sky_templates'
# ccd_columns = ['image_filename', 'image_hdu', 'expnum', 'ccdname', 'filter', 'ccd_cuts', 'ccdskycounts', 'plver']
ccd_columns = ['image_hdu', 'expnum', 'ccdname', 'ccdskycounts']
ccd = Table(fitsio.read(surveyccd_path, columns=ccd_columns))
print(len(ccd))
skyrun = Table.read('/global/cscratch1/sd/rongpu/temp/skyrunsgoodcountexpnumv48dr8.fits')
# skyrun = Table.read('/global/cscratch1/sd/rongpu/temp/skyrunsgoodcountexpnumv48dr8_less.fits')
print(len(skyrun))
mask = skyrun['ok']==True
skyrun = skyrun[mask]
print(len(skyrun))
# Exclude templates already created
fn_list = glob.glob(os.path.join(output_dir, '*.fits.fz'))
run_list_done = [int(fn[len(os.path.join(output_dir, 'sky_template_'))+2:-8]) for fn in fn_list]
mask = ~np.in1d(skyrun['run'], run_list_done)
skyrun = skyrun[mask]
print(len(skyrun), len(run_list_done))
# ########################## Exclude z band ##########################
# band = 'z'
# mask = skyrun['filter']!=band
# skyrun = skyrun[mask]
# print(len(skyrun))
# ####################################################################
run_list = np.unique(skyrun['run'])
print(len(run_list))
# shuffle
np.random.seed(123)
# DO NOT USE NP.RANDOM.SHUFFLE
run_list = np.random.choice(run_list, size=len(run_list), replace=False)
# split among the Cori nodes
run_list_split = np.array_split(run_list, n_task)
run_list = run_list_split[task_id]
print('Number of runs in this node:', len(run_list))
# Wait to avoid race condition from writing files and checking file status
time.sleep(60)
#######################################################################################################################
def compute_smooth_sky(run, diagnostic_touch=True):
skyrun_idx = np.where(skyrun['run']==run)[0]
band = skyrun['filter'][skyrun_idx[0]]
print('band: {}, run: {}'.format(band, run))
#############################################
# Maybe there's a better way to downselect the exposures?
    if len(skyrun_idx) > max_exposure:
skyrun_idx = skyrun_idx[:max_exposure]
#############################################
output_path = os.path.join(output_dir, 'sky_template_{}_{}.fits.fz'.format(band, run))
raw_path = os.path.join(output_dir, 'sky_raw_{}_{}.fits.fz'.format(band, run))
if os.path.isfile(output_path):
print(output_path+' already exists!')
return None
if diagnostic_touch:
Path('/global/u2/r/rongpu/temp/sky_template_status/'+os.path.basename(output_path)).touch()
Path('/global/u2/r/rongpu/temp/sky_template_being_written/'+os.path.basename(output_path)).touch()
hdul_template = fitsio.FITS(output_path, mode='rw', clobber=True)
hdul_template.write(data=None) # first HDU is empty
hdul_raw_stacked = fitsio.FITS(raw_path, mode='rw', clobber=True)
hdul_raw_stacked.write(data=None) # first HDU is empty
for ccdnum in ccdnum_list:
# print(ccdnum)
# ####################
# start = time.time()
# ####################
img_list = []
ccdname = ccdnamenumdict_inv[ccdnum]
for index, skyrun_index in enumerate(skyrun_idx):
expnum = skyrun['expnum'][skyrun_index]
if expnum in expnum_blacklist:
continue
# print(ccdnum, ccdname, index, '/', len(skyrun_idx))
# Load CCD image
img_fn = os.path.join(image_dir, skyrun['image_filename'][skyrun_index]).strip()
try:
img = fitsio.read(img_fn, ext=ccdname)
except:
print(ccdname+' '+img_fn+' does not exist!')
continue
# Get HDU index
with fitsio.FITS(img_fn) as f:
hdu_index = f.movnam_ext(ccdname)
# Load blob mask
str_loc = str.find(skyrun['image_filename'][skyrun_index].strip(), '.fits')
img_filename_base = skyrun['image_filename'][skyrun_index].strip()[:str_loc]
blob_path = os.path.join(blob_dir, 'blob_mask', img_filename_base+'-blobmask.npz')
try:
blob_data = np.load(blob_path)
            except Exception:
#################
# DO SOMETHING HERE?
#################
print(blob_path+' does not exist!')
continue
try:
blob = blob_data['hdu'+str(hdu_index).zfill(2)]
            except Exception:
print(blob_path+' hdu'+str(hdu_index)+' does not exist!')
continue
if (ccdname=='S7') or ((run in halfed_n10_run_list) and (ccdname=='N10')):
# Only keep the good half of the S7
half = img_shape[1] // 2
img = img[:, :half]
blob = blob[:, :half]
# Remove median sky
sky = np.median(img[blob].flatten())
img = img - sky
# # Find the entry in survey-ccd
# ccd_index = np.where((ccd['expnum']==skyrun['expnum'][skyrun_index]) & (ccd['image_hdu']==hdu_index))[0][0]
# Get the median ccdskycounts of the exposure
# mask = (ccd['expnum']==skyrun['expnum'][skyrun_index]) & (ccd['ccdname']!='S7') & (ccd['ccdname']!='S7 ') # too slow!
mask = ccd['expnum']==skyrun['expnum'][skyrun_index]
ccdskycounts_median = np.median(ccd['ccdskycounts'][mask])
# print('ccdskycounts_median = {:.4f}'.format(ccdskycounts_median))
# Normalize by ccdskycounts
img = img/ccdskycounts_median
# Apply blob mask
img[~blob] = np.nan
img_list.append(img)
gc.collect()
if len(img_list)==0:
print('There is no available {} CCD'.format(ccdname))
continue
img_median = np.nanmedian(img_list, axis=0)
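        # per-pixel median across all exposures of the run; blob-masked (source)
        # pixels are NaN and are therefore ignored by nanmedian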
# Fill in NAN values
mask = ~np.isfinite(img_median)
# print('number of NAN pixels:', np.sum(mask))
img_median[mask] = 0
img_median1 = img_median.copy()
# 3-sigma clipping
sky_nmad = nmad(img_median[np.isfinite(img_median)]) # sky level
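        # nmad is defined earlier in this script; presumably the normalized median
        # absolute deviation (~1.4826 * MAD), a robust estimate of the sky scatter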
# mask = (img_median<-3*sky_nmad) | (img_median>3*sky_nmad)
# img_median1[mask] = 0
mask = (img_median<-3*sky_nmad)
img_median1[mask] = -3*sky_nmad
mask = (img_median>3*sky_nmad)
img_median1[mask] = 3*sky_nmad
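        # unlike the commented-out version above, outliers beyond +/-3 sigma are
        # clamped to the boundary value rather than zeroed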
if (ccdname=='S7') or ((run in halfed_n10_run_list) and (ccdname=='N10')):
# trim three of edges
trim_size = 20
trim_size_bottom = 1
img_median1 = img_median1[trim_size:(img_median1.shape[0]-trim_size), trim_size:(img_median1.shape[1]-trim_size_bottom)]
# downsize the image to speed up gaussian filter
binsize = 2
img_median1 = np.nanmean(np.nanmean(img_median1.reshape((img_median1.shape[0]//binsize, binsize, img_median1.shape[1]//binsize,-1)), axis=3), axis=1)
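            # the reshape groups pixels into binsize x binsize blocks; the two
            # nanmean calls then average within each block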
x_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[1])
y_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[0])
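        # note: skyrun_index below is whatever value was left by the exposure loop;
        # presumably every exposure in a run falls on the same side of the expnum cut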
elif (band=='r' and skyrun['expnum'][skyrun_index]<298251):
################### r band edge glow exposures ###################
# r-band edge glow
# trim edges
trim_size = 10
extra_trim_size = 50
img_median1 = img_median1[trim_size:(img_median1.shape[0]-trim_size), trim_size:(img_median1.shape[1]-trim_size)]
# The edge glow only appears on one edge and are at opposite edges for top CCDs and bottom CCDs
if ccdnum<=31:
img_median1 = img_median1[:-extra_trim_size]
else:
img_median1 = img_median1[extra_trim_size:]
# downsize the image to speed up gaussian filter
binsize = 2
img_median1 = np.nanmean(np.nanmean(img_median1.reshape((img_median1.shape[0]//binsize, binsize, img_median1.shape[1]//binsize,-1)), axis=3), axis=1)
x_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[1])
if ccdnum<=31:
y_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[0])
else:
y_small_grid = trim_size + extra_trim_size + binsize/2 + binsize*np.arange(img_median1.shape[0])
else:
################### Normal exposures ####################
# trim edges
trim_size = 10
img_median1 = img_median1[trim_size:(img_median1.shape[0]-trim_size), trim_size:(img_median1.shape[1]-trim_size)]
# downsize the image to speed up gaussian filter
binsize = 2
img_median1 = np.nanmean(np.nanmean(img_median1.reshape((img_median1.shape[0]//binsize, binsize, img_median1.shape[1]//binsize,-1)), axis=3), axis=1)
x_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[1])
y_small_grid = trim_size + binsize/2 + binsize*np.arange(img_median1.shape[0])
# Gaussian filtering
img_median1_smooth = gaussian_filter(img_median1, smoothing_scale/binsize, mode='reflect')
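        # smoothing_scale is expressed in original pixels, so it is divided by
        # binsize to act on the downsampled image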
# Convert the downsized smooth image to full size
interp_func = interp2d(x_small_grid, y_small_grid, img_median1_smooth, kind='linear')
x_grid, y_grid = np.arange(img_median.shape[1]), np.arange(img_median.shape[0])
img_median_smooth = interp_func(x_grid, y_grid).reshape(img_median.shape)
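        # the coarse smoothed map is interpolated back onto the full-resolution
        # pixel grid of the original CCD image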
if (ccdname=='S7') or ((run in halfed_n10_run_list) and (ccdname=='N10')):
# Add back the other half
tmp = np.zeros(img_shape)
half = img_shape[1] // 2
tmp[:, :half] = img_median_smooth
img_median_smooth = tmp
################################ Save sky template ###############################
hdul_template.write(data=img_median_smooth, extname=ccdname, compress='rice')
hdul_raw_stacked.write(data=img_median, extname=ccdname, compress='rice')
# ##################
# end = time.time()
# print('Took {:.1f} seconds'.format(end - start))
# ##################
    hdul_template.close()
    hdul_raw_stacked.close()
if diagnostic_touch:
os.remove('/global/u2/r/rongpu/temp/sky_template_being_written/'+os.path.basename(output_path))
def main():
with Pool(processes=n_processes) as pool:
res = pool.map(compute_smooth_sky, run_list)
print('All done!!!!!!!!!!!!!!!')
if __name__=="__main__":
main()
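# Each worker handles one run at a time and writes its own output file, so the only
# cross-process coordination is the existence check at the top of compute_smooth_sky.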
# ---- file: /flask-aws/bin/bundle_image | repo: nygeog/flask-aws-tutorial-nygeog | license: none ----
#!/Users/danielmsheehan/GitHub/flask-aws-tutorial-nygeog/flask-aws/bin/python2.7
from boto.manage.server import Server
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(version="%prog 1.0", usage="Usage: %prog [options] instance-id [instance-id-2]")
# Commands
parser.add_option("-b", "--bucket", help="Destination Bucket", dest="bucket", default=None)
parser.add_option("-p", "--prefix", help="AMI Prefix", dest="prefix", default=None)
parser.add_option("-k", "--key", help="Private Key File", dest="key_file", default=None)
parser.add_option("-c", "--cert", help="Public Certificate File", dest="cert_file", default=None)
parser.add_option("-s", "--size", help="AMI Size", dest="size", default=None)
parser.add_option("-i", "--ssh-key", help="SSH Keyfile", dest="ssh_key", default=None)
parser.add_option("-u", "--user-name", help="SSH Username", dest="uname", default="root")
parser.add_option("-n", "--name", help="Name of Image", dest="name")
(options, args) = parser.parse_args()
for instance_id in args:
try:
s = Server.find(instance_id=instance_id).next()
print "Found old server object"
except StopIteration:
print "New Server Object Created"
s = Server.create_from_instance_id(instance_id, options.name)
assert(s.hostname is not None)
b = s.get_bundler(uname=options.uname)
        b.bundle(bucket=options.bucket, prefix=options.prefix, key_file=options.key_file,
                 cert_file=options.cert_file, size=int(options.size), ssh_key=options.ssh_key)
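        # Example invocation (illustrative values only, not from the original script):
        #   bundle_image -b my-bucket -p my-ami -k pk.pem -c cert.pem -s 10 -i id_rsa i-1234abcd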
# ---- file: /nslocalizer/xcodeproj/pbProj/XCVersionGroup.py | repo: samdmarshall/nslocalizer | license: BSD-3-Clause ----
# Copyright (c) 2016, Samantha Marshall (http://pewpewthespells.com)
# All rights reserved.
#
# https://github.com/samdmarshall/nslocalizer
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Samantha Marshall nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from . import PBX_Constants
from .PBXItem import PBX_Base_Reference
class XCVersionGroup(PBX_Base_Reference):
def __init__(self, identifier, dictionary):
        # use the explicit class rather than self.__class__ so super() stays
        # correct if this class is ever subclassed
        super(XCVersionGroup, self).__init__(identifier, dictionary)
def resolveGraph(self, project):
        super(XCVersionGroup, self).resolveGraph(project)
self.resolveGraphNodesForArray(PBX_Constants.kPBX_REFERENCE_children, project)
self.resolveGraphNodeForKey(PBX_Constants.kPBX_XCVersionGroup_currentVersion, project)
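        # (resolveGraphNodesForArray / resolveGraphNodeForKey are presumably
        # inherited helpers that swap stored identifier strings for the resolved
        # project objects)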
# ---- file: output/models/nist_data/atomic/float_pkg/schema_instance/nistschema_sv_iv_atomic_float_enumeration_4_xsd/__init__.py | repo: tefra/xsdata-w3c-tests | license: MIT ----
from output.models.nist_data.atomic.float_pkg.schema_instance.nistschema_sv_iv_atomic_float_enumeration_4_xsd.nistschema_sv_iv_atomic_float_enumeration_4 import (
NistschemaSvIvAtomicFloatEnumeration4,
NistschemaSvIvAtomicFloatEnumeration4Type,
)
__all__ = [
"NistschemaSvIvAtomicFloatEnumeration4",
"NistschemaSvIvAtomicFloatEnumeration4Type",
]
# ---- file: /icekit_press_releases/migrations/0010_add_brief.py | repo: ic-labs/django-icekit | license: MIT ----
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0009_auto_20170519_1308'),
]
operations = [
migrations.AddField(
model_name='pressrelease',
name='admin_notes',
field=models.TextField(blank=True, help_text=b"Administrator's notes about this content"),
),
migrations.AddField(
model_name='pressrelease',
name='brief',
field=models.TextField(blank=True, help_text=b'A document brief describing the purpose of this content'),
),
]
# ---- file: /phase1/400.py | repo: GavinPHR/code | license: none ----
# Nth Digit
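# Find the nth digit of the infinite sequence 123456789101112...
# Idea: skip whole blocks of k-digit numbers (the 9 * 10**(k-1) numbers with k
# digits contribute 9 * 10**(k-1) * k digits), then index into the remaining number.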
class Solution:
def findNthDigit(self, n: int) -> int:
i = 0
idx = 9 * (10 ** i) * (i + 1)
while n - idx > 0:
n = n - idx
i += 1
idx = 9 * (10 ** i) * (i + 1)
pos = n % (i + 1)
print(n, i, pos, sep="||")
num = (10 ** i - 1) + n // (i + 1)
print(num)
return int(str(num)[~pos]) if pos == 0 else int(str(num + 1)[pos - 1])
if __name__ == '__main__':
s = Solution()
    print(s.findNthDigit(671))
# ---- file: /utilities/admin_tools/menu/__init__.py | repo: chi1/trinitee | licenses: MIT, BSD-3-Clause ----
from utilities.admin_tools.menu.menus import *
# ---- file: /school_1329_server/users/migrations/0004_auto_20171228_1822.py | repo: potykion/school_1329_server | license: none ----
# Generated by Django 2.0 on 2017-12-28 15:22
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20171226_0152'),
]
operations = [
migrations.AddField(
model_name='temporarypassword',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='temporarypassword',
name='expiration_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 4, 15, 22, 16, 184617, tzinfo=utc)),
),
migrations.AlterField(
model_name='temporarypassword',
name='password_value',
field=models.CharField(default='genGvWwO', max_length=32),
),
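        # The datetime and 'genGvWwO' defaults above are one-off values that
        # makemigrations captured at generation time to back-fill existing rows.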
]
# ---- file: codeup_100/1060.py | repo: Hyunjae-Kim/Coding-test-tutorial | license: none ----
num1, num2 = list(map(int, input().split()))
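# Codeup 1060: print the bitwise AND of the two integers read above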
print(num1 & num2)
# ---- file: Game5/home.py | repo: splin85/Games | license: MIT ----
# coding: utf-8
# Author: Charles
# WeChat public account: Charles的皮卡丘
# Home base (headquarters) class
import pygame
# Home base class
class Home(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.homes = ['./images/home/home1.png', './images/home/home2.png', './images/home/home_destroyed.png']
self.home = pygame.image.load(self.homes[0])
self.rect = self.home.get_rect()
self.rect.left, self.rect.top = (3 + 12 * 24, 3 + 24 * 24)
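        # presumably a 24 px tile grid with a 3 px offset: column 12, row 24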
self.alive = True
    # set the home base to the destroyed state
def set_dead(self):
self.home = pygame.image.load(self.homes[-1])
        self.alive = False
# ---- file: prototypes/training_scripts/HeLa/feature_net_61x61_dropout_norm3.py | repo: YubinXie/DeepCell | license: none ----
'''Train a simple deep CNN on a HeLa dataset.
GPU run command:
THEANO_FLAGS='mode=FAST_RUN,device=gpu,floatX=float32' python feature_net_61x61_dropout_norm3.py
'''
from __future__ import print_function
from keras.optimizers import SGD, RMSprop
from cnn_functions import rate_scheduler, train_model_sample
from model_zoo import feature_net_61x61 as the_model
import os
import datetime
import numpy as np
batch_size = 256
n_classes = 3
n_epoch = 25
model = the_model(n_channels = 2, n_features = 3, reg = 1e-5, drop=0.5)
dataset = "HeLa_all_stdnorm_61x61"
direc_save = "/home/nquach/DeepCell2/trained_networks/"
direc_data = "/home/nquach/DeepCell2/training_data_npz/"
optimizer = RMSprop(lr = 0.001, rho = 0.95, epsilon = 1e-8)
lr_sched = rate_scheduler(lr = 0.001, decay = 0.95)
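# rate_scheduler presumably yields an exponentially decaying learning-rate
# schedule (roughly lr * decay**epoch) applied once per epoch during training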
expt = "feature_net_61x61_dropout"
iterate = 3
train_model_sample(model = model, dataset = dataset, optimizer = optimizer,
expt = expt, it = iterate, batch_size = batch_size, n_epoch = n_epoch,
direc_save = direc_save,
direc_data = direc_data,
lr_sched = lr_sched,
    rotate = True, flip = True, shear = 0)
# ---- file: FinancialSimulator/Config/ConfigInstall.py | repo: FabriceSalvaire/pyFinancialSimulator | license: LicenseRef-scancode-public-domain ----
####################################################################################################
import os
####################################################################################################
import FinancialSimulator.Tools.Path as PathTools # due to Path class
####################################################################################################
_file_path = PathTools.to_absolute_path(__file__)
_config_module_path = os.path.dirname(_file_path)
_module_path = PathTools.parent_directory_of(_config_module_path)
# Fixme: wrong when installed
_source_directory = PathTools.parent_directory_of(_module_path)
_share_directory = os.path.join(_source_directory, 'share')
class Path:
module_path = _module_path
share_directory = _share_directory
config_directory = os.path.join(share_directory, 'config')
accounting_data_directory = os.path.join(share_directory, 'accounting')
####################################################################################################
class Logging:
default_config_file = 'logging.yml'
directories = (Path.config_directory,)
##############################################
@staticmethod
def find(config_file):
return PathTools.find(config_file, Logging.directories)
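# Example (hypothetical usage):
#   config_path = Logging.find(Logging.default_config_file)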
# ---- file: FastBridgeApp/data/Latin/latin_for_americans_vol_1_and_2_ullman-henderson.py | repo: clangen2/FastBridge | license: none ----
import text
nan=""
section_words = {'start': -1, '1.15': 390, '1.18': 416, '1.3': 514, '1.31': 532, '1.38': 578, '1.4': 590, '1.52': 702, '1.63': 791, '1.64': 796, '2.1': 26, '2.14': 937, '2.2': 996, '2.3': 59, '2.4': 1180, '2.40': 1187, '2.41': 1191, '2.42': 1196, '2.43': 1207, '2.44': 1213, '2.45': 1219, '2.46': 1223, '2.47': 1229, '2.48': 1232, '2.49': 1236, '2.5': 163, '2.50': 1241, '2.51': 1253, '2.52': 1264, '2.53': 1271, '2.55': 1294, '2.56': 1303, '2.57': 1317, '2.6': 1325, '2.60': 1334, '2.61': 1344, '2.62': 1347, '2.63': 1356, '2.64': 1358, '2.66': 1367, '2.67': 1373, '2.69': 1379, '2.7': 1380, '2.70': 1387, '2.72': 1395, '2.77': 1406, '2.79': 1411, '2.8': 1414, '2.80': 1419, '2.83': 1436, '2.88': 1446, '2.89': 1451, '2.9': 324, '2.90': 1455, '2.94': 1459, '2.97': 1464, '2.99': 1467, '1.1': 348, '1.10': 355, '1.11': 363, '1.12': 369, '1.13': 377, '1.14': 386, '1.16': 398, '1.17': 405, '1.19': 425, '1.2': 437, '1.20': 446, '1.21': 452, '1.22': 462, '1.23': 468, '1.24': 475, '1.25': 480, '1.26': 487, '1.27': 495, '1.28': 501, '1.29': 505, '1.30': 522, '1.32': 540, '1.33': 547, '1.34': 557, '1.35': 563, '1.36': 568, '1.37': 572, '1.39': 582, '1.40': 599, '1.41': 606, '1.42': 615, '1.43': 624, '1.44': 632, '1.45': 643, '1.46': 653, '1.47': 662, '1.48': 668, '1.49': 675, '1.5': 683, '1.50': 689, '1.51': 695, '1.53': 710, '1.54': 719, '1.56': 729, '1.57': 734, '1.58': 740, '1.59': 748, '1.6': 755, '1.60': 763, '1.61': 770, '1.62': 779, '1.65': 805, '1.66': 815, '1.67': 825, '1.68': 835, '1.69': 845, '1.7': 854, '1.70': 861, '1.8': 871, '1.9': 878, '2.10': 889, '2.11': 900, '2.12': 914, '2.13': 926, '2.15': 948, '2.16': 959, '2.17': 968, '2.18': 981, '2.19': 993, '2.20': 1009, '2.21': 1021, '2.22': 1029, '2.23': 1040, '2.24': 1051, '2.25': 1060, '2.26': 1070, '2.27': 1078, '2.28': 1087, '2.29': 1093, '2.30': 1102, '2.31': 1113, '2.32': 1120, '2.33': 1132, '2.34': 1142, '2.35': 1152, '2.36': 1155, '2.37': 1166, '2.38': 1172, '2.39': 1179, '2.54': 1282, '2.58': 1323, '2.71': 1391, '2.74': 1400, '2.81': 1428, '2.82': 1431, '2.84': 1439, '2.85': 1442, 'end': -2}
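# section_words presumably maps book/section labels (e.g. '1.15') to an index into
# the_text below, with -1/-2 as sentinels for 'start' and 'end'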
the_text = [('IN', 0, 'in', '', '', '1_15', 2), ('SVM/1', 1, 'sum', '', '', '1_15', 3), ('VBI/1', 2, 'ubi', '', '', '1_18', 4), ('AMO', 3, 'amo', '', '', '1_3', 2), ('PORTO', 4, 'porto', '', '', '1_3', 2), ('VBI/1', 5, 'ubi', '', '', '1_31', 4), ('QVI/1', 6, 'qui', '', '', '1_38', 2), ('QVIS/1', 7, 'quis', '', '', '1_38', 2), ('SVM/1', 8, 'sum', '', '', '1_4', 3), ('IS', 9, 'is', '', '', '1_52', 3), ('FACILIS', 10, 'facilis', '', '', '1_63', 2), ('VLTIMVS', 11, 'ulterior', '', '', '1_64', 2), ('APPELLO/1', 12, 'appello', '', '', '2_1', 2), ('DOCEO', 13, 'doceo', '', '', '2_1', 2), ('DVRVS', 14, 'durus', '', '', '2_1', 2), ('FILIVS', 15, 'filius', '', '', '2_1', 2), ('HABEO', 16, 'habeo', '', '', '2_1', 2), ('LIBERI', 17, 'liberi', '', '', '2_1', 2), ('NAVIGO', 18, 'navigo', '', '', '2_1', 2), ('NEGOTIVM', 19, 'negotium', '', '', '2_1', 2), ('OBTINEO', 20, 'obtineo', '', '', '2_1', 2), ('OFFICIVM', 21, 'officium', '', '', '2_1', 2), ('POPVLVS/1', 22, 'populus', '', '', '2_1', 2), ('SERVVS/1', 23, 'servus', '', '', '2_1', 2), ('TENEO', 24, 'teneo', '', '', '2_1', 2), ('TIMEO', 25, 'timeo', '', '', '2_1', 2), ('VIR', 26, 'vir', '', '', '2_1', 2), ('ITA', 27, 'ita', '', '', '2_14', 3), ('ACER/2', 28, 'acer', '', '', '2_2', 2), ('CONDICIO', 29, 'condicio', '', '', '2_2', 2), ('DVX', 30, 'dux', '', '', '2_2', 2), ('FRATER', 31, 'frater', '', '', '2_2', 2), ('HVMILIS', 32, 'humilis', '', '', '2_2', 2), ('LVX', 33, 'lux', '', '', '2_2', 2), ('MATER', 34, 'mater', '', '', '2_2', 2), ('NOBILIS/2', 35, 'nobilis', '', '', '2_2', 2), ('NOMEN', 36, 'nomen', '', '', '2_2', 2), ('OMNIS', 37, 'omnis', '', '', '2_2', 2), ('PATER', 38, 'pater', '', '', '2_2', 2), ('POTESTAS', 39, 'potestas', '', '', '2_2', 2), ('RESPONDEO', 40, 'respondeo', '', '', '2_2', 2), ('ROGO', 41, 'rogo', '', '', '2_2', 2), ('SALVS', 42, 'salus', '', '', '2_2', 2), ('SERVO', 43, 'servo', '', '', '2_2', 2), ('SOROR', 44, 'soror', '', '', '2_2', 2), ('AFFICIO', 45, 'afficio', '', '', '2_3', 2), ('AGO', 46, 'ago', '', '', '2_3', 2), ('CIVITAS', 47, 'civitas', '', '', '2_3', 2), ('COGO', 48, 'cogo', '', '', '2_3', 2), ('DO', 49, 'do', '', '', '2_3', 2), ('EGREGIVS', 50, 'egregius', '', '', '2_3', 2), ('FACIO', 51, 'facio', '', '', '2_3', 2), ('FVGA', 52, 'fuga', '', '', '2_3', 2), ('FVGIO', 53, 'fugio', '', '', '2_3', 2), ('GERO', 54, 'gero', '', '', '2_3', 2), ('IVBEO', 55, 'iubeo', '', '', '2_3', 2), ('MVNIO/2', 56, 'munio', '', '', '2_3', 2), ('OCCVPO/2', 57, 'occupo', '', '', '2_3', 2), ('PAX', 58, 'pax', '', '', '2_3', 2), ('VENIO', 59, 'venio', '', '', '2_3', 2), ('AMICVS/1', 60, 'amicus', '', '', '2_4', 2), ('AVCTOR', 61, 'auctor', '', '', '2_4', 2), ('AVTEM', 62, 'autem', '', '', '2_4', 2), ('CLAMO', 63, 'clamo', '', '', '2_4', 2), ('CONFICIO', 64, 'conficio', '', '', '2_4', 3), ('CONTINEO', 65, 'contineo', '', '', '2_4', 2), ('CVPIO', 66, 'cupio', '', '', '2_4', 2), ('DICO/2', 67, 'dico', '', '', '2_4', 2), ('GRATVS', 68, 'gratus', '', '', '2_4', 2), ('HOMO', 69, 'homo', '', '', '2_4', 2), ('LEGO/2', 70, 'lego', '', '', '2_4', 2), ('MANEO', 71, 'maneo', '', '', '2_4', 2), ('NOSCO', 72, 'nosco', '', '', '2_4', 2), ('NOVVS', 73, 'novus', '', '', '2_4', 2), ('SCRIBO', 74, 'scribo', '', '', '2_4', 2), ('VIDEO', 75, 'video', '', '', '2_4', 2), ('POSTERVS', 76, 'posterus', '', '', '2_40', 3), ('PRAEBEO', 77, 'praebeo', '', '', '2_40', 2), ('QVAERO', 78, 'quaero', '', '', '2_40', 2), ('QVIDAM', 79, 'quidam', '', '', '2_40', 2), ('REDEO/1', 80, 'redeo', '', '', '2_40', 3), ('TANTVS', 81, 'tantus', '', '', '2_40', 2), ('VEREOR', 82, 
'vereor', '', '', '2_40', 2), ('CIRCITER/2', 83, 'circiter', '', '', '2_41', 2), ('CONSVMO', 84, 'consumo', '', '', '2_41', 2), ('ENIM/2', 85, 'enim', '', '', '2_41', 2), ('INEO/1', 86, 'ineo', '', '', '2_41', 2), ('PEREO/1', 87, 'pereo', '', '', '2_41', 2), ('PROFICISCOR', 88, 'proficiscor', '', '', '2_41', 2), ('SVBITO', 89, 'subito', '', '', '2_41', 3), ('ARBITROR', 90, 'arbitror', '', '', '2_42', 3), ('CASVS', 91, 'casus', '', '', '2_42', 2), ('FAMES', 92, 'fames', '', '', '2_42', 2), ('MOROR/1', 93, 'moror', '', '', '2_42', 3), ('PAENE', 94, 'paene', '', '', '2_42', 2), ('TRADO', 95, 'trado', '', '', '2_42', 2), ('EO/3', 96, 'eo', '', '', '2_43', 2), ('IMPETVS', 97, 'impetus', '', '', '2_43', 2), ('RATIO', 98, 'ratio', '', '', '2_43', 2), ('RVRSVS', 99, 'rursus', '', '', '2_43', 2), ('STATIM', 100, 'statim', '', '', '2_43', 3), ('ALO', 101, 'alo', '', '', '2_44', 2), ('IVNGO', 102, 'iungo', '', '', '2_44', 2), ('NEGLIGO', 103, 'neglego', '', '', '2_44', 3), ('POSTVLO', 104, 'postulo', '', '', '2_44', 3), ('PRAESTO/1', 105, 'praesto', '', '', '2_44', 2), ('APERIO', 106, 'aperio', '', '', '2_45', 2), ('COEPIO', 107, 'coepi', '', '', '2_45', 2), ('IMPERO', 108, 'impero', '', '', '2_45', 2), ('OCCIDO/2', 109, 'occido', '', '', '2_45', 2), ('ORIOR', 110, 'orior', '', '', '2_45', 3), ('QVIES', 111, 'quies', '', '', '2_45', 3), ('SAXVM', 112, 'saxum', '', '', '2_45', 2), ('VESPER', 113, 'vesper', '', '', '2_45', 2), ('ARBOR', 114, 'arbor', '', '', '2_46', 2), ('CONTENDO', 115, 'contendo', '', '', '2_46', 2), ('EXCIPIO', 116, 'excipio', '', '', '2_46', 2), ('HORTOR', 117, 'hortor', '', '', '2_46', 2), ('PASSVS/1', 118, 'passus', '', '', '2_46', 2), ('QVAM/1', 119, 'quam', '', '', '2_46', 2), ('COLLIGO/3', 120, 'colligo', '', '', '2_47', 3), ('CONFICIO', 121, 'conficio', '', '', '2_47', 3), ('CONSPECTVS/1', 122, 'conspectus', '', '', '2_47', 2), ('FALLO', 123, 'fallo', '', '', '2_47', 2), ('LICET/1', 124, 'licet', '', '', '2_47', 2), ('LOQVOR', 125, 'loquor', '', '', '2_47', 3), ('MORA', 126, 'mora', '', '', '2_47', 2), ('OLIM', 127, 'olim', '', '', '2_47', 2), ('PROGREDIOR', 128, 'progredior', '', '', '2_47', 2), ('SEQVOR', 129, 'sequor', '', '', '2_47', 2), ('VEHO', 130, 'veho', '', '', '2_47', 2), ('AETAS', 131, 'aetas', '', '', '2_48', 2), ('IGNIS', 132, 'ignis', '', '', '2_48', 2), ('ITA', 133, 'ita', '', '', '2_48', 3), ('QVANTVS/1', 134, 'quantus', '', '', '2_48', 2), ('SATIS/2', 135, 'satis', '', '', '2_48', 2), ('SPERO', 136, 'spero', '', '', '2_48', 3), ('FEMINA', 137, 'femina', '', '', '2_49', 2), ('INCOLVMIS', 138, 'incolumis', '', '', '2_49', 2), ('OPPRIMO', 139, 'opprimo', '', '', '2_49', 3), ('SENTIO', 140, 'sentio', '', '', '2_49', 3), ('SOL', 141, 'sol', '', '', '2_49', 3), ('VESTIS', 142, 'vestis', '', '', '2_49', 3), ('VXOR', 143, 'uxor', '', '', '2_49', 2), ('AMITTO', 144, 'amitto', '', '', '2_5', 2), ('ANNVS', 145, 'annus', '', '', '2_5', 2), ('AVCTORITAS', 146, 'auctoritas', '', '', '2_5', 2), ('AVDIO', 147, 'audio', '', '', '2_5', 2), ('CLARVS', 148, 'clarus', '', '', '2_5', 2), ('COGNOSCO', 149, 'cognosco', '', '', '2_5', 2), ('GRAVIS', 150, 'gravis', '', '', '2_5', 2), ('HOSTIS', 151, 'hostis', '', '', '2_5', 2), ('INVENIO', 152, 'invenio', '', '', '2_5', 2), ('IVS/1', 153, 'ius', '', '', '2_5', 2), ('LEX', 154, 'lex', '', '', '2_5', 2), ('MANDO/2', 155, 'mando', '', '', '2_5', 2), ('MITTO', 156, 'mitto', '', '', '2_5', 2), ('MODVS', 157, 'modus', '', '', '2_5', 2), ('OPVS/1', 158, 'opus', '', '', '2_5', 2), ('PETO', 159, 'peto', '', '', '2_5', 2), ('PONO', 160, 
'pono', '', '', '2_5', 2), ('SCIO', 161, 'scio', '', '', '2_5', 2), ('STO', 162, 'sto', '', '', '2_5', 2), ('VBI/1', 163, 'ubi', '', '', '2_5', 4), ('DIFFERO', 164, 'differo', '', '', '2_50', 2), ('INCOLO/2', 165, 'incolo', '', '', '2_50', 2), ('INITIVM', 166, 'initium', '', '', '2_50', 2), ('MERCATOR', 167, 'mercator', '', '', '2_50', 2), ('ORIOR', 168, 'orior', '', '', '2_50', 3), ('SAEPE', 169, 'saepe', '', '', '2_50', 2), ('SOL', 170, 'sol', '', '', '2_50', 3), ('ANGVSTVS', 171, 'angustus', '', '', '2_51', 2), ('ARBITROR', 172, 'arbitror', '', '', '2_51', 3), ('CONOR', 173, 'conor', '', '', '2_51', 2), ('DELIGO/3', 174, 'deligo', '', '', '2_51', 2), ('DIVIDO', 175, 'divido', '', '', '2_51', 2), ('PERSVADEO', 176, 'persuadeo', '', '', '2_51', 2), ('VNDIQVE', 177, 'undique', '', '', '2_51', 2), ('AEDIFICIVM', 178, 'aedificium', '', '', '2_52', 2), ('CLIENS', 179, 'cliens', '', '', '2_52', 2), ('INCENDO', 180, 'incendo', '', '', '2_52', 2), ('MAGISTRATVS', 181, 'magistratus', '', '', '2_52', 2), ('SVSPICOR', 182, 'suspicor', '', '', '2_52', 2), ('VT/4', 183, 'ut', '', '', '2_52', 2), ('ALTERALTER', 184, 'alter', '', '', '2_53', 2), ('CERTIORFIO', 185, 'certior', '', '', '2_53', 2), ('COMPLVRES/2', 186, 'complures', '', '', '2_53', 2), ('DESISTO', 187, 'desisto', '', '', '2_53', 2), ('FOSSA', 188, 'fossa', '', '', '2_53', 2), ('INTEREA', 189, 'interea', '', '', '2_53', 2), ('NEGO', 190, 'nego', '', '', '2_53', 2), ('NOCTV', 191, 'noctu', '', '', '2_53', 2), ('PONS', 192, 'pons', '', '', '2_53', 2), ('SPES', 193, 'spes', '', '', '2_53', 2), ('ABDO', 194, 'abdo', '', '', '2_55', 2), ('IVGVM', 195, 'iugum', '', '', '2_55', 2), ('NONDVM', 196, 'nondum', '', '', '2_55', 2), ('ASCENDO', 197, 'ascendo', '', '', '2_56', 2), ('CONSIDO', 198, 'consido', '', '', '2_56', 2), ('CONSVESCO', 199, 'consuesco', '', '', '2_56', 2), ('PERITVS', 200, 'peritus', '', '', '2_56', 2), ('FACILE', 201, 'facile', '', '', '2_57', 2), ('ACCEDO/1', 202, 'accedo', '', '', '2_6', 2), ('ASSVM/1', 203, 'adsum', '', '', '2_6', 2), ('AVXILIVM', 204, 'auxilium', '', '', '2_6', 2), ('CLAVDO/1', 205, 'claudo', '', '', '2_6', 2), ('CONSILIVM', 206, 'consilium', '', '', '2_6', 2), ('CRAS/2', 207, 'cras', '', '', '2_6', 2), ('DEBEO', 208, 'debeo', '', '', '2_6', 2), ('DIMITTO', 209, 'dimitto', '', '', '2_6', 2), ('EXSPECTO', 210, 'exspecto', '', '', '2_6', 2), ('INTERFICIO', 211, 'interficio', '', '', '2_6', 2), ('NVNTIO', 212, 'nuntio', '', '', '2_6', 2), ('OPPRIMO', 213, 'opprimo', '', '', '2_6', 3), ('POENA', 214, 'poena', '', '', '2_6', 2), ('PREMO', 215, 'premo', '', '', '2_6', 2), ('PROBO', 216, 'probo', '', '', '2_6', 2), ('PVTO', 217, 'puto', '', '', '2_6', 2), ('SENTENTIA', 218, 'sententia', '', '', '2_6', 2), ('SENTIO', 219, 'sentio', '', '', '2_6', 3), ('TARDVS', 220, 'tardus', '', '', '2_6', 2), ('VOCO', 221, 'voco', '', '', '2_6', 2), ('VOX', 222, 'vox', '', '', '2_6', 2), ('CONSVETVDO', 223, 'consuetudo', '', '', '2_60', 2), ('CRVCIATVS', 224, 'cruciatus', '', '', '2_60', 2), ('LOQVOR', 225, 'loquor', '', '', '2_60', 3), ('PLACEO', 226, 'placeo', '', '', '2_61', 2), ('PRAETEREA', 227, 'praeterea', '', '', '2_61', 2), ('HVC', 228, 'huc', '', '', '2_62', 3), ('PENDO', 229, 'pendo', '', '', '2_62', 2), ('STIPENDIVM', 230, 'stipendium', '', '', '2_62', 2), ('VTOR', 231, 'utor', '', '', '2_62', 2), ('CONIVRO', 232, 'coniuro', '', '', '2_63', 2), ('CONSENTIO', 233, 'consentio', '', '', '2_63', 2), ('DEFERO', 234, 'defero', '', '', '2_64', 2), ('INTRA/2', 235, 'intra', '', '', '2_64', 2), ('POSTVLO', 236, 'postulo', '', '', 
'2_64', 3), ('PROPTER/2', 237, 'propter', '', '', '2_64', 2), ('REPERIO', 238, 'reperio', '', '', '2_64', 2), ('COLLIS', 239, 'collis', '', '', '2_66', 2), ('COTIDIE', 240, 'cotidie', '', '', '2_66', 2), ('IDONEVS', 241, 'idoneus', '', '', '2_66', 2), ('INFERIOR', 242, 'inferior', '', '', '2_66', 2), ('MOROR/1', 243, 'moror', '', '', '2_66', 3), ('PALVS/2', 244, 'palus', '', '', '2_66', 2), ('PRAESVM/1', 245, 'praesum', '', '', '2_66', 2), ('APPROPINQVO', 246, 'appropinquo', '', '', '2_67', 2), ('INIQVVS/2', 247, 'iniquus', '', '', '2_67', 2), ('INSIDIAE', 248, 'insidiae', '', '', '2_67', 2), ('OPS', 249, 'ops', '', '', '2_69', 2), ('PROICIO', 250, 'proicio', '', '', '2_69', 2), ('SIC', 251, 'sic', '', '', '2_69', 2), ('CADO', 252, 'cado', '', '', '2_7', 2), ('CELER', 253, 'celer', '', '', '2_7', 2), ('CVRRO', 254, 'curro', '', '', '2_7', 2), ('EXPELLO', 255, 'expello', '', '', '2_7', 2), ('MATERIA', 256, 'materia', '', '', '2_7', 2), ('MENSIS', 257, 'mensis', '', '', '2_7', 2), ('MVNVS', 258, 'munus', '', '', '2_7', 2), ('NEMO', 259, 'nemo', '', '', '2_7', 2), ('NVMERVS', 260, 'numerus', '', '', '2_7', 2), ('OSTENDO', 261, 'ostendo', '', '', '2_7', 2), ('PES', 262, 'pes', '', '', '2_7', 2), ('POSSVM/1', 263, 'possum', '', '', '2_7', 2), ('PROPERO', 264, 'propero', '', '', '2_7', 2), ('RAPIO', 265, 'rapio', '', '', '2_7', 2), ('SEMPER', 266, 'semper', '', '', '2_7', 2), ('SIMILIS', 267, 'similis', '', '', '2_7', 2), ('SPECTO', 268, 'specto', '', '', '2_7', 2), ('TEMPVS/1', 269, 'tempus', '', '', '2_7', 2), ('VERVS', 270, 'verus', '', '', '2_7', 2), ('VESTIS', 271, 'vestis', '', '', '2_7', 3), ('CONSPICIO', 272, 'conspicio', '', '', '2_70', 2), ('OPORTET', 273, 'oportet', '', '', '2_70', 2), ('PERTVRBO/2', 274, nan, '', '', '2_70', 2), ('OPERA', 275, 'opera', '', '', '2_72', 2), ('VITO', 276, 'vito', '', '', '2_72', 2), ('ALACER', 277, 'alacer', '', '', '2_77', 2), ('EXPVGNO', 278, 'expugno', '', '', '2_77', 2), ('NECO', 279, 'neco', '', '', '2_77', 2), ('STATIM', 280, 'statim', '', '', '2_77', 3), ('VENTVS', 281, 'ventus', '', '', '2_77', 2), ('AMPLVS', 282, 'amplus', '', '', '2_79', 2), ('EQVES', 283, 'eques', '', '', '2_79', 2), ('NASCOR', 284, 'nascor', '', '', '2_79', 2), ('TVTVS', 285, 'tutus', '', '', '2_79', 2), ('ACCIDO/1', 286, 'accido', '', '', '2_8', 2), ('CAPIO/2', 287, 'capio', '', '', '2_8', 2), ('CONSVLO', 288, 'consulo', '', '', '2_8', 2), ('EFFICIO', 289, 'efficio', '', '', '2_8', 2), ('FLVMEN', 290, 'flumen', '', '', '2_8', 2), ('GENVS/1', 291, 'genus', '', '', '2_8', 2), ('IDEM', 292, 'idem', '', '', '2_8', 2), ('IPSE', 293, 'ipse', '', '', '2_8', 2), ('IS', 294, 'is', '', '', '2_8', 3), ('ITER', 295, 'iter', '', '', '2_8', 2), ('MILES', 296, 'miles', '', '', '2_8', 2), ('MONEO', 297, 'moneo', '', '', '2_8', 2), ('TERREO', 298, 'terreo', '', '', '2_8', 2), ('VALEO', 299, 'valeo', '', '', '2_8', 2), ('VINCO', 300, 'vinco', '', '', '2_8', 2), ('VTILIS', 301, 'utilis', '', '', '2_8', 2), ('BREVIS', 302, 'brevis', '', '', '2_80', 2), ('MATVRVS', 303, 'maturus', '', '', '2_80', 2), ('CLAM/1', 304, 'clam', '', '', '2_83', 2), ('COMPLEO', 305, 'compleo', '', '', '2_83', 2), ('PROPE/2', 306, 'prope', '', '', '2_83', 2), ('SVBITO', 307, 'subito', '', '', '2_83', 3), ('COLLIGO/3', 308, 'colligo', '', '', '2_88', 3), ('NONSOLVM', 309, 'non', '', '', '2_88', 2), ('REDEO/1', 310, 'redeo', '', '', '2_88', 3), ('ADMITTO', 311, 'admitto', '', '', '2_89', 2), ('CAEDES', 312, 'caedes', '', '', '2_89', 2), ('HVC', 313, 'huc', '', '', '2_89', 3), ('MORIOR', 314, 'morior', '', '', '2_89', 
2), ('ADDVCO', 315, 'adduco', '', '', '2_9', 2), ('ARMA', 316, 'arma', '', '', '2_9', 2), ('CIVIS', 317, 'civis', '', '', '2_9', 2), ('CONFIRMO', 318, 'confirmo', '', '', '2_9', 2), ('GENS', 319, 'gens', '', '', '2_9', 2), ('MONS', 320, 'mons', '', '', '2_9', 2), ('PERICVLVM', 321, 'periculum', '', '', '2_9', 2), ('PROCEDO/1', 322, 'procedo', '', '', '2_9', 2), ('SPERO', 323, 'spero', '', '', '2_9', 3), ('STVDIVM', 324, 'studium', '', '', '2_9', 2), ('EFFERO/2', 325, 'effero', '', '', '2_90', 2), ('NEGLIGO', 326, 'neglego', '', '', '2_90', 3), ('CONSTAT', 327, 'constat', '', '', '2_94', 2), ('CORNV', 328, 'cornu', '', '', '2_94', 2), ('PARCO', 329, 'parco', '', '', '2_94', 2), ('QVIES', 330, 'quies', '', '', '2_94', 3), ('EDITVS/2', 331, 'editus', '', '', '2_97', 2), ('OPPORTVNVS', 332, 'opportunus', '', '', '2_97', 2), ('PRAESERTIM', 333, 'praesertim', '', '', '2_97', 2), ('PRISTINVS', 334, 'pristinus', '', '', '2_97', 2), ('CIBVS', 335, 'cibus', '', '', '2_99', 2), ('FLEO', 336, 'fleo', '', '', '2_99', 2), ('POSTEAQVAM', 337, 'posteaquam', '', '', '2_99', 2), ('POSTERVS', 338, 'posterus', '', '', '2_99', 3), ('PREX', 339, 'prex', '', '', '2_99', 2), ('TVRRIS', 340, 'turris', '', '', '2_99', 2), ('AQVA', 341, 'aqua', '', '', '1_1', 1), ('ET/2', 342, 'et', '', '', '1_1', 1), ('INSVLA', 343, 'insula', '', '', '1_1', 1), ('NON', 344, 'non', '', '', '1_1', 1), ('SED', 345, 'sed', '', '', '1_1', 1), ('SILVA', 346, 'silva', '', '', '1_1', 1), ('SVM/1', 347, 'sum', '', '', '1_1', 3), ('VIA', 348, 'via', '', '', '1_1', 1), ('AVGEO', 349, 'augeo', '', '', '1_10', 1), ('DISCIPLINA', 350, 'disciplina', '', '', '1_10', 1), ('DOCEO', 351, 'doceo', '', '', '1_10', 2), ('HABEO', 352, 'habeo', '', '', '1_10', 2), ('LINGVA', 353, 'lingua', '', '', '1_10', 1), ('PATRIA', 354, 'patria', '', '', '1_10', 1), ('TERREO', 355, 'terreo', '', '', '1_10', 2), ('AMICITIA', 356, 'amicitia', '', '', '1_11', 1), ('GLORIA', 357, 'gloria', '', '', '1_11', 1), ('GRATIA', 358, 'gratia', '', '', '1_11', 1), ('IBI', 359, 'ibi', '', '', '1_11', 1), ('IN', 360, 'in', '', '', '1_11', 2), ('MANEO', 361, 'maneo', '', '', '1_11', 2), ('MATERIA', 362, 'materia', '', '', '1_11', 2), ('VIDEO', 363, 'video', '', '', '1_11', 2), ('ALTVS', 364, 'altus', '', '', '1_12', 1), ('CASA', 365, 'casa', '', '', '1_12', 1), ('LIBERO', 366, 'libero', '', '', '1_12', 1), ('MEREO', 367, 'mereo', '', '', '1_12', 1), ('SOCIVS/1', 368, 'socius', '', '', '1_12', 1), ('TENEO', 369, 'teneo', '', '', '1_12', 2), ('AB', 370, 'a', '', '', '1_13', 1), ('CAPTIVVS/1', 371, 'captivus', '', '', '1_13', 1), ('CENA', 372, 'cena', '', '', '1_13', 1), ('DE', 373, 'de', '', '', '1_13', 1), ('EX', 374, 'e', '', '', '1_13', 1), ('MOVEO', 375, 'moveo', '', '', '1_13', 1), ('SINGVLVS', 376, 'singuli', '', '', '1_13', 1), ('VOCO', 377, 'voco', '', '', '1_13', 2), ('AGER', 378, 'ager', '', '', '1_14', 1), ('AMICVS/2', 379, 'amicus', '', '', '1_14', 1), ('FILIVS', 380, 'filius', '', '', '1_14', 2), ('LIBER/2', 381, 'liber', '', '', '1_14', 1), ('MAGISTER', 382, 'magister', '', '', '1_14', 1), ('NOSTER', 383, 'noster', '', '', '1_14', 1), ('PVER', 384, 'puer', '', '', '1_14', 1), ('SACER', 385, 'sacer', '', '', '1_14', 1), ('VIR', 386, 'vir', '', '', '1_14', 2), ('ANIMVS', 387, 'animus', '', '', '1_15', 1), ('COLONVS/1', 388, 'colonus', '', '', '1_15', 1), ('HABITO', 389, 'habito', '', '', '1_15', 1), ('MIGRO', 390, 'migro', '', '', '1_15', 1), ('ANNVS', 391, 'annus', '', '', '1_16', 2), ('BARBARVS/2', 392, 'barbarus', '', '', '1_16', 1), ('CASTRA/2', 393, 'castra', '', '', 
'1_16', 1), ('CONSILIVM', 394, 'consilium', '', '', '1_16', 2), ('EVOCO', 395, 'evoco', '', '', '1_16', 1), ('FRVMENTVM', 396, 'frumentum', '', '', '1_16', 1), ('PRAEMIVM', 397, 'praemium', '', '', '1_16', 1), ('SIGNVM', 398, 'signum', '', '', '1_16', 1), ('ARMA', 399, 'arma', '', '', '1_17', 2), ('AVXILIVM', 400, 'auxilium', '', '', '1_17', 2), ('BELLVM', 401, 'bellum', '', '', '1_17', 1), ('CONCORDIA', 402, 'concordia', '', '', '1_17', 1), ('DEBEO', 403, 'debeo', '', '', '1_17', 2), ('MATVRO', 404, 'maturo', '', '', '1_17', 1), ('NVNTIVS/1', 405, 'nuntius', '', '', '1_17', 1), ('AEQVVS', 406, 'aequus', '', '', '1_18', 1), ('CAVSA', 407, 'causa', '', '', '1_18', 1), ('DOMINVS', 408, 'dominus', '', '', '1_18', 1), ('LATVS/2', 409, 'latus', '', '', '1_18', 1), ('NE/2', 410, 'ne', '', '', '1_18', 1), ('OPPIDVM', 411, 'oppidum', '', '', '1_18', 1), ('POPVLVS/1', 412, 'populus', '', '', '1_18', 2), ('PVBLICVS/2', 413, 'publicus', '', '', '1_18', 1), ('QVIS/1', 414, 'quis', '', '', '1_18', 2), ('VERVS', 415, 'verus', '', '', '1_18', 2), ('VESTER', 416, 'vester', '', '', '1_18', 1), ('ACCEDO/1', 417, 'accedo', '', '', '1_19', 2), ('AGO', 418, 'ago', '', '', '1_19', 2), ('CEDO/1', 419, 'cedo', '', '', '1_19', 1), ('DEFENDO', 420, 'defendo', '', '', '1_19', 1), ('EXCEDO/1', 421, 'excedo', '', '', '1_19', 1), ('EXSPECTO', 422, 'exspecto', '', '', '1_19', 2), ('MITTO', 423, 'mitto', '', '', '1_19', 2), ('OFFICIVM', 424, 'officium', '', '', '1_19', 2), ('PONO', 425, 'pono', '', '', '1_19', 2), ('AMO', 426, 'amo', '', '', '1_2', 2), ('BONVS', 427, 'bonus', '', '', '1_2', 1), ('DVRVS', 428, 'durus', '', '', '1_2', 2), ('FAMA', 429, 'fama', '', '', '1_2', 1), ('FAMILIA', 430, 'familia', '', '', '1_2', 1), ('FORTVNA', 431, 'fortuna', '', '', '1_2', 1), ('MAGNVS', 432, 'magnus', '', '', '1_2', 1), ('PARVVS/2', 433, 'parvus', '', '', '1_2', 1), ('PORTO', 434, 'porto', '', '', '1_2', 2), ('PVELLA', 435, 'puella', '', '', '1_2', 1), ('TERRA', 436, 'terra', '', '', '1_2', 1), ('VITA', 437, 'vita', '', '', '1_2', 1), ('ACCIPIO', 438, 'accipio', '', '', '1_20', 1), ('CAPIO/2', 439, 'capio', '', '', '1_20', 2), ('FACIO', 440, 'facio', '', '', '1_20', 2), ('INVENIO', 441, 'invenio', '', '', '1_20', 2), ('MVNIO/2', 442, 'munio', '', '', '1_20', 2), ('PVLCHER', 443, 'pulcher', '', '', '1_20', 1), ('REGO', 444, 'rego', '', '', '1_20', 1), ('TEMPLVM', 445, 'templum', '', '', '1_20', 1), ('VENIO', 446, 'venio', '', '', '1_20', 2), ('DVCO', 447, 'duco', '', '', '1_21', 1), ('EFFICIO', 448, 'efficio', '', '', '1_21', 2), ('LOCVS', 449, 'locus', '', '', '1_21', 1), ('NARRO', 450, 'narro', '', '', '1_21', 1), ('PRETIVM', 451, 'pretium', '', '', '1_21', 1), ('TERMINVS', 452, 'terminus', '', '', '1_21', 1), ('COMMODVS', 453, 'commodus', '', '', '1_22', 1), ('DEA', 454, 'dea', '', '', '1_22', 1), ('DEVS', 455, 'deus', '', '', '1_22', 1), ('DICO/2', 456, 'dico', '', '', '1_22', 2), ('FVGIO', 457, 'fugio', '', '', '1_22', 2), ('ITA', 458, 'ita', '', '', '1_22', 3), ('OTIVM', 459, 'otium', '', '', '1_22', 1), ('STVDIVM', 460, 'studium', '', '', '1_22', 2), ('VALEO', 461, 'valeo', '', '', '1_22', 2), ('VARIVS', 462, 'varius', '', '', '1_22', 1), ('AFFICIO', 463, 'afficio', '', '', '1_23', 2), ('CVM/2', 464, 'cum', '', '', '1_23', 1), ('FIRMVS', 465, 'firmus', '', '', '1_23', 1), ('GERO', 466, 'gero', '', '', '1_23', 2), ('INCIPIO', 467, 'incipio', '', '', '1_23', 1), ('PERPETVVS', 468, 'perpetuus', '', '', '1_23', 1), ('AVDIO', 469, 'audio', '', '', '1_24', 2), ('CONTINEO', 470, 'contineo', '', '', '1_24', 2), ('LIBER/1', 471, 
'liber', '', '', '1_24', 1), ('PLENVS', 472, 'plenus', '', '', '1_24', 1), ('TARDVS', 473, 'tardus', '', '', '1_24', 2), ('TRAHO', 474, 'traho', '', '', '1_24', 1), ('VERBVM', 475, 'verbum', '', '', '1_24', 1), ('CONVENIO', 476, 'convenio', '', '', '1_25', 1), ('CONVOCO', 477, 'convoco', '', '', '1_25', 1), ('MEDIVS', 478, 'medius', '', '', '1_25', 1), ('REDVCO', 479, 'reduco', '', '', '1_25', 1), ('SENTENTIA', 480, 'sententia', '', '', '1_25', 2), ('LEGO/2', 481, 'lego', '', '', '1_26', 2), ('NVMNE', 482, 'numne', '', '', '1_26', 1), ('NVMQVAM', 483, 'numquam', '', '', '1_26', 1), ('POETA', 484, 'poeta', '', '', '1_26', 1), ('QVE', 485, 'que', '', '', '1_26', 1), ('RELIQVVS', 486, 'reliquus', '', '', '1_26', 1), ('SCRIBO', 487, 'scribo', '', '', '1_26', 2), ('COMMITTO', 488, 'committo', '', '', '1_27', 1), ('FINITIMI', 489, 'finitimus', '', '', '1_27', 1), ('FINITIMVS', 490, 'finitimus', '', '', '1_27', 1), ('PAVCI', 491, 'paucus', '', '', '1_27', 1), ('PROELIVM', 492, 'proelium', '', '', '1_27', 1), ('PROELIVMCOMMITTERE', 493, 'proelium', '', '', '1_27', 1), ('PROPERO', 494, 'propero', '', '', '1_27', 2), ('VENTVS', 495, 'ventus', '', '', '1_27', 2), ('APPELLO/1', 496, 'appello', '', '', '1_28', 2), ('AVT', 497, 'aut', '', '', '1_28', 1), ('ETET', 498, 'et', '', '', '1_28', 1), ('NEQVE', 499, 'neque', '', '', '1_28', 1), ('NEQVENEC', 500, 'neque', '', '', '1_28', 1), ('PRAESIDIVM', 501, 'praesidium', '', '', '1_28', 1), ('AMITTO', 502, 'amitto', '', '', '1_29', 2), ('MISER', 503, 'miser', '', '', '1_29', 1), ('PERICVLVM', 504, 'periculum', '', '', '1_29', 2), ('PRO/1', 505, 'pro', '', '', '1_29', 1), ('AGRICOLA', 506, 'agricola', '', '', '1_3', 1), ('LABORO', 507, 'laboro', '', '', '1_3', 1), ('LONGVS', 508, 'longus', '', '', '1_3', 1), ('MINIME', 509, 'minime', '', '', '1_3', 1), ('MVLTVS', 510, 'multus', '', '', '1_3', 1), ('NOVVS', 511, 'novus', '', '', '1_3', 2), ('PARO/2', 512, 'paro', '', '', '1_3', 1), ('SIC', 513, 'sic', '', '', '1_3', 2), ('SPECTO', 514, 'specto', '', '', '1_3', 2), ('ABSVM/1', 515, 'absum', '', '', '1_30', 1), ('IAM', 516, 'iam', '', '', '1_30', 1), ('INIMICVS/1', 517, 'inimicus', '', '', '1_30', 1), ('NOSCO', 518, 'nosco', '', '', '1_30', 2), ('PROCEDO/1', 519, 'procedo', '', '', '1_30', 2), ('PRODVCO', 520, 'produco', '', '', '1_30', 1), ('RETENTO/1', 521, 'retento', '', '', '1_30', 1), ('SINE', 522, 'sine', '', '', '1_30', 1), ('CVPIO', 523, 'cupio', '', '', '1_31', 2), ('DIMITTO', 524, 'dimitto', '', '', '1_31', 2), ('DVBITO', 525, 'dubito', '', '', '1_31', 1), ('EGO', 526, 'ego', '', '', '1_31', 1), ('INTEGER', 527, 'integer', '', '', '1_31', 1), ('IS', 528, 'is', '', '', '1_31', 3), ('NOS', 529, 'nos', '', '', '1_31', 1), ('OLIM', 530, 'olim', '', '', '1_31', 2), ('TV', 531, 'tu', '', '', '1_31', 1), ('VOS', 532, 'vos', '', '', '1_31', 1), ('DISCEDO/1', 533, 'discedo', '', '', '1_32', 1), ('ETIAM', 534, 'etiam', '', '', '1_32', 1), ('INPROVINCIAMREDIGERE', 535, 'in', '', '', '1_32', 1), ('IVBEO', 536, 'iubeo', '', '', '1_32', 2), ('PORTA', 537, 'porta', '', '', '1_32', 1), ('RECIPIO', 538, 'recipio', '', '', '1_32', 1), ('REDIGO', 539, 'redigo', '', '', '1_32', 1), ('REMOVEO', 540, 'removeo', '', '', '1_32', 1), ('BENEFICIVM', 541, 'beneficium', '', '', '1_33', 1), ('EGREGIVS', 542, 'egregius', '', '', '1_33', 2), ('EXEMPLVM', 543, 'exemplum', '', '', '1_33', 1), ('PER', 544, 'per', '', '', '1_33', 1), ('PROPONO', 545, 'propono', '', '', '1_33', 1), ('SVB', 546, 'sub', '', '', '1_33', 1), ('SVSCIPIO', 547, 'suscipio', '', '', '1_33', 1), ('ASSVM/1', 548, 
'adsum', '', '', '1_34', 2), ('EDVCO/2', 549, 'educo', '', '', '1_34', 1), ('INTERFICIO', 550, 'interficio', '', '', '1_34', 2), ('LIBERI', 551, 'liberi', '', '', '1_34', 2), ('PERMITTO', 552, 'permitto', '', '', '1_34', 1), ('PRIMVS', 553, 'primus', '', '', '1_34', 1), ('REMANEO', 554, 'remaneo', '', '', '1_34', 1), ('SVPERBIA', 555, 'superbia', '', '', '1_34', 1), ('SVPERBVS/2', 556, 'superbus', '', '', '1_34', 1), ('TAMEN', 557, 'tamen', '', '', '1_34', 1), ('DILIGENTIA', 558, 'diligentia', '', '', '1_35', 1), ('DO', 559, 'do', '', '', '1_35', 2), ('INTER', 560, 'inter', '', '', '1_35', 1), ('LVDVS', 561, 'ludus', '', '', '1_35', 1), ('OB', 562, 'ob', '', '', '1_35', 1), ('SVBMITTO', 563, 'sumitto', '', '', '1_35', 1), ('CVR/1', 564, 'cur', '', '', '1_36', 1), ('NATVRA', 565, 'natura', '', '', '1_36', 1), ('PETO', 566, 'peto', '', '', '1_36', 2), ('QVI/1', 567, 'qui', '', '', '1_36', 2), ('SVSTINEO', 568, 'sustineo', '', '', '1_36', 1), ('CONSERVO', 569, 'conservo', '', '', '1_37', 1), ('INTERMITTO', 570, 'intermitto', '', '', '1_37', 1), ('OBTINEO', 571, 'obtineo', '', '', '1_37', 2), ('PERMOVEO', 572, 'permoveo', '', '', '1_37', 1), ('AC/1', 573, 'ac', '', '', '1_38', 1), ('ATQVE/1', 574, 'atque', '', '', '1_38', 1), ('COGO', 575, 'cogo', '', '', '1_38', 2), ('MODVS', 576, 'modus', '', '', '1_38', 2), ('MONEO', 577, 'moneo', '', '', '1_38', 2), ('NAM', 578, 'nam', '', '', '1_38', 1), ('ANTE/2', 579, 'ante', '', '', '1_39', 1), ('COGNOSCO', 580, 'cognosco', '', '', '1_39', 2), ('PLICO', 581, 'plico', '', '', '1_39', 1), ('TRANS/2', 582, 'trans', '', '', '1_39', 1), ('CARRVS', 583, 'carrus', '', '', '1_4', 1), ('EQVVS', 584, 'equus', '', '', '1_4', 1), ('LAVDO', 585, 'laudo', '', '', '1_4', 1), ('MALVS/3', 586, 'malus', '', '', '1_4', 1), ('NVNC', 587, 'nunc', '', '', '1_4', 1), ('QVOD/1', 588, 'quod', '', '', '1_4', 1), ('SERVVS/1', 589, 'servus', '', '', '1_4', 2), ('VBI/1', 590, 'ubi', '', '', '1_4', 4), ('CLAMO', 591, 'clamo', '', '', '1_40', 2), ('DVX', 592, 'dux', '', '', '1_40', 2), ('HOMO', 593, 'homo', '', '', '1_40', 2), ('LEX', 594, 'lex', '', '', '1_40', 2), ('MILES', 595, 'miles', '', '', '1_40', 2), ('PAX', 596, 'pax', '', '', '1_40', 2), ('PREMO', 597, 'premo', '', '', '1_40', 2), ('SALVS', 598, 'salus', '', '', '1_40', 2), ('VVLNERO', 599, 'vulnero', '', '', '1_40', 1), ('ANTECEDO/1', 600, 'antecedo', '', '', '1_41', 1), ('FORTASSE', 601, 'fortasse', '', '', '1_41', 1), ('RELINQVO', 602, 'relinquo', '', '', '1_41', 1), ('STO', 603, 'sto', '', '', '1_41', 2), ('TRADVCO', 604, 'traduco', '', '', '1_41', 1), ('TRANSPORTO', 605, 'transporto', '', '', '1_41', 1), ('VIVO', 606, 'vivo', '', '', '1_41', 1), ('CERNO', 607, 'cerno', '', '', '1_42', 1), ('CERTVS', 608, 'certus', '', '', '1_42', 1), ('COMMOVEO', 609, 'commoveo', '', '', '1_42', 1), ('FACTVM', 610, 'factum', '', '', '1_42', 1), ('NOTVS/2', 611, 'notus', '', '', '1_42', 1), ('PARATVS/2', 612, 'paratus', '', '', '1_42', 1), ('POSSVM/1', 613, 'possum', '', '', '1_42', 2), ('ROGO', 614, 'rogo', '', '', '1_42', 2), ('TERTIVS', 615, 'tertius', '', '', '1_42', 1), ('ADDVCO', 616, 'adduco', '', '', '1_43', 2), ('CONSVLO', 617, 'consulo', '', '', '1_43', 2), ('FVGA', 618, 'fuga', '', '', '1_43', 2), ('INFVGAMDO', 619, 'in', '', '', '1_43', 1), ('NONIAM', 620, 'non', '', '', '1_43', 1), ('PROVIDEO', 621, 'provideo', '', '', '1_43', 1), ('SPATIVM', 622, 'spatium', '', '', '1_43', 1), ('TIMIDVS', 623, 'timidus', '', '', '1_43', 1), ('VLTIMVS', 624, 'ulterior', '', '', '1_43', 2), ('EXPEDIO', 625, 'expedio', '', '', '1_44', 1), 
('IMPEDIMENTVM', 626, 'impedimentum', '', '', '1_44', 1), ('IMPEDIO', 627, 'impedio', '', '', '1_44', 1), ('LIGO/2', 628, 'ligo', '', '', '1_44', 1), ('PES', 629, 'pes', '', '', '1_44', 2), ('REGNVM', 630, 'regnum', '', '', '1_44', 1), ('REX', 631, 'rex', '', '', '1_44', 1), ('VERTO', 632, 'verto', '', '', '1_44', 1), ('CAPVT', 633, 'caput', '', '', '1_45', 1), ('CARMEN/1', 634, 'carmen', '', '', '1_45', 1), ('CLAMOR', 635, 'clamor', '', '', '1_45', 1), ('CLAVDO/1', 636, 'claudo', '', '', '1_45', 2), ('CORPVS', 637, 'corpus', '', '', '1_45', 1), ('FLVMEN', 638, 'flumen', '', '', '1_45', 2), ('IACIO', 639, 'iacio', '', '', '1_45', 1), ('NOMEN', 640, 'nomen', '', '', '1_45', 2), ('ORDO', 641, 'ordo', '', '', '1_45', 1), ('TEMPVS/1', 642, 'tempus', '', '', '1_45', 2), ('VVLNVS', 643, 'vulnus', '', '', '1_45', 1), ('CIVIS', 644, 'civis', '', '', '1_46', 2), ('CONFICIO', 645, 'conficio', '', '', '1_46', 3), ('FINIS', 646, 'finis', '', '', '1_46', 1), ('HOSTIS', 647, 'hostis', '', '', '1_46', 2), ('ITER', 648, 'iter', '', '', '1_46', 2), ('MARE', 649, 'mare', '', '', '1_46', 1), ('MONS', 650, 'mons', '', '', '1_46', 2), ('NAVIS', 651, 'navis', '', '', '1_46', 1), ('PASTOR', 652, 'pastor', '', '', '1_46', 1), ('VESTIS', 653, 'vestis', '', '', '1_46', 3), ('CELER', 654, 'celer', '', '', '1_47', 2), ('FACILIS', 655, 'facilis', '', '', '1_47', 2), ('FORTIS', 656, 'fortis', '', '', '1_47', 1), ('IVS/1', 657, 'ius', '', '', '1_47', 2), ('LIBERTAS', 658, 'libertas', '', '', '1_47', 1), ('OMNIS', 659, 'omnis', '', '', '1_47', 2), ('PAR/1', 660, 'par', '', '', '1_47', 1), ('TENDO', 661, 'tendo', '', '', '1_47', 1), ('VXOR', 662, 'uxor', '', '', '1_47', 2), ('AVCTOR', 663, 'auctor', '', '', '1_48', 2), ('CIVITAS', 664, 'civitas', '', '', '1_48', 2), ('PATER', 665, 'pater', '', '', '1_48', 2), ('PELLO', 666, 'pello', '', '', '1_48', 1), ('POST/2', 667, 'post', '', '', '1_48', 1), ('POSTEA', 668, 'postea', '', '', '1_48', 1), ('AVCTORITAS', 669, 'auctoritas', '', '', '1_49', 2), ('CELERITAS', 670, 'celeritas', '', '', '1_49', 1), ('CONFIRMO', 671, 'confirmo', '', '', '1_49', 2), ('FAMILIARIS/2', 672, 'familiaris', '', '', '1_49', 1), ('RESPONDEO', 673, 'respondeo', '', '', '1_49', 2), ('SVPERO', 674, 'supero', '', '', '1_49', 1), ('VIRTVS', 675, 'virtus', '', '', '1_49', 1), ('CIBVS', 676, 'cibus', '', '', '1_5', 2), ('CLARVS', 677, 'clarus', '', '', '1_5', 2), ('COPIA', 678, 'copia', '', '', '1_5', 1), ('CVRA', 679, 'cura', '', '', '1_5', 1), ('FORMA', 680, 'forma', '', '', '1_5', 1), ('NVMERVS', 681, 'numerus', '', '', '1_5', 2), ('PLANVS/2', 682, 'planus', '', '', '1_5', 1), ('REGINA', 683, 'regina', '', '', '1_5', 1), ('CVRRO', 684, 'curro', '', '', '1_50', 2), ('EXPELLO', 685, 'expello', '', '', '1_50', 2), ('HIC/1', 686, 'hic', '', '', '1_50', 1), ('ILLE', 687, 'ille', '', '', '1_50', 1), ('MATER', 688, 'mater', '', '', '1_50', 2), ('PRAECEPS/2', 689, 'praeceps', '', '', '1_50', 1), ('AESTAS', 690, 'aestas', '', '', '1_51', 1), ('COR', 691, 'cor', '', '', '1_51', 1), ('HIEMS', 692, 'hiems', '', '', '1_51', 1), ('MORS', 693, 'mors', '', '', '1_51', 1), ('NIHIL', 694, 'nihil', '', '', '1_51', 1), ('TIMEO', 695, 'timeo', '', '', '1_51', 2), ('COMMVNIS', 696, 'communis', '', '', '1_52', 1), ('DVM/2', 697, 'dum', '', '', '1_52', 1), ('INCERTVS', 698, 'incertus', '', '', '1_52', 1), ('PARS', 699, 'pars', '', '', '1_52', 1), ('REMITTO', 700, 'remitto', '', '', '1_52', 1), ('SPERO', 701, 'spero', '', '', '1_52', 3), ('SPIRO', 702, 'spiro', '', '', '1_52', 1), ('EXPVGNO', 703, 'expugno', '', '', '1_53', 2), 
('GENVS/1', 704, 'genus', '', '', '1_53', 2), ('IDEM', 705, 'idem', '', '', '1_53', 2), ('INTERCIPIO', 706, 'intercipio', '', '', '1_53', 1), ('NOBILIS/2', 707, 'nobilis', '', '', '1_53', 2), ('QVONDAM', 708, 'quondam', '', '', '1_53', 1), ('SVPPLICIVM', 709, 'supplicium', '', '', '1_53', 1), ('TANGO', 710, 'tango', '', '', '1_53', 1), ('COLO/2', 711, 'colo', '', '', '1_54', 1), ('FRANGO', 712, 'frango', '', '', '1_54', 1), ('FRATER', 713, 'frater', '', '', '1_54', 2), ('INSIDEO', 714, 'insideo', '', '', '1_54', 1), ('IPSE', 715, 'ipse', '', '', '1_54', 2), ('PONS', 716, 'pons', '', '', '1_54', 2), ('POTESTAS', 717, 'potestas', '', '', '1_54', 2), ('SEDEO', 718, 'sedeo', '', '', '1_54', 1), ('SOROR', 719, 'soror', '', '', '1_54', 2), ('ALIVS', 720, 'alius', '', '', '1_56', 1), ('ALIVSALIVS', 721, 'alius', '', '', '1_56', 1), ('ALTER', 722, 'alter', '', '', '1_56', 1), ('ALTERALTER', 723, 'alter', '', '', '1_56', 2), ('NEVTER', 724, 'neuter', '', '', '1_56', 1), ('NVLLVS', 725, 'nullus', '', '', '1_56', 1), ('SOLVS', 726, 'solus', '', '', '1_56', 1), ('TOTVS', 727, 'totus', '', '', '1_56', 1), ('VLLVS', 728, 'ullus', '', '', '1_56', 1), ('VNVS', 729, 'unus', '', '', '1_56', 1), ('ALIENVS/2', 730, 'alienus', '', '', '1_57', 1), ('CONTENDO', 731, 'contendo', '', '', '1_57', 2), ('GRAVIS', 732, 'gravis', '', '', '1_57', 2), ('OPPRIMO', 733, 'opprimo', '', '', '1_57', 3), ('VOX', 734, 'vox', '', '', '1_57', 2), ('CONSISTO', 735, 'consisto', '', '', '1_58', 1), ('LEVIS/1', 736, 'levis', '', '', '1_58', 1), ('MENSIS', 737, 'mensis', '', '', '1_58', 2), ('OSTENDO', 738, 'ostendo', '', '', '1_58', 2), ('PROHIBEO', 739, 'prohibeo', '', '', '1_58', 1), ('VIS', 740, 'vis', '', '', '1_58', 1), ('IVSTVS', 741, 'iustus', '', '', '1_59', 1), ('LABOR/1', 742, 'labor', '', '', '1_59', 1), ('MVNVS', 743, 'munus', '', '', '1_59', 2), ('PVTO', 744, 'puto', '', '', '1_59', 2), ('SCIO', 745, 'scio', '', '', '1_59', 2), ('SECVNDVS/1', 746, 'secundus', '', '', '1_59', 1), ('SI/2', 747, 'si', '', '', '1_59', 1), ('SOLVO', 748, 'solvo', '', '', '1_59', 1), ('AD/2', 749, 'ad', '', '', '1_6', 1), ('CRAS/2', 750, 'cras', '', '', '1_6', 2), ('NAVIGO', 751, 'navigo', '', '', '1_6', 2), ('NAVTA', 752, 'nauta', '', '', '1_6', 1), ('PECVNIA', 753, 'pecunia', '', '', '1_6', 1), ('TVM', 754, 'tum', '', '', '1_6', 1), ('VNDA', 755, 'unda', '', '', '1_6', 1), ('ACER/2', 756, 'acer', '', '', '1_60', 2), ('HAEREO', 757, 'haereo', '', '', '1_60', 1), ('INSTO', 758, 'insto', '', '', '1_60', 1), ('IRA', 759, 'ira', '', '', '1_60', 1), ('LEGATVS', 760, 'legatus', '', '', '1_60', 1), ('PERVENIO', 761, 'pervenio', '', '', '1_60', 1), ('SENTIO', 762, 'sentio', '', '', '1_60', 3), ('VRBS', 763, 'urbs', '', '', '1_60', 1), ('AETAS', 764, 'aetas', '', '', '1_61', 2), ('CONDICIO', 765, 'condicio', '', '', '1_61', 2), ('QVAM/1', 766, 'quam', '', '', '1_61', 2), ('RAPIO', 767, 'rapio', '', '', '1_61', 2), ('STATVO', 768, 'statuo', '', '', '1_61', 1), ('VINCO', 769, 'vinco', '', '', '1_61', 2), ('VTILIS', 770, 'utilis', '', '', '1_61', 2), ('APTVS', 771, 'aptus', '', '', '1_62', 1), ('GENS', 772, 'gens', '', '', '1_62', 2), ('HVMILIS', 773, 'humilis', '', '', '1_62', 2), ('INSTRVO', 774, 'instruo', '', '', '1_62', 1), ('IVDICO', 775, 'iudico', '', '', '1_62', 1), ('NEMO', 776, 'nemo', '', '', '1_62', 2), ('ORATIO', 777, 'oratio', '', '', '1_62', 1), ('REGIO', 778, 'regio', '', '', '1_62', 1), ('REPELLO', 779, 'repello', '', '', '1_62', 1), ('ACCIDO/1', 780, 'accido', '', '', '1_63', 2), ('CADO', 781, 'cado', '', '', '1_63', 2), ('DIFFICILIS', 
782, 'difficilis', '', '', '1_63', 1), ('DISSIMILIS', 783, 'dissimilis', '', '', '1_63', 1), ('EXPLICO', 784, 'explico', '', '', '1_63', 1), ('FACILE', 785, 'facile', '', '', '1_63', 2), ('IGNIS', 786, 'ignis', '', '', '1_63', 2), ('IVNGO', 787, 'iungo', '', '', '1_63', 2), ('OPVS/1', 788, 'opus', '', '', '1_63', 2), ('PROPRIVS', 789, 'proprius', '', '', '1_63', 1), ('SIMILIS', 790, 'similis', '', '', '1_63', 2), ('SOL', 791, 'sol', '', '', '1_63', 3), ('BENE', 792, 'bene', '', '', '1_64', 1), ('EXTREMVS', 793, 'extremus', '', '', '1_64', 1), ('INFERIOR', 794, 'inferior', '', '', '1_64', 2), ('PROXIMVS/2', 795, 'proximus', '', '', '1_64', 1), ('SVMMVS', 796, 'summus', '', '', '1_64', 1), ('AVERTO', 797, 'averto', '', '', '1_65', 1), ('CONSPICIO', 798, 'conspicio', '', '', '1_65', 2), ('CONTRA/2', 799, 'contra', '', '', '1_65', 1), ('CREDO', 800, 'credo', '', '', '1_65', 1), ('FALLO', 801, 'fallo', '', '', '1_65', 2), ('SVI/1', 802, 'sui', '', '', '1_65', 1), ('SVMO', 803, 'sumo', '', '', '1_65', 1), ('SVVS', 804, 'suus', '', '', '1_65', 1), ('TRIBVO', 805, 'tribuo', '', '', '1_65', 1), ('CENTVM', 806, 'centum', '', '', '1_66', 1), ('DVO', 807, 'duo', '', '', '1_66', 1), ('EMO', 808, 'emo', '', '', '1_66', 1), ('IMPERIVM', 809, 'imperium', '', '', '1_66', 1), ('INCOLO/2', 810, 'incolo', '', '', '1_66', 2), ('MILIA', 811, 'milia', '', '', '1_66', 1), ('MILLE', 812, 'mille', '', '', '1_66', 1), ('PERFICIO', 813, 'perficio', '', '', '1_66', 1), ('TRADO', 814, 'trado', '', '', '1_66', 2), ('TRES', 815, 'tres', '', '', '1_66', 1), ('AVTEM', 816, 'autem', '', '', '1_67', 2), ('CIRCVM/2', 817, 'circum', '', '', '1_67', 1), ('EXERCEO/2', 818, 'exerceo', '', '', '1_67', 1), ('INTELLIGO', 819, 'intellego', '', '', '1_67', 1), ('NEGOTIVM', 820, 'negotium', '', '', '1_67', 2), ('POSTQVAM', 821, 'postquam', '', '', '1_67', 1), ('PRAE/1', 822, 'prae', '', '', '1_67', 1), ('PRAEMITTO', 823, 'praemitto', '', '', '1_67', 1), ('SVPER/2', 824, 'super', '', '', '1_67', 1), ('SVPERSVM/1', 825, 'supersum', '', '', '1_67', 1), ('CASVS', 826, 'casus', '', '', '1_68', 2), ('DEMONSTRO', 827, 'demonstro', '', '', '1_68', 1), ('DESERO/2', 828, 'desero', '', '', '1_68', 1), ('DESPICIO', 829, 'despicio', '', '', '1_68', 1), ('DOMVS', 830, 'domus', '', '', '1_68', 1), ('EXERCITVS/1', 831, 'exercitus', '', '', '1_68', 1), ('IMPETVS', 832, 'impetus', '', '', '1_68', 2), ('MANVS/1', 833, 'manus', '', '', '1_68', 1), ('REDDO', 834, 'reddo', '', '', '1_68', 1), ('SENATVS', 835, 'senatus', '', '', '1_68', 1), ('DIES', 836, 'dies', '', '', '1_69', 1), ('INTERCLVDO', 837, 'intercludo', '', '', '1_69', 1), ('LVX', 838, 'lux', '', '', '1_69', 2), ('NOCEO', 839, 'noceo', '', '', '1_69', 1), ('PRAEFICIO', 840, 'praeficio', '', '', '1_69', 1), ('PRAESVM/1', 841, 'praesum', '', '', '1_69', 2), ('PRINCEPS/1', 842, 'princeps', '', '', '1_69', 1), ('RES', 843, 'res', '', '', '1_69', 1), ('SPECIES', 844, 'species', '', '', '1_69', 1), ('SPES', 845, 'spes', '', '', '1_69', 2), ('AMICVS/1', 846, 'amicus', '', '', '1_7', 2), ('DONO', 847, 'dono', '', '', '1_7', 1), ('GRATVS', 848, 'gratus', '', '', '1_7', 2), ('LITTERA', 849, 'littera', '', '', '1_7', 1), ('MANDO/2', 850, 'mando', '', '', '1_7', 2), ('MONSTRO', 851, 'monstro', '', '', '1_7', 1), ('NVNTIO', 852, 'nuntio', '', '', '1_7', 2), ('PRAEDA', 853, 'praeda', '', '', '1_7', 1), ('PROBO', 854, 'probo', '', '', '1_7', 2), ('APVD', 855, 'apud', '', '', '1_70', 1), ('ASCENDO', 856, 'ascendo', '', '', '1_70', 2), ('DIVIDO', 857, 'divido', '', '', '1_70', 2), ('EXPLORO', 858, 'exploro', '', 
'', '1_70', 1), ('FRONS/1', 859, 'frons', '', '', '1_70', 1), ('IMPERO', 860, 'impero', '', '', '1_70', 2), ('RATIO', 861, 'ratio', '', '', '1_70', 2), ('INCITO/1', 862, 'incito', '', '', '1_8', 1), ('INIVRIA', 863, 'iniuria', '', '', '1_8', 1), ('MEMORIA', 864, 'memoria', '', '', '1_8', 1), ('OCCVPO/2', 865, 'occupo', '', '', '1_8', 2), ('POENA', 866, 'poena', '', '', '1_8', 2), ('PROVINCIA', 867, 'provincia', '', '', '1_8', 1), ('PVGNA', 868, 'pugna', '', '', '1_8', 1), ('PVGNO', 869, 'pugno', '', '', '1_8', 1), ('SERVO', 870, 'servo', '', '', '1_8', 2), ('VICTORIA', 871, 'victoria', '', '', '1_8', 1), ('FILIA', 872, 'filia', '', '', '1_9', 1), ('HORA', 873, 'hora', '', '', '1_9', 1), ('MEVS', 874, 'meus', '', '', '1_9', 1), ('PAENE', 875, 'paene', '', '', '1_9', 2), ('QVINTVS/N', 876, 'Quintus', '', '', '1_9', 1), ('SEMPER', 877, 'semper', '', '', '1_9', 2), ('TVVS', 878, 'tuus', '', '', '1_9', 1), ('ATRIVM', 879, 'atrium', '', '', '2_10', 1), ('BALNEVM', 880, 'balneum', '', '', '2_10', 1), ('CANO', 881, 'cano', '', '', '2_10', 1), ('CONSTITVO', 882, 'constituo', '', '', '2_10', 1), ('DIGNVS', 883, 'dignus', '', '', '2_10', 1), ('EXITVS', 884, 'exitus', '', '', '2_10', 1), ('ITERVM', 885, 'iterum', '', '', '2_10', 1), ('MVLIER', 886, 'mulier', '', '', '2_10', 1), ('NOVEM', 887, 'novem', '', '', '2_10', 1), ('PERTVRBO/2', 888, nan, '', '', '2_10', 2), ('SILENTIVM', 889, 'silentium', '', '', '2_10', 1), ('DECEM', 890, 'decem', '', '', '2_11', 1), ('DESCENDO', 891, 'descendo', '', '', '2_11', 1), ('FLEO', 892, 'fleo', '', '', '2_11', 2), ('HOSPES', 893, 'hospes', '', '', '2_11', 1), ('NE/4', 894, 'ne', '', '', '2_11', 1), ('OCTO', 895, 'octo', '', '', '2_11', 1), ('PALVS/2', 896, 'palus', '', '', '2_11', 2), ('QVATVOR', 897, 'quattuor', '', '', '2_11', 1), ('SEPTEM', 898, 'septem', '', '', '2_11', 1), ('SEX', 899, 'sex', '', '', '2_11', 1), ('VT/4', 900, 'ut', '', '', '2_11', 2), ('ADHAEREO', 901, 'adhaereo', '', '', '2_12', 1), ('AEDIFICIVM', 902, 'aedificium', '', '', '2_12', 2), ('AMPLVS', 903, 'amplus', '', '', '2_12', 2), ('ANGVSTVS', 904, 'angustus', '', '', '2_12', 2), ('DEINDE', 905, 'deinde', '', '', '2_12', 1), ('EICIO', 906, 'eicio', '', '', '2_12', 1), ('ENIM/2', 907, 'enim', '', '', '2_12', 2), ('GVBERNATOR', 908, 'gubernator', '', '', '2_12', 1), ('IVVENIS/1', 909, 'iuvenis', '', '', '2_12', 1), ('MATVRVS', 910, 'maturus', '', '', '2_12', 2), ('MORA', 911, 'mora', '', '', '2_12', 2), ('SORDIDVS', 912, 'sordidus', '', '', '2_12', 1), ('TEMPESTAS', 913, 'tempestas', '', '', '2_12', 1), ('VEHO', 914, 'veho', '', '', '2_12', 2), ('COLLIGO/3', 915, 'colligo', '', '', '2_13', 3), ('DOLEO', 916, 'doleo', '', '', '2_13', 1), ('EDITVS/2', 917, 'editus', '', '', '2_13', 2), ('EXCIPIO', 918, 'excipio', '', '', '2_13', 2), ('FERO', 919, 'fero', '', '', '2_13', 1), ('MERCATOR', 920, 'mercator', '', '', '2_13', 2), ('NAMQVE', 921, 'namque', '', '', '2_13', 1), ('OCVLVS', 922, 'oculus', '', '', '2_13', 1), ('PORTVS', 923, 'portus', '', '', '2_13', 1), ('POSTEAQVAM', 924, 'posteaquam', '', '', '2_13', 2), ('RECTE', 925, 'recte', '', '', '2_13', 1), ('RVMPO', 926, 'rumpo', '', '', '2_13', 1), ('PERITVS', 927, 'peritus', '', '', '2_14', 2), ('PERTERREO', 928, 'perterreo', '', '', '2_14', 1), ('POSTERVS', 929, 'posterus', '', '', '2_14', 3), ('PREHENDO', 930, 'prehendo', '', '', '2_14', 1), ('RIDEO', 931, 'rideo', '', '', '2_14', 1), ('SAEPE', 932, 'saepe', '', '', '2_14', 2), ('SATIS/2', 933, 'satis', '', '', '2_14', 2), ('SOMNVS', 934, 'somnus', '', '', '2_14', 1), ('TAM', 935, 'tam', '', '', 
'2_14', 1), ('TANTVS', 936, 'tantus', '', '', '2_14', 2), ('VOLVO', 937, 'volvo', '', '', '2_14', 1), ('AEGER/2', 938, 'aeger', '', '', '2_15', 1), ('CLIENS', 939, 'cliens', '', '', '2_15', 2), ('COTIDIE', 940, 'cotidie', '', '', '2_15', 2), ('EDO/1', 941, 'edo', '', '', '2_15', 1), ('INCENDO', 942, 'incendo', '', '', '2_15', 2), ('MAIORES', 943, 'maiores', '', '', '2_15', 1), ('OPERA', 944, 'opera', '', '', '2_15', 2), ('QVIES', 945, 'quies', '', '', '2_15', 3), ('REFICIO', 946, 'reficio', '', '', '2_15', 1), ('SVRGO', 947, 'surgo', '', '', '2_15', 1), ('TOT', 948, 'tot', '', '', '2_15', 1), ('ACVTVS', 949, 'acutus', '', '', '2_16', 1), ('CETERVS', 950, 'ceterus', '', '', '2_16', 1), ('CONSENTIO', 951, 'consentio', '', '', '2_16', 2), ('CONTEMNO', 952, 'contemno', '', '', '2_16', 1), ('CVM/3', 953, 'cum', '', '', '2_16', 1), ('EXISTIMO', 954, 'existimo', '', '', '2_16', 1), ('INSIGNIS', 955, 'insignis', '', '', '2_16', 1), ('MIRVS', 956, 'mirus', '', '', '2_16', 1), ('NEQVIDEM', 957, 'quidem', '', '', '2_16', 1), ('PRAETEREA', 958, 'praeterea', '', '', '2_16', 2), ('QVIDEM', 959, 'quidem', '', '', '2_16', 1), ('ALA', 960, 'ala', '', '', '2_17', 1), ('DEFESSVS', 961, 'defessus', '', '', '2_17', 1), ('DEPONO', 962, 'depono', '', '', '2_17', 1), ('MILLEPASSVS', 963, 'mile', '', '', '2_17', 1), ('NONDVM', 964, 'nondum', '', '', '2_17', 2), ('PASSVS/1', 965, 'passus', '', '', '2_17', 2), ('QVAMPRIMVM', 966, 'quam', '', '', '2_17', 1), ('STATIM', 967, 'statim', '', '', '2_17', 3), ('TANDEM', 968, 'tandem', '', '', '2_17', 1), ('ARBITROR', 969, 'arbitror', '', '', '2_18', 3), ('CARVS', 970, 'carus', '', '', '2_18', 1), ('CONSVMO', 971, 'consumo', '', '', '2_18', 2), ('CVLPA', 972, 'culpa', '', '', '2_18', 1), ('FELIX', 973, 'felix', '', '', '2_18', 1), ('GAVDIVM', 974, 'gaudium', '', '', '2_18', 1), ('HIC/2', 975, 'hic', '', '', '2_18', 1), ('LOQVOR', 976, 'loquor', '', '', '2_18', 3), ('NECESSE', 977, 'necesse', '', '', '2_18', 1), ('POLLICEOR', 978, 'polliceor', '', '', '2_18', 1), ('PROFICISCOR', 979, 'proficiscor', '', '', '2_18', 2), ('QVIETVS', 980, 'quietus', '', '', '2_18', 1), ('VIGINTI', 981, 'viginti', '', '', '2_18', 1), ('INGREDIOR', 982, 'ingredior', '', '', '2_19', 1), ('MIROR', 983, 'miror', '', '', '2_19', 1), ('OMEN', 984, 'omen', '', '', '2_19', 1), ('ORIOR', 985, 'orior', '', '', '2_19', 3), ('PONTIFEX', 986, 'pontifex', '', '', '2_19', 1), ('PRIDIE', 987, 'pridie', '', '', '2_19', 1), ('QVANTVS/1', 988, 'quantus', '', '', '2_19', 2), ('SPONDEO', 989, 'spondeo', '', '', '2_19', 1), ('STVDEO', 990, 'studeo', '', '', '2_19', 1), ('SVBITO', 991, 'subito', '', '', '2_19', 3), ('VESPER', 992, 'vesper', '', '', '2_19', 2), ('VESPERI', 993, 'vesperi', '', '', '2_19', 1), ('PAR/2', 994, 'pār', '', '', '2_2', 1), ('PLVS', 995, 'plus', '', '', '2_2', 1), ('TOGA', 996, 'toga', '', '', '2_2', 1), ('ALIQVIS', 997, 'aliquis', '', '', '2_20', 1), ('ARBOR', 998, 'arbor', '', '', '2_20', 2), ('COMPLEO', 999, 'compleo', '', '', '2_20', 2), ('EO/1', 1000, 'eo', '', '', '2_20', 1), ('EXEO/1', 1001, 'exeo', '', '', '2_20', 1), ('FONS', 1002, 'fons', '', '', '2_20', 1), ('OPS', 1003, 'ops', '', '', '2_20', 2), ('PRAEBEO', 1004, 'praebeo', '', '', '2_20', 2), ('PROPTER/2', 1005, 'propter', '', '', '2_20', 2), ('QVIDAM', 1006, 'quidam', '', '', '2_20', 2), ('REDEO/1', 1007, 'redeo', '', '', '2_20', 3), ('SAPIENS/2', 1008, 'sapiens', '', '', '2_20', 1), ('SAXVM', 1009, 'saxum', '', '', '2_20', 2), ('ACCVSO', 1010, 'accuso', '', '', '2_21', 1), ('ALACER', 1011, 'alacer', '', '', '2_21', 2), ('CAEDO', 
1012, 'caedo', '', '', '2_21', 1), ('CONCEDO/1', 1013, 'concedo', '', '', '2_21', 1), ('FACIES', 1014, 'facies', '', '', '2_21', 1), ('FAMES', 1015, 'fames', '', '', '2_21', 2), ('HORTOR', 1016, 'hortor', '', '', '2_21', 2), ('INSTITVO', 1017, 'instituo', '', '', '2_21', 1), ('NEGO', 1018, 'nego', '', '', '2_21', 2), ('PEREO/1', 1019, 'pereo', '', '', '2_21', 2), ('QVOQVE', 1020, 'quoque', '', '', '2_21', 1), ('SIGNIFICO', 1021, 'significo', '', '', '2_21', 1), ('CAMPVS/1', 1022, 'campus', '', '', '2_22', 1), ('CIRCITER/2', 1023, 'circiter', '', '', '2_22', 2), ('ETSI/2', 1024, 'etsi', '', '', '2_22', 1), ('FERIAE', 1025, 'feriae', '', '', '2_22', 1), ('GRADIOR', 1026, 'gradior', '', '', '2_22', 1), ('MORIOR', 1027, 'morior', '', '', '2_22', 2), ('PROGREDIOR', 1028, 'progredior', '', '', '2_22', 2), ('QVAERO', 1029, 'quaero', '', '', '2_22', 2), ('AT/2', 1030, 'at', '', '', '2_23', 1), ('ATTINGO', 1031, 'attingo', '', '', '2_23', 1), ('DISCO', 1032, 'disco', '', '', '2_23', 1), ('HONOR', 1033, 'honor', '', '', '2_23', 1), ('IMPERATOR', 1034, 'imperator', '', '', '2_23', 1), ('INTERROGO', 1035, 'interrogo', '', '', '2_23', 1), ('LITVS/2', 1036, 'litus', '', '', '2_23', 1), ('NONSOLVM', 1037, 'non', '', '', '2_23', 2), ('OCCIDO/2', 1038, 'occido', '', '', '2_23', 2), ('OS/1', 1039, 'os', '', '', '2_23', 1), ('PRONVNTIO', 1040, 'pronuntio', '', '', '2_23', 1), ('CERTIORFIO', 1041, 'certior', '', '', '2_24', 2), ('DELIGO/3', 1042, 'deligo', '', '', '2_24', 2), ('DILIGENS', 1043, 'diligens', '', '', '2_24', 1), ('EO/3', 1044, 'eo', '', '', '2_24', 2), ('FIO', 1045, 'fio', '', '', '2_24', 1), ('MOROR/1', 1046, 'moror', '', '', '2_24', 3), ('ORO', 1047, 'oro', '', '', '2_24', 1), ('QVOT/1', 1048, 'quot', '', '', '2_24', 1), ('REFERO', 1049, 'refero', '', '', '2_24', 1), ('SCVTVM', 1050, 'scutum', '', '', '2_24', 1), ('TRIBVNVS', 1051, 'tribunus', '', '', '2_24', 1), ('AN', 1052, 'an', '', '', '2_25', 1), ('DIFFERO', 1053, 'differo', '', '', '2_25', 2), ('HVC', 1054, 'huc', '', '', '2_25', 3), ('NOLO', 1055, 'nolo', '', '', '2_25', 1), ('NVM', 1056, 'num', '', '', '2_25', 1), ('PERFIDIA', 1057, 'perfidia', '', '', '2_25', 1), ('PRVDENS', 1058, 'prudens', '', '', '2_25', 1), ('QVAMQVAM/2', 1059, 'quamquam', '', '', '2_25', 1), ('VOLO/3', 1060, 'volo', '', '', '2_25', 1), ('AEQVALIS/2', 1061, 'aequalis', '', '', '2_26', 1), ('CIRCVMVENIO', 1062, 'circumvenio', '', '', '2_26', 1), ('DESIDERO', 1063, 'desidero', '', '', '2_26', 1), ('EXPRIMO', 1064, 'exprimo', '', '', '2_26', 1), ('IGNOTVS', 1065, 'ignotus', '', '', '2_26', 1), ('INEO/1', 1066, 'ineo', '', '', '2_26', 2), ('PERSVADEO', 1067, 'persuadeo', '', '', '2_26', 2), ('PLACEO', 1068, 'placeo', '', '', '2_26', 2), ('QVAESTOR', 1069, 'quaestor', '', '', '2_26', 1), ('REVERTO', 1070, 'reverto', '', '', '2_26', 1), ('CONSVETVDO', 1071, 'consuetudo', '', '', '2_27', 2), ('FORS/1', 1072, 'fors', '', '', '2_27', 1), ('INITIVM', 1073, 'initium', '', '', '2_27', 2), ('IVVO', 1074, 'iuvo', '', '', '2_27', 1), ('MOS', 1075, 'mos', '', '', '2_27', 1), ('SENEX/1', 1076, 'senex', '', '', '2_27', 1), ('SEQVOR', 1077, 'sequor', '', '', '2_27', 2), ('SVCCEDO/1', 1078, 'succedo', '', '', '2_27', 1), ('ADHVC', 1079, 'adhuc', '', '', '2_28', 1), ('ADIVNGO', 1080, 'adiungo', '', '', '2_28', 1), ('CONSPECTVS/1', 1081, 'conspectus', '', '', '2_28', 2), ('DEFERO', 1082, 'defero', '', '', '2_28', 2), ('FOSSA', 1083, 'fossa', '', '', '2_28', 2), ('INQVIO', 1084, 'inquam', '', '', '2_28', 1), ('MALO', 1085, 'malo', '', '', '2_28', 1), ('MVRVS', 1086, 'murus', '', '', 
'2_28', 1), ('VETO', 1087, 'veto', '', '', '2_28', 1), ('COMES', 1088, 'comes', '', '', '2_29', 1), ('CONTINGO/1', 1089, 'contingo', '', '', '2_29', 1), ('IMPLEO', 1090, 'impleo', '', '', '2_29', 1), ('PECTVS', 1091, 'pectus', '', '', '2_29', 1), ('SINO', 1092, 'sino', '', '', '2_29', 1), ('STVLTITIA', 1093, 'stultitia', '', '', '2_29', 1), ('ABDO', 1094, 'abdo', '', '', '2_30', 2), ('APERIO', 1095, 'aperio', '', '', '2_30', 2), ('CIRCVMDO', 1096, 'circumdo', '', '', '2_30', 1), ('FIGO', 1097, 'figo', '', '', '2_30', 1), ('INOPIA', 1098, 'inopia', '', '', '2_30', 1), ('INSIDIAE', 1099, 'insidiae', '', '', '2_30', 2), ('INTRA/2', 1100, 'intra', '', '', '2_30', 2), ('OBSIDEO', 1101, 'obsideo', '', '', '2_30', 1), ('STIPENDIVM', 1102, 'stipendium', '', '', '2_30', 2), ('AES', 1103, 'aes', '', '', '2_31', 1), ('ALO', 1104, 'alo', '', '', '2_31', 2), ('CONIVRO', 1105, 'coniuro', '', '', '2_31', 2), ('LICET/1', 1106, 'licet', '', '', '2_31', 2), ('MAGISTRATVS', 1107, 'magistratus', '', '', '2_31', 2), ('OPORTET', 1108, 'oportet', '', '', '2_31', 2), ('OPPVGNO', 1109, 'oppugno', '', '', '2_31', 1), ('PLEBS', 1110, 'plebs', '', '', '2_31', 1), ('POSTVLO', 1111, 'postulo', '', '', '2_31', 3), ('PRIVSQVAM', 1112, 'priusquam', '', '', '2_31', 1), ('TVTVS', 1113, 'tutus', '', '', '2_31', 2), ('ADVENTVS', 1114, 'adventus', '', '', '2_32', 1), ('ARX', 1115, 'arx', '', '', '2_32', 1), ('CAEDES', 1116, 'caedes', '', '', '2_32', 2), ('DIRIPIO', 1117, 'diripio', '', '', '2_32', 1), ('MILITARIS', 1118, 'militaris', '', '', '2_32', 1), ('PARCO', 1119, 'parco', '', '', '2_32', 2), ('VEREOR', 1120, 'vereor', '', '', '2_32', 2), ('COLLIS', 1121, 'collis', '', '', '2_33', 2), ('CVSTOS', 1122, 'custos', '', '', '2_33', 1), ('DEFICIO', 1123, 'deficio', '', '', '2_33', 1), ('DEICIO', 1124, 'deicio', '', '', '2_33', 1), ('EVADO/2', 1125, 'evado', '', '', '2_33', 1), ('INIQVVS/2', 1126, 'iniquus', '', '', '2_33', 2), ('MENS', 1127, 'mens', '', '', '2_33', 1), ('NASCOR', 1128, 'nascor', '', '', '2_33', 2), ('OCCVRRO', 1129, 'occurro', '', '', '2_33', 1), ('PENDO', 1130, 'pendo', '', '', '2_33', 2), ('PROPINQVVS', 1131, 'propinquus', '', '', '2_33', 1), ('VEHEMENS', 1132, 'vehemens', '', '', '2_33', 1), ('COEPIO', 1133, 'coepi', '', '', '2_34', 2), ('EQVES', 1134, 'eques', '', '', '2_34', 2), ('EXTRA/2', 1135, 'extra', '', '', '2_34', 1), ('MAGNITVDO', 1136, 'magnitudo', '', '', '2_34', 1), ('PRAESTO/1', 1137, 'praesto', '', '', '2_34', 2), ('PROICIO', 1138, 'proicio', '', '', '2_34', 2), ('QVONIAM', 1139, 'quoniam', '', '', '2_34', 1), ('RESTITVO', 1140, 'restituo', '', '', '2_34', 1), ('SANGVIS', 1141, 'sanguis', '', '', '2_34', 1), ('TRISTIS', 1142, 'tristis', '', '', '2_34', 1), ('COMPLVRES/2', 1143, 'complures', '', '', '2_35', 2), ('DETRIMENTVM', 1144, 'detrimentum', '', '', '2_35', 1), ('EXSPIRO', 1145, 'exspiro', '', '', '2_35', 1), ('FEMINA', 1146, 'femina', '', '', '2_35', 2), ('INCOLVMIS', 1147, 'incolumis', '', '', '2_35', 2), ('INFERO', 1148, 'infero', '', '', '2_35', 1), ('MVLTITVDO', 1149, 'multitudo', '', '', '2_35', 1), ('NISI', 1150, 'nisi', '', '', '2_35', 1), ('PROPE/2', 1151, 'prope', '', '', '2_35', 2), ('VMQVAM', 1152, 'umquam', '', '', '2_35', 1), ('CONSIDO', 1153, 'consido', '', '', '2_36', 2), ('QVISQVAM', 1154, 'quisquam', '', '', '2_36', 1), ('QVISQVE/2', 1155, 'quisque', '', '', '2_36', 1), ('AGGREDIOR', 1156, 'adgredior', '', '', '2_37', 1), ('AMPLIVS', 1157, 'amplius', '', '', '2_37', 1), ('ANTEA', 1158, 'antea', '', '', '2_37', 1), ('AVDAX', 1159, 'audax', '', '', '2_37', 1), ('DELEO', 
1160, 'deleo', '', '', '2_37', 1), ('FVNDO/2', 1161, 'fundo', '', '', '2_37', 1), ('LEGIO', 1162, 'legio', '', '', '2_37', 1), ('MOX', 1163, 'mox', '', '', '2_37', 1), ('OBSES', 1164, 'obses', '', '', '2_37', 1), ('TIMOR', 1165, 'timor', '', '', '2_37', 1), ('VOLVNTAS', 1166, 'voluntas', '', '', '2_37', 1), ('ADMITTO', 1167, 'admitto', '', '', '2_38', 2), ('DESPERO', 1168, 'despero', '', '', '2_38', 1), ('EXSTRVO', 1169, 'exstruo', '', '', '2_38', 1), ('NEGLIGO', 1170, 'neglego', '', '', '2_38', 3), ('RVRSVS', 1171, 'rursus', '', '', '2_38', 2), ('VVLTVS', 1172, 'vultus', '', '', '2_38', 1), ('ADEO/2', 1173, 'adeo', '', '', '2_39', 1), ('BIS', 1174, 'bis', '', '', '2_39', 1), ('EFFERO/2', 1175, 'effero', '', '', '2_39', 2), ('LOCO', 1176, 'loco', '', '', '2_39', 1), ('QVASI/1', 1177, 'quasi', '', '', '2_39', 1), ('SIMVLO', 1178, 'simulo', '', '', '2_39', 1), ('TEGO', 1179, 'tego', '', '', '2_39', 1), ('VIDEOR', 1180, 'videor', '', '', '2_4', 1), ('BREVIS', 1181, 'brevis', '', '', '2_40', 2), ('CVPIDITAS', 1182, 'cupiditas', '', '', '2_40', 1), ('DOLOR', 1183, 'dolor', '', '', '2_40', 1), ('IGITVR', 1184, 'igitur', '', '', '2_40', 1), ('NOCTV', 1185, 'noctu', '', '', '2_40', 2), ('RENVNTIO', 1186, 'renuntio', '', '', '2_40', 1), ('VNDIQVE', 1187, 'undique', '', '', '2_40', 2), ('CONSVESCO', 1188, 'consuesco', '', '', '2_41', 2), ('INTEREA', 1189, 'interea', '', '', '2_41', 2), ('PAVLO', 1190, 'paulo', '', '', '2_41', 1), ('VTOR', 1191, 'utor', '', '', '2_41', 2), ('COMPARO/2', 1192, 'comparo', '', '', '2_42', 1), ('CVRSVS', 1193, 'cursus', '', '', '2_42', 1), ('EGREDIOR', 1194, 'egredior', '', '', '2_42', 1), ('IDONEVS', 1195, 'idoneus', '', '', '2_42', 2), ('NANCISCOR', 1196, 'nanciscor', '', '', '2_42', 1), ('APPROPINQVO', 1197, 'appropinquo', '', '', '2_43', 2), ('CONFLIGO', 1198, 'confligo', '', '', '2_43', 1), ('CONOR', 1199, 'conor', '', '', '2_43', 2), ('MAGNOPERE', 1200, 'magnopere', '', '', '2_43', 1), ('REMVS', 1201, 'remus', '', '', '2_43', 1), ('REPERIO', 1202, 'reperio', '', '', '2_43', 2), ('SIMVL/1', 1203, 'simul', '', '', '2_43', 1), ('SIMVLAC/2', 1204, 'simulac', '', '', '2_43', 1), ('SVBLEVO/1', 1205, 'sublevo', '', '', '2_43', 1), ('TOLLO', 1206, 'tollo', '', '', '2_43', 1), ('VITO', 1207, 'vito', '', '', '2_43', 2), ('AEGRE', 1208, 'aegre', '', '', '2_44', 1), ('CENSEO', 1209, 'censeo', '', '', '2_44', 1), ('CLAM/1', 1210, 'clam', '', '', '2_44', 2), ('DIV', 1211, 'diu', '', '', '2_44', 1), ('OCCASIO', 1212, 'occasio', '', '', '2_44', 1), ('SVBEO/1', 1213, 'subeo', '', '', '2_44', 1), ('ASPICIO', 1214, 'aspicio', '', '', '2_45', 1), ('CONTROVERSIA', 1215, 'controversia', '', '', '2_45', 1), ('EXANIMO', 1216, 'exanimo', '', '', '2_45', 1), ('ISTE', 1217, 'iste', '', '', '2_45', 1), ('IVGVM', 1218, 'iugum', '', '', '2_45', 2), ('MERIDIES', 1219, 'meridies', '', '', '2_45', 1), ('APERTVS', 1220, 'apertus', '', '', '2_46', 1), ('DIFFICVLTAS', 1221, 'difficultas', '', '', '2_46', 1), ('POSTRIDIE', 1222, 'postridie', '', '', '2_46', 1), ('SVPRA/2', 1223, 'supra', '', '', '2_46', 1), ('CVMPRIMVM', 1224, 'cum', '', '', '2_47', 1), ('DESISTO', 1225, 'desisto', '', '', '2_47', 2), ('INSEQVOR', 1226, 'insequor', '', '', '2_47', 1), ('OPINIO', 1227, 'opinio', '', '', '2_47', 1), ('PRIVS', 1228, 'prius', '', '', '2_47', 1), ('VNDE/1', 1229, 'unde', '', '', '2_47', 1), ('CONICIO', 1230, 'conicio', '', '', '2_48', 1), ('FRVSTRA', 1231, 'frustra', '', '', '2_48', 1), ('NECO', 1232, 'neco', '', '', '2_48', 2), ('CRVCIATVS', 1233, 'cruciatus', '', '', '2_49', 2), ('IVSIVRANDVM', 1234, 
'ius', '', '', '2_49', 1), ('PREX', 1235, 'prex', '', '', '2_49', 2), ('SVSPICOR', 1236, 'suspicor', '', '', '2_49', 2), ('COTIDIANVS', 1237, 'cottidianus', '', '', '2_50', 1), ('FERE', 1238, 'fere', '', '', '2_50', 1), ('OCCASVS', 1239, 'occasus', '', '', '2_50', 1), ('PROPTEREA', 1240, 'propterea', '', '', '2_50', 1), ('PROPTEREAQVOD', 1241, 'propterea', '', '', '2_50', 1), ('CVPIDVS', 1242, 'cupidus', '', '', '2_51', 1), ('INTERSE', 1243, 'inter', '', '', '2_51', 1), ('ITEM', 1244, 'item', '', '', '2_51', 1), ('LATITVDO', 1245, 'latitudo', '', '', '2_51', 1), ('LEGATIO', 1246, 'legatio', '', '', '2_51', 1), ('NOBILITAS', 1247, 'nobilitas', '', '', '2_51', 1), ('PATEO', 1248, 'pateo', '', '', '2_51', 1), ('PRINCIPATVS', 1249, 'principatus', '', '', '2_51', 1), ('PROFECTIO', 1250, 'profectio', '', '', '2_51', 1), ('PROFITEOR', 1251, 'profiteor', '', '', '2_51', 1), ('QVADECAVSA', 1252, 'qua', '', '', '2_51', 1), ('VAGOR/2', 1253, 'vagor', '', '', '2_51', 1), ('EODEM', 1254, 'eodem', '', '', '2_52', 1), ('EXPEDITVS', 1255, 'expeditus', '', '', '2_52', 1), ('FLVO', 1256, 'fluo', '', '', '2_52', 1), ('OMNINO', 1257, 'omnino', '', '', '2_52', 1), ('PRAETER/2', 1258, 'praeter', '', '', '2_52', 1), ('PRIVATVS', 1259, 'privatus', '', '', '2_52', 1), ('QVA/1', 1260, 'qua', '', '', '2_52', 1), ('TRANSEO/1', 1261, 'transeo', '', '', '2_52', 1), ('VICVS', 1262, 'vicus', '', '', '2_52', 1), ('VIX', 1263, 'vix', '', '', '2_52', 1), ('VNACVM', 1264, 'una', '', '', '2_52', 1), ('ALTITVDO', 1265, 'altitudo', '', '', '2_53', 1), ('CASTELLVM', 1266, 'castellum', '', '', '2_53', 1), ('CERTIOREMFACIO', 1267, 'certiorem', '', '', '2_53', 1), ('MVNITIO', 1268, 'munitio', '', '', '2_53', 1), ('NONNVMQVAM', 1269, 'nonnumquam', '', '', '2_53', 1), ('TELVM', 1270, 'telum', '', '', '2_53', 1), ('VADVM', 1271, 'vadum', '', '', '2_53', 1), ('ALICVIESSEINANIMO', 1272, 'alicui', '', '', '2_54', 1), ('CONSCRIBO', 1273, 'conscribo', '', '', '2_54', 1), ('HIBERNA', 1274, 'hiberna', '', '', '2_54', 1), ('IMPETRO', 1275, 'impetro', '', '', '2_54', 1), ('INVITVS', 1276, 'invitus', '', '', '2_54', 1), ('MAGNVMITER', 1277, 'magnum', '', '', '2_54', 1), ('MVLTVMPLVRIMVMPOSSE', 1278, 'multum', '', '', '2_54', 1), ('MVLTVMPLVSPOSSE', 1279, 'multum', '', '', '2_54', 1), ('PATIOR', 1280, 'patior', '', '', '2_54', 1), ('RESNOVAE', 1281, 'res', '', '', '2_54', 1), ('SPONTE', 1282, 'sponte', '', '', '2_54', 1), ('CALAMITAS', 1283, 'calamitas', '', '', '2_55', 1), ('DEVIGILIA', 1284, 'de', '', '', '2_55', 1), ('EXPLORATOR', 1285, 'explorator', '', '', '2_55', 1), ('FVGAEMEMANDO', 1286, 'fugae', '', '', '2_55', 1), ('MERECIPIO', 1287, 'me', '', '', '2_55', 1), ('PAGVS', 1288, 'pagus', '', '', '2_55', 1), ('POPVLOR', 1289, 'populor', '', '', '2_55', 1), ('REPENTINVS', 1290, 'repentinus', '', '', '2_55', 1), ('SIVE/1', 1291, nan, '', '', '2_55', 2), ('SIVE/1', 1292, 'sive', '', '', '2_55', 2), ('VEL/1', 1293, 'vel', '', '', '2_55', 1), ('VELVEL', 1294, 'vel', '', '', '2_55', 1), ('ACIES', 1295, 'acies', '', '', '2_56', 1), ('CASTRAMOVEO', 1296, 'castra', '', '', '2_56', 1), ('CASTRAPONERE', 1297, 'castra', '', '', '2_56', 1), ('COMPERIO', 1298, 'comperio', '', '', '2_56', 1), ('EQVITATVS/1', 1299, 'equitatus', '', '', '2_56', 1), ('INSIGNE', 1300, 'insigne', '', '', '2_56', 1), ('PRIMALVCE', 1301, 'prima', '', '', '2_56', 1), ('RESMILITARIS', 1302, 'res', '', '', '2_56', 1), ('SVMMVSMONS', 1303, 'summus', '', '', '2_56', 1), ('AEQVO', 1304, 'aequo', '', '', '2_57', 1), ('AGMEN', 1305, 'agmen', '', '', '2_57', 1), ('ANIMADVERTO', 1306, 
'animadverto', '', '', '2_57', 1), ('COLLOCO', 1307, 'colloco', '', '', '2_57', 1), ('CONFERO', 1308, 'confero', '', '', '2_57', 1), ('CONFERTVS', 1309, 'confertus', '', '', '2_57', 1), ('CONFIDO', 1310, 'confido', '', '', '2_57', 1), ('FRVMENTARIVS/2', 1311, 'frumentarius', '', '', '2_57', 1), ('INTERIM', 1312, 'interim', '', '', '2_57', 1), ('IVGVMSVPERSVM', 1313, 'iugum', '', '', '2_57', 1), ('MVTO/2', 1314, 'muto', '', '', '2_57', 1), ('PILVM', 1315, 'pilum', '', '', '2_57', 1), ('RESFRVMENTARIA', 1316, 'res', '', '', '2_57', 1), ('SINISTER', 1317, 'sinister', '', '', '2_57', 1), ('CONQVIRO', 1318, 'conquiro', '', '', '2_58', 1), ('DEDITIO', 1319, 'deditio', '', '', '2_58', 1), ('NONNVLLVS', 1320, 'nonnullus', '', '', '2_58', 1), ('OBICIO', 1321, 'obiicio', '', '', '2_58', 1), ('TRIDVVM', 1322, 'triduum', '', '', '2_58', 1), ('VALLVM', 1323, 'vallum', '', '', '2_58', 1), ('HERI', 1324, 'heri', '', '', '2_6', 1), ('HODIE', 1325, 'hodie', '', '', '2_6', 1), ('ARCESSO', 1326, 'arcesso', '', '', '2_60', 1), ('CONCILIVM', 1327, 'concilium', '', '', '2_60', 1), ('FACTIO', 1328, 'factio', '', '', '2_60', 1), ('FERVS/2', 1329, 'ferus', '', '', '2_60', 1), ('INDICO/2', 1330, 'indico', '', '', '2_60', 1), ('OCCVLTVS', 1331, 'occultus', '', '', '2_60', 1), ('PRIMO', 1332, 'primo', '', '', '2_60', 1), ('RECENS/1', 1333, 'recens', '', '', '2_60', 1), ('VICTOR', 1334, 'victor', '', '', '2_60', 1), ('AVDEO', 1335, 'audeo', '', '', '2_61', 1), ('COGITO', 1336, 'cogito', '', '', '2_61', 1), ('COLLOQVIVM', 1337, 'colloquium', '', '', '2_61', 1), ('OPVSEST', 1338, 'opus', '', '', '2_61', 1), ('PAVLATIM', 1339, 'paulatim', '', '', '2_61', 1), ('PRAESERTIM', 1340, 'praesertim', '', '', '2_61', 2), ('QVAMOBREM', 1341, 'quam', '', '', '2_61', 1), ('SERVITVS', 1342, 'servitus', '', '', '2_61', 1), ('TVRPIS', 1343, 'turpis', '', '', '2_61', 1), ('VTERQVE', 1344, 'uterque', '', '', '2_61', 1), ('CONGREDIOR', 1345, 'congredior', '', '', '2_62', 1), ('QVEMADMODVM/2', 1346, nan, '', '', '2_62', 1), ('TEMPTO', 1347, 'tempto', '', '', '2_62', 1), ('CREBER', 1348, 'creber', '', '', '2_63', 1), ('DEDVCO', 1349, 'deduco', '', '', '2_63', 1), ('FACVLTAS', 1350, 'facultas', '', '', '2_63', 1), ('FIDES/2', 1351, 'fides', '', '', '2_63', 1), ('PACO', 1352, 'paco', '', '', '2_63', 1), ('PRIMVM', 1353, 'primum', '', '', '2_63', 1), ('RVMOR', 1354, 'rumor', '', '', '2_63', 1), ('SOLLICITO', 1355, 'sollicito', '', '', '2_63', 1), ('VERSO', 1356, 'verso', '', '', '2_63', 1), ('POTENS', 1357, 'potens', '', '', '2_64', 1), ('TOTIDEM/1', 1358, 'totidem', '', '', '2_64', 1), ('ADEO/1', 1359, 'adeo', '', '', '2_66', 1), ('EQVESTER', 1360, 'equester', '', '', '2_66', 1), ('EXITINERE', 1361, 'ex', '', '', '2_66', 1), ('EXPERIOR', 1362, 'experior', '', '', '2_66', 1), ('INFIMVS', 1363, 'infimus', '', '', '2_66', 1), ('NOVISSIMVMAGMEN', 1364, 'novissimum', '', '', '2_66', 1), ('PAVLISPER', 1365, 'paulisper', '', '', '2_66', 1), ('PROTINVS', 1366, 'protinus', '', '', '2_66', 1), ('VSVS', 1367, 'usus', '', '', '2_66', 1), ('ADORIOR', 1368, 'adorior', '', '', '2_67', 1), ('AVDACITER', 1369, 'audaciter', '', '', '2_67', 1), ('DECERTO/1', 1370, 'decerto', '', '', '2_67', 1), ('PERSPICIO', 1371, 'perspicio', '', '', '2_67', 1), ('SVBSEQVOR', 1372, 'subsequor', '', '', '2_67', 1), ('TVMVLTVS', 1373, 'tumultus', '', '', '2_67', 1), ('ADITVS', 1374, 'aditus', '', '', '2_69', 1), ('CAVSA/2', 1375, 'causa', '', '', '2_69', 1), ('CONTVMELIA', 1376, 'contumelia', '', '', '2_69', 1), ('DEDO', 1377, 'dedo', '', '', '2_69', 1), ('PERFERO', 1378, 
'perfero', '', '', '2_69', 1), ('POSCO', 1379, 'posco', '', '', '2_69', 1), ('LVDO', 1380, 'ludo', '', '', '2_7', 1), ('ADIGO', 1381, 'adigo', '', '', '2_70', 1), ('ADMINISTRO', 1382, 'administro', '', '', '2_70', 1), ('DESVM/1', 1383, 'desum', '', '', '2_70', 1), ('DIMICO', 1384, 'dimico', '', '', '2_70', 1), ('NECESSARIVS/2', 1385, 'necessarius', '', '', '2_70', 1), ('NEVE/1', 1386, 'neu', '', '', '2_70', 1), ('PRISTINVS', 1387, 'pristinus', '', '', '2_70', 2), ('ADVERSVS/1', 1388, 'adversus', '', '', '2_71', 1), ('DIVERSVS', 1389, 'diversus', '', '', '2_71', 1), ('MAGIS/2', 1390, 'magis', '', '', '2_71', 1), ('PEDES', 1391, 'pedes', '', '', '2_71', 1), ('CENTVRIO/1', 1392, 'centurio', '', '', '2_72', 1), ('CORNV', 1393, 'cornu', '', '', '2_72', 2), ('DEXTER', 1394, 'dexter', '', '', '2_72', 1), ('TARDO', 1395, 'tardo', '', '', '2_72', 1), ('ERVPTIO', 1396, 'eruptio', '', '', '2_74', 1), ('ONVS', 1397, 'onus', '', '', '2_74', 1), ('PLERVMQVE', 1398, 'plerumque', '', '', '2_74', 1), ('PROCVL', 1399, 'procul', '', '', '2_74', 1), ('TVRRIS', 1400, 'turris', '', '', '2_74', 2), ('BINVS', 1401, 'bini', '', '', '2_77', 1), ('CIRCVMSISTO', 1402, 'circumsisto', '', '', '2_77', 1), ('MARITIMVS', 1403, 'maritimus', '', '', '2_77', 1), ('OPPORTVNVS', 1404, 'opportunus', '', '', '2_77', 2), ('ORAMARITIMA', 1405, 'ora', '', '', '2_77', 1), ('VSQVE', 1406, 'usque', '', '', '2_77', 1), ('ALITER', 1407, 'aliter', '', '', '2_79', 1), ('COMMEMORO', 1408, 'commemoro', '', '', '2_79', 1), ('DECERNO', 1409, 'decerno', '', '', '2_79', 1), ('MEOFFERO', 1410, 'me', '', '', '2_79', 1), ('OFFERO', 1411, 'offero', '', '', '2_79', 1), ('ANIMAL', 1412, 'animal', '', '', '2_8', 1), ('AVIS', 1413, 'avis', '', '', '2_8', 1), ('INIMICVS/2', 1414, 'inimicus', '', '', '2_8', 1), ('CLASSIS', 1415, 'classis', '', '', '2_80', 1), ('EXIGVVS', 1416, 'exiguus', '', '', '2_80', 1), ('MODO/1', 1417, 'modo', '', '', '2_80', 1), ('NATIO/1', 1418, 'natio', '', '', '2_80', 1), ('TEMERE', 1419, 'temere', '', '', '2_80', 1), ('AESTVS', 1420, 'aestus', '', '', '2_81', 1), ('AQVILA', 1421, 'aquila', '', '', '2_81', 1), ('EXPONO', 1422, 'expono', '', '', '2_81', 1), ('MOTVS', 1423, 'motus', '', '', '2_81', 1), ('NAVISLONGA', 1424, 'navis', '', '', '2_81', 1), ('NAVISONERARIA', 1425, 'navis', '', '', '2_81', 1), ('PRODO', 1426, 'prodo', '', '', '2_81', 1), ('SAGITTA', 1427, 'sagitta', '', '', '2_81', 1), ('VNIVERSVS', 1428, 'universus', '', '', '2_81', 1), ('QVEROR', 1429, 'queror', '', '', '2_82', 1), ('QVICVMQVE/1', 1430, 'quicumque', '', '', '2_82', 1), ('VLTRO', 1431, 'ultro', '', '', '2_82', 1), ('COLLOQVOR', 1432, 'colloquor', '', '', '2_83', 1), ('COMMEATVS', 1433, 'commeatus', '', '', '2_83', 1), ('CONSTAT', 1434, 'constat', '', '', '2_83', 2), ('CVRO', 1435, 'curo', '', '', '2_83', 1), ('LENIS/2', 1436, 'lenis', '', '', '2_83', 1), ('CONTINVVS', 1437, 'continuus', '', '', '2_84', 1), ('LACESSO', 1438, 'lacesso', '', '', '2_84', 1), ('PRAEDICO/1', 1439, 'praedico', '', '', '2_84', 1), ('FERRVM', 1440, 'ferrum', '', '', '2_85', 1), ('INTERIOR', 1441, 'interior', '', '', '2_85', 1), ('PECVS/1', 1442, 'pecus', '', '', '2_85', 1), ('IVRO', 1443, 'iuro', '', '', '2_88', 1), ('REPENTE', 1444, 'repente', '', '', '2_88', 1), ('TVEOR', 1445, 'tueor', '', '', '2_88', 1), ('VETVS', 1446, 'vetus', '', '', '2_88', 1), ('ADHIBEO', 1447, 'adhibeo', '', '', '2_89', 1), ('ADOLESCENS/2', 1448, 'adulescens', '', '', '2_89', 1), ('COMMVNICO', 1449, 'communico', '', '', '2_89', 1), ('PAREO', 1450, 'pareo', '', '', '2_89', 1), ('RELIGIO', 1451, 
'religio', '', '', '2_89', 1), ('INTEREO/1', 1452, 'intereo', '', '', '2_90', 1), ('METVS', 1453, 'metus', '', '', '2_90', 1), ('SVASPONTE', 1454, 'sua', '', '', '2_90', 1), ('VVLGVS', 1455, 'vulgus', '', '', '2_90', 1), ('DIRIGO', 1456, 'dirigo', '', '', '2_94', 1), ('FINIO', 1457, 'finio', '', '', '2_94', 1), ('LAVS', 1458, 'laus', '', '', '2_94', 1), ('SICVT/1', 1459, 'sicut', '', '', '2_94', 1), ('ADMODVM', 1460, 'admodum', '', '', '2_97', 1), ('FIDVCIA', 1461, 'fiducia', '', '', '2_97', 1), ('MEDIOCRIS', 1462, 'mediocris', '', '', '2_97', 1), ('PLANITIES', 1463, 'planities', '', '', '2_97', 1), ('RECVPERO', 1464, 'recupero', '', '', '2_97', 1), ('EXPLEO', 1465, 'expleo', '', '', '2_99', 1), ('PRIOR', 1466, 'prior', '', '', '2_99', 1), ('PROFICIO', 1467, 'proficio', '', '', '2_99', 1)]
section_list ={'1': 'start', '1.15': 'start', '1.18': '1.15', '1.3': '1.18', '1.31': '1.3', '1.38': '1.31', '1.4': '1.38', '1.52': '1.4', '1.63': '1.52', '1.64': '1.63', '2.1': '1.64', '2.14': '2.1', '2.2': '2.14', '2.3': '2.2', '2.4': '2.3', '2.40': '2.4', '2.41': '2.40', '2.42': '2.41', '2.43': '2.42', '2.44': '2.43', '2.45': '2.44', '2.46': '2.45', '2.47': '2.46', '2.48': '2.47', '2.49': '2.48', '2.5': '2.49', '2.50': '2.5', '2.51': '2.50', '2.52': '2.51', '2.53': '2.52', '2.55': '2.53', '2.56': '2.55', '2.57': '2.56', '2.6': '2.57', '2.60': '2.6', '2.61': '2.60', '2.62': '2.61', '2.63': '2.62', '2.64': '2.63', '2.66': '2.64', '2.67': '2.66', '2.69': '2.67', '2.7': '2.69', '2.70': '2.7', '2.72': '2.70', '2.77': '2.72', '2.79': '2.77', '2.8': '2.79', '2.80': '2.8', '2.83': '2.80', '2.88': '2.83', '2.89': '2.88', '2.9': '2.89', '2.90': '2.9', '2.94': '2.90', '2.97': '2.94', '2.99': '2.97', '1.1': '2.99', '1.10': '1.1', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14': '1.13', '1.16': '1.14', '1.17': '1.16', '1.19': '1.17', '1.2': '1.19', '1.20': '1.2', '1.21': '1.20', '1.22': '1.21', '1.23': '1.22', '1.24': '1.23', '1.25': '1.24', '1.26': '1.25', '1.27': '1.26', '1.28': '1.27', '1.29': '1.28', '1.30': '1.29', '1.32': '1.30', '1.33': '1.32', '1.34': '1.33', '1.35': '1.34', '1.36': '1.35', '1.37': '1.36', '1.39': '1.37', '1.40': '1.39', '1.41': '1.40', '1.42': '1.41', '1.43': '1.42', '1.44': '1.43', '1.45': '1.44', '1.46': '1.45', '1.47': '1.46', '1.48': '1.47', '1.49': '1.48', '1.5': '1.49', '1.50': '1.5', '1.51': '1.50', '1.53': '1.51', '1.54': '1.53', '1.56': '1.54', '1.57': '1.56', '1.58': '1.57', '1.59': '1.58', '1.6': '1.59', '1.60': '1.6', '1.61': '1.60', '1.62': '1.61', '1.65': '1.62', '1.66': '1.65', '1.67': '1.66', '1.68': '1.67', '1.69': '1.68', '1.7': '1.69', '1.70': '1.7', '1.8': '1.70', '1.9': '1.8', '2.10': '1.9', '2.11': '2.10', '2.12': '2.11', '2.13': '2.12', '2.15': '2.13', '2.16': '2.15', '2.17': '2.16', '2.18': '2.17', '2.19': '2.18', '2.20': '2.19', '2.21': '2.20', '2.22': '2.21', '2.23': '2.22', '2.24': '2.23', '2.25': '2.24', '2.26': '2.25', '2.27': '2.26', '2.28': '2.27', '2.29': '2.28', '2.30': '2.29', '2.31': '2.30', '2.32': '2.31', '2.33': '2.32', '2.34': '2.33', '2.35': '2.34', '2.36': '2.35', '2.37': '2.36', '2.38': '2.37', '2.39': '2.38', '2.54': '2.39', '2.58': '2.54', '2.71': '2.58', '2.74': '2.71', '2.81': '2.74', '2.82': '2.81', '2.84': '2.82', '2.85': '2.84', 'end': '2.85', 'start': 'start'}
title = "Latin for Americans Vol 1 and 2 (Ullman-Henderson)"
section_level = 1
language = "Latin"
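# Assemble the reader object below; the positional arguments follow the
# local text.Text call signature as written here (title, vocabulary entries,
# text body, section-order map, section level, language, plus two boolean
# flags whose meaning is not visible in this excerpt).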
book = text.Text(title, section_words, the_text, section_list, section_level, language, False, False) | [
"[email protected]"
] | |
afe30402c428b3b5cb816c08a6899bf22623498c | bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062 | /deploy/third_engine/demo_openvino/python/openvino_infer.py | 0ad51022b1793e7b6430025a7c71cc0de7658c8c | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleDetection | e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961 | bd83b98342b0a6bc8d8dcd5936233aeda1e32167 | refs/heads/release/2.6 | 2023-08-31T07:04:15.357051 | 2023-08-18T02:24:45 | 2023-08-18T02:24:45 | 217,475,193 | 12,523 | 3,096 | Apache-2.0 | 2023-09-10T10:05:56 | 2019-10-25T07:21:14 | Python | UTF-8 | Python | false | false | 9,233 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import argparse
from scipy.special import softmax  # noqa: F401 -- not used in this script
from openvino.runtime import Core
def image_preprocess(img_path, re_shape):
img = cv2.imread(img_path)
img = cv2.resize(
img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img, [2, 0, 1]) / 255
img = np.expand_dims(img, 0)
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
img -= img_mean
img /= img_std
return img.astype(np.float32)
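# A quick sanity check for image_preprocess (a sketch: "demo.jpg" is a
# placeholder path, not a file shipped with this demo). The function returns
# a normalized NCHW float32 blob ready to feed the compiled model:
#
#   blob = image_preprocess("demo.jpg", 320)
#   assert blob.shape == (1, 3, 320, 320) and blob.dtype == np.float32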
def get_color_map_list(num_classes):
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def draw_box(srcimg, results, class_label):
label_list = list(
map(lambda x: x.strip(), open(class_label, 'r').readlines()))
for i in range(len(results)):
color_list = get_color_map_list(len(label_list))
clsid2color = {}
classid, conf = int(results[i, 0]), results[i, 1]
xmin, ymin, xmax, ymax = int(results[i, 2]), int(results[i, 3]), int(
results[i, 4]), int(results[i, 5])
if classid not in clsid2color:
clsid2color[classid] = color_list[classid]
color = tuple(clsid2color[classid])
cv2.rectangle(srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)
print(label_list[classid] + ': ' + str(round(conf, 3)))
cv2.putText(
srcimg,
label_list[classid] + ':' + str(round(conf, 3)), (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (0, 255, 0),
thickness=2)
return srcimg
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
         the kept rows of box_scores (corner-form boxes plus score), shape (K, 5)
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
indexes = np.argsort(scores)
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(
current_box, axis=0), )
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
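# A hedged usage sketch for hard_nms: rows are (x1, y1, x2, y2, score). The
# two overlapping boxes below have IoU = 81 / (100 + 100 - 81) ~= 0.68, so
# with iou_threshold=0.5 the lower-scoring one is suppressed:
#
#   demo = np.array([[0., 0., 10., 10., 0.9],
#                    [1., 1., 11., 11., 0.8],
#                    [50., 50., 60., 60., 0.7]])
#   kept = hard_nms(demo, iou_threshold=0.5)  # kept.shape == (2, 5)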
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
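# Worked example for the two helpers above: boxes [0, 0, 4, 4] and
# [2, 2, 6, 6] overlap on a 2x2 patch, so
# iou = 4 / (16 + 16 - 4) ~= 0.143:
#
#   iou_of(np.array([[0., 0., 4., 4.]]), np.array([[2., 2., 6., 6.]]))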
class PicoDetNMS(object):
"""
Args:
        input_shape (tuple): network input spatial size (H, W)
        scale_x (float): width ratio of the original image to the network input
        scale_y (float): height ratio of the original image to the network input
        strides (list): feature map strides of the detection heads
        score_threshold (float): minimum class score kept before NMS
        nms_threshold (float): IoU threshold used to suppress overlapping boxes
        nms_top_k (int): maximum number of candidate boxes considered before NMS
        keep_top_k (int): maximum number of boxes kept per class after NMS
"""
def __init__(self,
input_shape,
scale_x,
scale_y,
strides=[8, 16, 32, 64],
score_threshold=0.4,
nms_threshold=0.5,
nms_top_k=1000,
keep_top_k=100):
self.input_shape = input_shape
self.scale_x = scale_x
self.scale_y = scale_y
self.strides = strides
self.score_threshold = score_threshold
self.nms_threshold = nms_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
def __call__(self, decode_boxes, select_scores):
batch_size = 1
out_boxes_list = []
for batch_id in range(batch_size):
# nms
bboxes = np.concatenate(decode_boxes, axis=0)
confidences = np.concatenate(select_scores, axis=0)
picked_box_probs = []
picked_labels = []
for class_index in range(0, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > self.score_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = bboxes[mask, :]
box_probs = np.concatenate(
[subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = hard_nms(
box_probs,
iou_threshold=self.nms_threshold,
top_k=self.keep_top_k, )
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if len(picked_box_probs) == 0:
out_boxes_list.append(np.empty((0, 4)))
else:
picked_box_probs = np.concatenate(picked_box_probs)
# resize output boxes
picked_box_probs[:, 0] *= self.scale_x
picked_box_probs[:, 2] *= self.scale_x
picked_box_probs[:, 1] *= self.scale_y
picked_box_probs[:, 3] *= self.scale_y
                # output rows: [class, score, x1, y1, x2, y2]
out_boxes_list.append(
np.concatenate(
[
np.expand_dims(
np.array(picked_labels),
axis=-1), np.expand_dims(
picked_box_probs[:, 4], axis=-1),
picked_box_probs[:, :4]
],
axis=1))
out_boxes_list = np.concatenate(out_boxes_list, axis=0)
return out_boxes_list
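# A hedged sketch of how PicoDetNMS is driven (shapes are illustrative, not
# taken from a real PicoDet export): per-head box and score arrays are passed
# as lists, concatenated internally, and filtered class by class.
#
#   nms = PicoDetNMS((320, 320), scale_x=2.0, scale_y=1.5)
#   boxes = [np.random.rand(100, 4) * 320 for _ in range(4)]
#   scores = [np.random.rand(100, 80) for _ in range(4)]
#   results = nms(boxes, scores)  # rows: [class, score, x1, y1, x2, y2]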
def detect(img_file, compiled_model, class_label):
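    # Relies on the module-level `test_image` prepared in __main__;
    # `img_file` is re-read below only to recover the original resolution so
    # the boxes can be rescaled back.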
output = compiled_model.infer_new_request({0: test_image})
result_ie = list(output.values())
decode_boxes = []
select_scores = []
num_outs = int(len(result_ie) / 2)
for out_idx in range(num_outs):
decode_boxes.append(result_ie[out_idx])
select_scores.append(result_ie[out_idx + num_outs])
image = cv2.imread(img_file, 1)
scale_x = image.shape[1] / test_image.shape[3]
scale_y = image.shape[0] / test_image.shape[2]
nms = PicoDetNMS(test_image.shape[2:], scale_x, scale_y)
np_boxes = nms(decode_boxes, select_scores)
res_image = draw_box(image, np_boxes, class_label)
cv2.imwrite('res.jpg', res_image)
cv2.imshow("res", res_image)
cv2.waitKey()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--img_path',
type=str,
default='../../demo_onnxruntime/imgs/bus.jpg',
help="image path")
parser.add_argument(
'--onnx_path',
type=str,
default='out_onnxsim_infer/picodet_s_320_postproccesed_woNMS.onnx',
help="onnx filepath")
parser.add_argument('--in_shape', type=int, default=320, help="input_size")
parser.add_argument(
'--class_label',
type=str,
default='coco_label.txt',
help="class label file")
args = parser.parse_args()
ie = Core()
net = ie.read_model(args.onnx_path)
test_image = image_preprocess(args.img_path, args.in_shape)
compiled_model = ie.compile_model(net, 'CPU')
detect(args.img_path, compiled_model, args.class_label)
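# Example invocation (paths are placeholders; the argparse defaults above
# point at the demo assets in this deploy directory):
#
#   python openvino_infer.py --onnx_path model.onnx --img_path test.jpg \
#       --in_shape 320 --class_label coco_label.txt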
| [
"[email protected]"
] | |
e05eaa484fcc3ba5e03a90a7887dca29236b685f | 7a9489b25ddbfd066d19523dbabd1afdf6006e17 | /venv/Lib/site-packages/plotly/graph_objs/_scattergl.py | 1b8175314030b77821d0bec23208316c0dcd0e22 | [] | no_license | aidam401/Harmonic_Scanner | 840b0b39cefee1fcf290850330a9b406c26ffe68 | 1196ae83fc517a9470cc3ab70bfd4765f0ade7bf | refs/heads/main | 2023-08-25T12:49:31.502910 | 2021-09-21T19:25:36 | 2021-09-21T19:25:36 | 408,947,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107,200 | py | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Scattergl(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "scattergl"
_valid_props = {
"connectgaps",
"customdata",
"customdatasrc",
"dx",
"dy",
"error_x",
"error_y",
"fill",
"fillcolor",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"line",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"x",
"x0",
"xaxis",
"xcalendar",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"y",
"y0",
"yaxis",
"ycalendar",
"yhoverformat",
"yperiod",
"yperiod0",
"yperiodalignment",
"ysrc",
}
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items in the markers
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# dx
# --
@property
def dx(self):
"""
Sets the x coordinate step. See `x0` for more info.
The 'dx' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dx"]
@dx.setter
def dx(self, val):
self["dx"] = val
# dy
# --
@property
def dy(self):
"""
Sets the y coordinate step. See `y0` for more info.
The 'dy' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dy"]
@dy.setter
def dy(self, val):
self["dy"] = val
# error_x
# -------
@property
def error_x(self):
"""
The 'error_x' property is an instance of ErrorX
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.ErrorX`
- A dict of string/value properties that will be passed
to the ErrorX constructor
Supported dict properties:
array
                    Sets the data corresponding to the length of each
error bar. Values are plotted relative to the
underlying data.
arrayminus
                    Sets the data corresponding to the length of each
error bar in the bottom (left) direction for
                    vertical (horizontal) bars. Values are plotted
relative to the underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud
for arrayminus .
arraysrc
Sets the source reference on Chart Studio Cloud
for array .
color
                    Sets the stroke color of the error bars.
copy_ystyle
symmetric
Determines whether or not the error bars have
                    the same length in both directions (top/bottom
                    for vertical bars, left/right for horizontal
                    bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error
                    bars. If "constant", the bar lengths are of a
constant value. Set this constant in `value`.
If "percent", the bar lengths correspond to a
percentage of underlying data. Set this
percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the
underlying data. If "data", the bar lengths are
set with data set `array`.
value
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars.
valueminus
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
visible
Determines whether or not this set of error
bars is visible.
width
Sets the width (in px) of the cross-bar at both
ends of the error bars.
Returns
-------
plotly.graph_objs.scattergl.ErrorX
"""
return self["error_x"]
@error_x.setter
def error_x(self, val):
self["error_x"] = val
# error_y
# -------
@property
def error_y(self):
"""
The 'error_y' property is an instance of ErrorY
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.ErrorY`
- A dict of string/value properties that will be passed
to the ErrorY constructor
Supported dict properties:
array
                    Sets the data corresponding to the length of each
error bar. Values are plotted relative to the
underlying data.
arrayminus
                    Sets the data corresponding to the length of each
error bar in the bottom (left) direction for
                    vertical (horizontal) bars. Values are plotted
relative to the underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud
for arrayminus .
arraysrc
Sets the source reference on Chart Studio Cloud
for array .
color
                    Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have
                    the same length in both directions (top/bottom
                    for vertical bars, left/right for horizontal
                    bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error
                    bars. If "constant", the bar lengths are of a
constant value. Set this constant in `value`.
If "percent", the bar lengths correspond to a
percentage of underlying data. Set this
percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the
underlying data. If "data", the bar lengths are
set with data set `array`.
value
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars.
valueminus
Sets the value of either the percentage (if
`type` is set to "percent") or the constant (if
`type` is set to "constant") corresponding to
the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
visible
Determines whether or not this set of error
bars is visible.
width
Sets the width (in px) of the cross-bar at both
ends of the error bars.
Returns
-------
plotly.graph_objs.scattergl.ErrorY
"""
return self["error_y"]
@error_y.setter
def error_y(self, val):
self["error_y"] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Defaults to "none"
unless this trace is stacked, then it gets "tonexty"
("tonextx") if `orientation` is "v" ("h") Use with `fillcolor`
if not "none". "tozerox" and "tozeroy" fill to x=0 and y=0
respectively. "tonextx" and "tonexty" fill between the
endpoints of this trace and the endpoints of the trace before
it, connecting those endpoints with straight lines (to make a
stacked area graph); if there is no trace before it, they
behave like "tozerox" and "tozeroy". "toself" connects the
endpoints of the trace (or each segment of the trace if it has
gaps) into a closed shape. "tonext" fills the space between two
traces if one completely encloses the other (eg consecutive
contour lines), and behaves like "toself" if there is no trace
before it. "tonext" should not be used if one trace does not
enclose the other. Traces in a `stackgroup` will only fill to
(or be filled to) other traces in the same group. With multiple
`stackgroup`s or some traces stacked and some not, if fill-
linked traces are not already consecutive, the later ones will
be pushed down in the drawing order.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'tozeroy', 'tozerox', 'tonexty', 'tonextx',
'toself', 'tonext']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                    only if the hover label text spans two or more
                    lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.scattergl.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appears
in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attribute that can be specified per-point
(the ones that are `arrayOk: true`) is available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
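# Example (minimal sketch, made-up data): a per-point template that formats
# y as currency and hides the secondary box with an empty <extra> tag:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=[1, 2, 3],
#       y=[1.5, 2.25, 3.75],
#       hovertemplate="x: %{x}<br>Price: %{y:$.2f}<extra></extra>",
#   )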
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (x,y) pair. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order to
this trace's (x,y) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids provide object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
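# Example (minimal sketch): string ids keep each point matched to itself
# across animation frames, independent of its position in the data arrays:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(x=[0, 1, 2], y=[3, 1, 2], ids=["a", "b", "c"])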
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.scattergl.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the style of the lines.
shape
Determines the line shape. The values
correspond to step-wise line shapes.
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattergl.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
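# Example (minimal sketch, made-up data): `line` accepts a plain dict of the
# properties listed above:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=[0, 1, 2], y=[0, 2, 1],
#       mode="lines",
#       line=dict(color="royalblue", width=2, dash="dash"),
#   )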
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.scattergl.marker.C
olorBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
line
:class:`plotly.graph_objects.scattergl.marker.L
ine` instance or dict with compatible
properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
`marker.color` is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for symbol .
Returns
-------
plotly.graph_objs.scattergl.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
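# Example (minimal sketch, made-up data): mapping `marker.color` to a
# numerical array colors points through the colorscale; `showscale` adds
# the colorbar:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=[0, 1, 2, 3], y=[2, 1, 3, 0],
#       mode="markers",
#       marker=dict(size=12, color=[0.1, 0.5, 0.8, 0.3],
#                   colorscale="Viridis", showscale=True),
#   )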
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace.
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["mode"]
@mode.setter
def mode(self, val):
self["mode"] = val
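# Example (minimal sketch): drawing flags are joined with '+':
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(x=[0, 1, 2], y=[1, 3, 2], mode="lines+markers")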
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattergl.selected
.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattergl.selected
.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattergl.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
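# Example (minimal sketch, made-up data): preselecting points by index; the
# `selected`/`unselected` styles then apply:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=[0, 1, 2, 3], y=[1, 2, 0, 3],
#       mode="markers",
#       selectedpoints=[0, 2],
#       selected=dict(marker=dict(color="red")),
#   )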
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.scattergl.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair. If a single
string, the same string appears over all the data points. If an
array of string, the items are mapped in order to the this
trace's (x,y) coordinates. If trace `hoverinfo` contains a
"text" flag and "hovertext" is not set, these elements will be
seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.scattergl.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textposition
# ------------
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respect to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
textposition .
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
Every attribute that can be specified per-point (the ones that
are `arrayOk: true`) is available.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
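# Example (minimal sketch, made-up data): per-point labels need a "text"
# flag in `mode` to render:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=[1, 2, 3], y=[1.5, 2.25, 3.75],
#       mode="markers+text",
#       texttemplate="%{y:$.2f}",
#   )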
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
texttemplate .
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
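# Example (minimal sketch): holding `uirevision` constant across data
# updates (e.g. in a Dash callback) preserves user zoom and selections:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(x=[0, 1, 2], y=[2, 0, 1], uirevision="keep")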
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattergl.unselect
ed.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattergl.unselect
ed.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattergl.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
Sets the x coordinates.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# x0
# --
@property
def x0(self):
"""
Alternate to `x`. Builds a linear space of x coordinates. Use
with `dx` where `x0` is the starting coordinate and `dx` the
step.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
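# Example (minimal sketch): `x0`/`dx` generate x = 5.0, 5.5, 6.0, 6.5
# without an explicit `x` array:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(y=[1, 4, 2, 3], x0=5, dx=0.5)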
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
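# Example (minimal sketch, made-up data): pointing the trace at a second
# x axis defined in the layout:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scattergl(x=[1, 2, 3], y=[3, 1, 2], xaxis="x2"))
#   fig.update_layout(xaxis2=dict(overlaying="x", side="top"))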
# xcalendar
# ---------
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
# xhoverformat
# ------------
@property
def xhoverformat(self):
"""
Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
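# Example (minimal sketch, made-up data): forcing two-decimal hover
# formatting for x regardless of the axis tick format:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(x=[1.234, 2.345], y=[1, 2], xhoverformat=".2f")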
# xperiod
# -------
@property
def xperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the x axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'xperiod' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod"]
@xperiod.setter
def xperiod(self, val):
self["xperiod"] = val
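# Example (minimal sketch, made-up data): on a date axis, "M1" positions
# points at monthly periods; `xperiodalignment` (below) picks the start,
# middle or end of each period:
#
#   import plotly.graph_objects as go
#   trace = go.Scattergl(
#       x=["2021-01", "2021-02", "2021-03"], y=[3, 1, 2],
#       xperiod="M1", xperiodalignment="middle",
#   )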
# xperiod0
# --------
@property
def xperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the x0
axis. When `x0period` is a round number of weeks, the `x0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'xperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod0"]
@xperiod0.setter
def xperiod0(self, val):
self["xperiod0"] = val
# xperiodalignment
# ----------------
@property
def xperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
The 'xperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["xperiodalignment"]
@xperiodalignment.setter
def xperiodalignment(self, val):
self["xperiodalignment"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the y coordinates.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# y0
# --
@property
def y0(self):
"""
Alternate to `y`. Builds a linear space of y coordinates. Use
with `dy` where `y0` is the starting coordinate and `dy` the
step.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# ycalendar
# ---------
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
# yhoverformat
# ------------
@property
def yhoverformat(self):
"""
Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
# yperiod
# -------
@property
def yperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the y axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'yperiod' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod"]
@yperiod.setter
def yperiod(self, val):
self["yperiod"] = val
# yperiod0
# --------
@property
def yperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the y0
axis. When `y0period` is a round number of weeks, the `y0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'yperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod0"]
@yperiod0.setter
def yperiod0(self, val):
self["yperiod0"] = val
# yperiodalignment
# ----------------
@property
def yperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
The 'yperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["yperiodalignment"]
@yperiodalignment.setter
def yperiodalignment(self, val):
self["yperiodalignment"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
error_x
:class:`plotly.graph_objects.scattergl.ErrorX` instance
or dict with compatible properties
error_y
:class:`plotly.graph_objects.scattergl.ErrorY` instance
or dict with compatible properties
fill
Sets the area to fill with a solid color. Defaults to
"none" unless this trace is stacked, then it gets
"tonexty" ("tonextx") if `orientation` is "v" ("h") Use
with `fillcolor` if not "none". "tozerox" and "tozeroy"
fill to x=0 and y=0 respectively. "tonextx" and
"tonexty" fill between the endpoints of this trace and
the endpoints of the trace before it, connecting those
endpoints with straight lines (to make a stacked area
graph); if there is no trace before it, they behave
like "tozerox" and "tozeroy". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape. "tonext" fills the
space between two traces if one completely encloses the
other (e.g. consecutive contour lines), and behaves like
"toself" if there is no trace before it. "tonext"
should not be used if one trace does not enclose the
other. Traces in a `stackgroup` will only fill to (or
be filled to) other traces in the same group. With
multiple `stackgroup`s or some traces stacked and some
not, if fill-linked traces are not already consecutive,
the later ones will be pushed down in the drawing
order.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattergl.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.scattergl.Legendgrouptitle
` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
line
:class:`plotly.graph_objects.scattergl.Line` instance
or dict with compatible properties
marker
:class:`plotly.graph_objects.scattergl.Marker` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattergl.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattergl.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attribute
that can be specified per-point (the ones that are
`arrayOk: true`) is available.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattergl.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
"""
def __init__(
self,
arg=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
error_x=None,
error_y=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
line=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
x=None,
x0=None,
xaxis=None,
xcalendar=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
ycalendar=None,
yhoverformat=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
**kwargs
):
"""
Construct a new Scattergl object
The data visualized as scatter point or lines is set in `x` and
`y` using the WebGL plotting engine. Bubble charts are achieved
by setting `marker.size` and/or `marker.color` to numerical
arrays.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Scattergl`
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
error_x
:class:`plotly.graph_objects.scattergl.ErrorX` instance
or dict with compatible properties
error_y
:class:`plotly.graph_objects.scattergl.ErrorY` instance
or dict with compatible properties
fill
Sets the area to fill with a solid color. Defaults to
"none" unless this trace is stacked, then it gets
"tonexty" ("tonextx") if `orientation` is "v" ("h") Use
with `fillcolor` if not "none". "tozerox" and "tozeroy"
fill to x=0 and y=0 respectively. "tonextx" and
"tonexty" fill between the endpoints of this trace and
the endpoints of the trace before it, connecting those
endpoints with straight lines (to make a stacked area
graph); if there is no trace before it, they behave
like "tozerox" and "tozeroy". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape. "tonext" fills the
space between two traces if one completely encloses the
other (e.g. consecutive contour lines), and behaves like
"toself" if there is no trace before it. "tonext"
should not be used if one trace does not enclose the
other. Traces in a `stackgroup` will only fill to (or
be filled to) other traces in the same group. With
multiple `stackgroup`s or some traces stacked and some
not, if fill-linked traces are not already consecutive,
the later ones will be pushed down in the drawing
order.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattergl.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.scattergl.Legendgrouptitle
` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
line
:class:`plotly.graph_objects.scattergl.Line` instance
or dict with compatible properties
marker
:class:`plotly.graph_objects.scattergl.Marker` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattergl.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattergl.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attribute
that can be specified per-point (the ones that are
`arrayOk: true`) is available.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattergl.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
            Sets the source reference on Chart Studio Cloud for `x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
            Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
            tickformat "%H~%M~%S.%2f" would display *09~15~23.46*. By
            default the values are formatted using
            `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
            Sets the source reference on Chart Studio Cloud for `y`.
Returns
-------
Scattergl
"""
super(Scattergl, self).__init__("scattergl")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattergl
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattergl`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("connectgaps", None)
_v = connectgaps if connectgaps is not None else _v
if _v is not None:
self["connectgaps"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dx", None)
_v = dx if dx is not None else _v
if _v is not None:
self["dx"] = _v
_v = arg.pop("dy", None)
_v = dy if dy is not None else _v
if _v is not None:
self["dy"] = _v
_v = arg.pop("error_x", None)
_v = error_x if error_x is not None else _v
if _v is not None:
self["error_x"] = _v
_v = arg.pop("error_y", None)
_v = error_y if error_y is not None else _v
if _v is not None:
self["error_y"] = _v
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("mode", None)
_v = mode if mode is not None else _v
if _v is not None:
self["mode"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("x0", None)
_v = x0 if x0 is not None else _v
if _v is not None:
self["x0"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xperiod", None)
_v = xperiod if xperiod is not None else _v
if _v is not None:
self["xperiod"] = _v
_v = arg.pop("xperiod0", None)
_v = xperiod0 if xperiod0 is not None else _v
if _v is not None:
self["xperiod0"] = _v
_v = arg.pop("xperiodalignment", None)
_v = xperiodalignment if xperiodalignment is not None else _v
if _v is not None:
self["xperiodalignment"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("y0", None)
_v = y0 if y0 is not None else _v
if _v is not None:
self["y0"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("ycalendar", None)
_v = ycalendar if ycalendar is not None else _v
if _v is not None:
self["ycalendar"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("yperiod", None)
_v = yperiod if yperiod is not None else _v
if _v is not None:
self["yperiod"] = _v
_v = arg.pop("yperiod0", None)
_v = yperiod0 if yperiod0 is not None else _v
if _v is not None:
self["yperiod0"] = _v
_v = arg.pop("yperiodalignment", None)
_v = yperiodalignment if yperiodalignment is not None else _v
if _v is not None:
self["yperiodalignment"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "scattergl"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
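# A minimal usage sketch for the class defined above (editorial example, not
# part of the generated module; assumes plotly is installed and exposes this
# class as plotly.graph_objects.Scattergl):
if __name__ == "__main__":
    import plotly.graph_objects as go

    # Scattergl renders through WebGL, which stays responsive at point counts
    # where the SVG-based go.Scatter trace becomes slow.
    fig = go.Figure(
        go.Scattergl(
            x=list(range(100000)),
            y=[(i * 37) % 101 for i in range(100000)],
            mode="markers",
            name="webgl-points",
        )
    )
    fig.show()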
# === /migrations/versions/7ca65b6611b6_.py (DD-DeCaF/design-storage, Apache-2.0) ===
"""empty message
Revision ID: 7ca65b6611b6
Revises:
Create Date: 2018-12-18 14:28:03.918045
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '7ca65b6611b6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('design',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('project_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('model_id', sa.Integer(), nullable=False),
sa.Column('design', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_design_project_id'), 'design', ['project_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_design_project_id'), table_name='design')
op.drop_table('design')
# ### end Alembic commands ###
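# Typical ways to run this revision (illustrative; assumes a standard Alembic
# setup whose alembic.ini points at this versions/ directory):
#
#     alembic upgrade head      # runs upgrade(), creating the "design" table
#     alembic downgrade -1      # runs downgrade(), dropping it again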
# === /examples/python/pe_reader.py (tomagoyaky/LIEF, Apache-2.0) ===
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Description
# -----------
# Print information about a PE file
import lief
from lief import PE
from lief.PE import oid_to_string
from optparse import OptionParser
import sys
def print_header(binary):
dos_header = binary.dos_header
header = binary.header
optional_header = binary.optional_header
format_str = "{:<33} {:<30}"
format_hex = "{:<33} 0x{:<28x}"
format_dec = "{:<33} {:<30d}"
print("== Dos Header ==")
    print(format_str.format("Magic:", str(dos_header.magic)))
print(format_dec.format("Used bytes in the last page:", dos_header.used_bytes_in_the_last_page))
print(format_dec.format("File size in pages:", dos_header.file_size_in_pages))
print(format_dec.format("Number of relocations:", dos_header.numberof_relocation))
print(format_dec.format("Header size in paragraphs:", dos_header.header_size_in_paragraphs))
print(format_dec.format("Minimum extra paragraphs:", dos_header.minimum_extra_paragraphs))
print(format_dec.format("Maximum extra paragraphs", dos_header.maximum_extra_paragraphs))
print(format_dec.format("Initial relative SS", dos_header.initial_relative_ss))
print(format_hex.format("Initial SP:", dos_header.initial_sp))
print(format_hex.format("Checksum:", dos_header.checksum))
print(format_dec.format("Initial IP:", dos_header.initial_ip))
print(format_dec.format("Initial CS:", dos_header.initial_relative_cs))
print(format_hex.format("Address of relocation table:", dos_header.addressof_relocation_table))
print(format_dec.format("Overlay number:", dos_header.overlay_number))
print(format_dec.format("OEM ID:", dos_header.oem_id))
print(format_dec.format("OEM information", dos_header.oem_info))
print(format_hex.format("Address of optional header:", dos_header.addressof_new_exeheader))
print("")
print("== Header ==")
char_str = " - ".join([str(chara).split(".")[-1] for chara in header.characteristics_list])
print(format_str.format("Signature:", "".join(map(chr, header.signature))))
print(format_str.format("Machine:", str(header.machine)))
print(format_dec.format("Number of sections:", header.numberof_sections))
print(format_dec.format("Time Date stamp:", header.time_date_stamps))
print(format_dec.format("Pointer to symbols:", header.pointerto_symbol_table))
print(format_dec.format("Number of symbols:", header.numberof_symbols))
print(format_dec.format("Size of optional header:", header.sizeof_optional_header))
print(format_str.format("Characteristics:", char_str))
print("")
dll_char_str = " - ".join([str(chara).split(".")[-1] for chara in optional_header.dll_characteristics_lists])
subsystem_str = str(optional_header.subsystem).split(".")[-1]
print("== Optional Header ==")
magic = "PE32" if optional_header.magic == PE.PE_TYPE.PE32 else "PE64"
print(format_str.format("Magic:", magic))
print(format_dec.format("Major linker version:", optional_header.major_linker_version))
print(format_dec.format("Minor linker version:", optional_header.minor_linker_version))
print(format_dec.format("Size of code:", optional_header.sizeof_code))
print(format_dec.format("Size of initialized data:", optional_header.sizeof_initialized_data))
print(format_dec.format("Size of uninitialized data:", optional_header.sizeof_uninitialized_data))
print(format_hex.format("Entry point:", optional_header.addressof_entrypoint))
print(format_hex.format("Base of code:", optional_header.baseof_code))
if magic == "PE32":
print(format_hex.format("Base of data", optional_header.baseof_data))
print(format_hex.format("Image base:", optional_header.imagebase))
print(format_hex.format("Section alignment:", optional_header.section_alignment))
print(format_hex.format("File alignment:", optional_header.file_alignment))
print(format_dec.format("Major operating system version:", optional_header.major_operating_system_version))
print(format_dec.format("Minor operating system version:", optional_header.minor_operating_system_version))
print(format_dec.format("Major image version:", optional_header.major_image_version))
print(format_dec.format("Minor image version:", optional_header.minor_image_version))
print(format_dec.format("Major subsystem version:", optional_header.major_subsystem_version))
print(format_dec.format("Minor subsystem version:", optional_header.minor_subsystem_version))
print(format_dec.format("WIN32 version value:", optional_header.win32_version_value))
print(format_hex.format("Size of image:", optional_header.sizeof_image))
print(format_hex.format("Size of headers:", optional_header.sizeof_headers))
print(format_hex.format("Checksum:", optional_header.checksum))
print(format_str.format("Subsystem:", subsystem_str))
print(format_str.format("DLL Characteristics:", dll_char_str))
print(format_hex.format("Size of stack reserve:", optional_header.sizeof_stack_reserve))
print(format_hex.format("Size of stack commit:", optional_header.sizeof_stack_commit))
print(format_hex.format("Size of heap reserve:", optional_header.sizeof_heap_reserve))
print(format_hex.format("Size of heap commit:", optional_header.sizeof_heap_commit))
print(format_dec.format("Loader flags:", optional_header.loader_flags))
print(format_dec.format("Number of RVA and size:", optional_header.numberof_rva_and_size))
print("")
def print_data_directories(binary):
data_directories = binary.data_directories
print("== Data Directories ==")
f_title = "|{:<24} | {:<10} | {:<10} | {:<8} |"
f_value = "|{:<24} | 0x{:<8x} | 0x{:<8x} | {:<8} |"
print(f_title.format("Type", "RVA", "Size", "Section"))
for directory in data_directories:
section_name = directory.section.name if directory.has_section else ""
print(f_value.format(str(directory.type).split('.')[-1], directory.rva, directory.size, section_name))
print("")
def print_sections(binary):
sections = binary.sections
print("== Sections ==")
f_title = "|{:<10} | {:<16} | {:<16} | {:<18} | {:<16} | {:<9} | {:<9}"
f_value = "|{:<10} | 0x{:<14x} | 0x{:<14x} | 0x{:<16x} | 0x{:<14x} | {:<9.2f} | {:<9}"
print(f_title.format("Name", "Offset", "Size", "Virtual Address", "Virtual size", "Entropy", "Flags"))
for section in sections:
flags = ""
for flag in section.characteristics_lists:
flags += str(flag).split(".")[-1] + " "
print(f_value.format(section.name, section.offset, section.size, section.virtual_address, section.virtual_size, section.entropy, flags))
print("")
def print_symbols(binary):
symbols = binary.symbols
if len(symbols) > 0:
print("== Symbols ==")
f_title = "|{:<20} | {:<10} | {:<8} | {:<8} | {:<8} | {:<13} |"
f_value = u"|{:<20} | 0x{:<8x} | {:<14} | {:<10} | {:<12} | {:<13} |"
print(f_title.format("Name", "Value", "Section number", "Basic type", "Complex type", "Storage class"))
for symbol in symbols:
section_nb_str = ""
if symbol.section_number <= 0:
section_nb_str = str(PE.SYMBOL_SECTION_NUMBER(symbol.section_number)).split(".")[-1]
else:
try:
section_nb_str = symbol.section.name
                except Exception:
section_nb_str = "section<{:d}>".format(symbol.section_number)
print(f_value.format(
symbol.name[:20],
symbol.value,
section_nb_str,
str(symbol.base_type).split(".")[-1],
str(symbol.complex_type).split(".")[-1],
str(symbol.storage_class).split(".")[-1]))
def print_imports(binary):
if binary.has_imports:
print("== Imports ==")
imports = binary.imports
for import_ in imports:
print(import_.name)
entries = import_.entries
f_value = " {:<33} 0x{:<14x} 0x{:<14x} 0x{:<16x}"
for entry in entries:
print(f_value.format(entry.name, entry.data, entry.iat_value, entry.hint))
print("")
def print_tls(binary):
format_str = "{:<33} {:<30}"
format_hex = "{:<33} 0x{:<28x}"
format_dec = "{:<33} {:<30d}"
if not binary.has_tls:
return
print("== TLS ==")
tls = binary.tls
callbacks = tls.callbacks
print(format_hex.format("Address of callbacks:", tls.addressof_callbacks))
if len(callbacks) > 0:
print("Callbacks:")
for callback in callbacks:
print(" " + hex(callback))
print(format_hex.format("Address of index:", tls.addressof_index))
print(format_hex.format("Size of zero fill:", tls.sizeof_zero_fill))
print("{:<33} 0x{:<10x} 0x{:<10x}".format("Address of raw data:",
tls.addressof_raw_data[0], tls.addressof_raw_data[1]))
print(format_hex.format("Size of raw data:", len(tls.data_template)))
print(format_hex.format("Characteristics:", tls.characteristics))
print(format_str.format("Section:", tls.section.name))
print(format_str.format("Data directory:", str(tls.directory.type)))
print("")
def print_relocations(binary):
if binary.has_relocations:
relocations = binary.relocations
print("== Relocations ==")
for relocation in relocations:
entries = relocation.entries
print(hex(relocation.virtual_address))
for entry in entries:
print(" 0x{:<8x} {:<8}".format(entry.position, str(entry.type).split(".")[-1]))
print("")
def print_export(binary):
if binary.has_exports:
print("== Exports ==")
exports = binary.get_export()
entries = exports.entries
f_value = "{:<20} 0x{:<10x} 0x{:<10x} 0x{:<6x} 0x{:<6x} 0x{:<10x}"
print(f_value.format(exports.name, exports.export_flags, exports.timestamp, exports.major_version, exports.minor_version, exports.ordinal_base))
for entry in entries:
extern = "[EXTERN]" if entry.is_extern else ""
print(" {:<20} 0x{:<6x} 0x{:<10x} {:<13}".format(entry.name[:20], entry.ordinal, entry.address, extern))
print("")
def print_debug(binary):
format_str = "{:<33} {:<30}"
format_hex = "{:<33} 0x{:<28x}"
format_dec = "{:<33} {:<30d}"
if binary.has_debug:
debug = binary.debug
print("== Debug ==")
print(format_hex.format("Characteristics:", debug.characteristics))
print(format_hex.format("Timestamp:", debug.timestamp))
print(format_dec.format("Major version:", debug.major_version))
print(format_dec.format("Minor version:", debug.minor_version))
print(format_str.format("type:", str(debug.type).split(".")[-1]))
print(format_hex.format("Size of data:", debug.sizeof_data))
print(format_hex.format("Address of raw data:", debug.addressof_rawdata))
print(format_hex.format("Pointer to raw data:", debug.pointerto_rawdata))
print("")
def print_signature(binary):
format_str = "{:<33} {:<30}"
format_hex = "{:<33} 0x{:<28x}"
format_dec = "{:<33} {:<30d}"
if not binary.has_signature:
return
signature = binary.signature
print("== Signature ==")
print(format_dec.format("Version:", signature.version))
print(format_str.format("Digest Algorithm:", oid_to_string(signature.digest_algorithm)))
print("")
print("-- Content Info --")
content_info = signature.content_info
print(format_str.format("Content Type:", oid_to_string(content_info.content_type)))
print(format_str.format("Type:", oid_to_string(content_info.type)))
print(format_str.format("Digest Algorithm:", oid_to_string(content_info.digest_algorithm)))
print("")
print("-- Certificates --")
certificates = signature.certificates
for crt in certificates:
sn_str = ":".join(map(lambda e : "{:02x}".format(e), crt.serial_number))
valid_from_str = "-".join(map(str, crt.valid_from[:3])) + " " + ":".join(map(str, crt.valid_from[3:]))
valid_to_str = "-".join(map(str, crt.valid_to[:3])) + " " + ":".join(map(str, crt.valid_to[3:]))
print(format_dec.format("Version:", crt.version))
print(format_str.format("Serial Number:", sn_str))
print(format_str.format("Signature Algorithm:", oid_to_string(crt.signature_algorithm)))
print(format_str.format("Valid from:", valid_from_str))
print(format_str.format("Valid to:", valid_to_str))
print(format_str.format("Issuer:", crt.issuer))
print(format_str.format("Subject:", crt.subject))
print("")
print("-- Signer Info --")
signer_info = signature.signer_info
issuer_str = " ".join(map(lambda e : oid_to_string(e[0]) + " = " + e[1], signer_info.issuer[0]))
print(format_dec.format("Version:", signer_info.version))
print(format_str.format("Issuer:", issuer_str))
print(format_str.format("Digest Algorithm:", oid_to_string(signer_info.digest_algorithm)))
print(format_str.format("Signature algorithm:", oid_to_string(signer_info.signature_algorithm)))
try:
print(format_str.format("Program name:", signer_info.authenticated_attributes.program_name))
except UnicodeError:
print(format_str.format("Program name:", signer_info.authenticated_attributes.program_name.encode('utf-8')))
print(format_str.format("Url:", signer_info.authenticated_attributes.more_info))
print("")
def main():
optparser = OptionParser(
usage='Usage: %prog [options] <pe-file>',
add_help_option = True,
prog=sys.argv[0])
optparser.add_option('-a', '--all',
action='store_true', dest='show_all',
help='Show all informations')
optparser.add_option('-d', '--data-directories',
action='store_true', dest='show_data_directories',
help='Display data directories')
optparser.add_option('--debug',
action='store_true', dest='show_debug',
help='Display debug directory')
optparser.add_option('-g', '--signature',
action='store_true', dest='show_signature',
help="Display the binary's signature if any")
optparser.add_option('-H', '--header',
action='store_true', dest='show_headers',
help='Display headers')
optparser.add_option('-i', '--import',
action='store_true', dest='show_imports',
help='Display imported functions and libraries')
optparser.add_option('-r', '--relocs',
action='store_true', dest='show_relocs',
help='Display the relocations (if present)')
optparser.add_option('-S', '--section-headers', '--sections',
action='store_true', dest='show_section_header',
help="Display the sections' headers")
optparser.add_option('-s', '--symbols', '--syms',
action='store_true', dest='show_symbols',
help='Display symbols')
optparser.add_option('-t', '--tls',
action='store_true', dest='show_tls',
help='Display TLS informations')
optparser.add_option('-x', '--export',
action='store_true', dest='show_export',
help='Display exported functions/libraries')
options, args = optparser.parse_args()
if len(args) == 0:
optparser.print_help()
sys.exit(1)
binary = None
try:
binary = PE.parse(args[0])
except lief.exception as e:
print(e)
sys.exit(1)
if options.show_data_directories or options.show_all:
print_data_directories(binary)
if options.show_headers or options.show_all:
print_header(binary)
if options.show_imports or options.show_all:
print_imports(binary)
if options.show_relocs or options.show_all:
print_relocations(binary)
if options.show_section_header or options.show_all:
print_sections(binary)
if options.show_symbols or options.show_all:
print_symbols(binary)
if options.show_tls or options.show_all:
print_tls(binary)
if options.show_export or options.show_all:
print_export(binary)
if options.show_debug or options.show_all:
print_debug(binary)
if options.show_signature or options.show_all:
print_signature(binary)
if __name__ == "__main__":
main()
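# Example invocations (illustrative; the target binary is a placeholder):
#
#     python pe_reader.py --all some_program.exe
#     python pe_reader.py --import --sections some_program.exe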
# === /program9-11/main_pro.py (meenapandey500/Python_program) ===
import file_class as f1
# main program
f = f1.files()  # f is an object of the class files
f.writefile()
f.readfile()
f.copyfile()
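# A minimal sketch of the file_class module this script assumes (hypothetical
# reconstruction: only the class and method names are known from the calls
# above; the file names and contents below are invented for illustration):
#
#     # file_class.py
#     class files:
#         def writefile(self):
#             with open("data.txt", "w") as fh:
#                 fh.write("sample text\n")
#
#         def readfile(self):
#             with open("data.txt") as fh:
#                 print(fh.read())
#
#         def copyfile(self):
#             with open("data.txt") as src, open("copy.txt", "w") as dst:
#                 dst.write(src.read())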
# === /Python/AdafruitIO/SendBlockHeat02_0.py (robingreig/raspi-git) ===
#!/usr/bin/python
# Import Library & create instance of REST client
from Adafruit_IO import Client
aio = Client('7e01e8b5e56360efc48a27682324fc353e18d14f')
# Send the value of 0 to BlockHeat02
aio.send('blockheat02',0)
# Retrieve the most recent value from 'BlockHeat02'
data = aio.receive('BlockHeat02')
print('Received Value: {0}'.format(data.value))
# === Configurations/HWWSemiLepHighMass/Full_v6Production/template_seed/templates_jhchoi_combine/StructureFiles/MakeSampleStructureNuisancePythons.py (bhoh/SNuAnalytics) ===
import os
import sys
sys.path.insert(0, os.getcwd()+"/../MassPoints")
sys.path.insert(0, os.getcwd()+"/../")
##--signal Mass points--##
from List_MX import *
from List_MX_VBF import *
from WPandCut2016 import Year
List_MX_common=list(set(List_MX).intersection(List_MX_VBF))
##--bkg--##
#BKG=[ 'DY', 'WZZ', 'WWZ','WWW','ZZZ', 'ZZ', 'WZ', 'WW', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets0j', 'Wjets1j', 'Wjets2j','vbfHWWlnuqq_M125','ggHWWlnuqq_M125'] + ['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets0j', 'Wjets1j', 'Wjets2j','vbfHWWlnuqq_M125','ggHWWlnuqq_M125']# +['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets','vbfHWWlnuqq_M125','ggHWWlnuqq_M125']# +['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'qqWWqq', 'top', 'Wjets','ggWW','h125','QCD','HTT']# +['QCD_MU','QCD_EM','QCD_bcToE']
BKG=[ 'DY', 'MultiV', 'qqWWqq', 'top', 'Wjets','ggWW','ggHWWlnuqq_M125','QCD','HTT']# +['QCD_MU','QCD_EM','QCD_bcToE']
if Year=='2016':
BKG.append('vbfHWWlnuqq_M125')
###---Make samples dictionary---##
#handle=open('../sample_2016.py','r')
#exec(handle)
#handle.close()
##---Make samples file for plotting and Runcard
#f=open()
#for s in samples:##
# f.write('samples["'+s+'"]={}\n')
#List_SampleTemplate=['samples_2016limit_MassTemplate_ele.py','samples_2016limit_MassTemplate_mu.py']
#List_StructureTemplate=['structure_MassTemplate_ele.py','structure_MassTemplate_mu.py']
print "-----sampleFile-----"
for MX in List_MX_common:
for flv in ['ele','mu']:
print MX
##SampleTemplate
        for rg in ['SR','TOP','SB']: # rg is unused here; the same file is rewritten once per region
f=open('samples_limit_M'+str(MX)+'_'+flv+'.py','w') ##samples_limit_M
for s in BKG:
f.write('samples["'+s+'"]={}\n')
f.write('samples["DATA"]={}\n')
f.write('samples["ggHWWlnuqq_M'+str(MX)+'_S"]={}\n')
f.write('samples["vbfHWWlnuqq_M'+str(MX)+'_S"]={}\n')
f.write('samples["ggHWWlnuqq_M'+str(MX)+'_SBI"]={}\n')
f.write('samples["vbfHWWlnuqq_M'+str(MX)+'_SBI"]={}\n')
f.close()
print "------structure File------"
for MX in List_MX_common:
for flv in ['ele','mu']:
print MX
##SampleTemplate
        for rg in ['SR','TOP','SB']: # rg is unused here; the same file is rewritten once per region
f=open('structure_M'+str(MX)+'_'+flv+'.py','w')
for s in BKG:
f.write('structure["'+s+'"]={\n\
"isSignal" : 0,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["DATA"]={\n\
"isSignal" : 0,\n\
"isData" : 1 ,\n\
}\n')
f.write('structure["ggHWWlnuqq_M'+str(MX)+'_S"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["vbfHWWlnuqq_M'+str(MX)+'_S"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["ggHWWlnuqq_M'+str(MX)+'_SBI"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["vbfHWWlnuqq_M'+str(MX)+'_SBI"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.close()
##---Make Final Nuisance
#nuisances['dynorm']['sample']
defaultNuisanceFile='../nuisances.py'
f=open(defaultNuisanceFile,'r')
fnew=open('nuisance.py','w')
lines=f.readlines()
for line in lines:
fnew.write(line)
fnew.write(
'''
for n in nuisances:
for s in sorted(nuisances[n]['samples']):
if '_S' in s:
sbi=s.replace('_S','_SBI')
nuisances[n]['samples'][sbi]=nuisances[n]['samples'][s]
'''
)
fnew.close()
os.system('cp nuisance.py nuisance_Boosted.py')
os.system('cp nuisance.py nuisance_Resolved.py')
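# For reference, a generated samples_limit_M<MX>_<flv>.py from the first loop
# above looks like this (illustrative, shown for MX=2000; one line per entry
# in BKG, then the data and signal entries):
#
#     samples["DY"]={}
#     samples["MultiV"]={}
#     ...
#     samples["DATA"]={}
#     samples["ggHWWlnuqq_M2000_S"]={}
#     samples["vbfHWWlnuqq_M2000_S"]={}
#     samples["ggHWWlnuqq_M2000_SBI"]={}
#     samples["vbfHWWlnuqq_M2000_SBI"]={}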
# === portal/apps/endless_pagination/templatetags/endless.py (hernan0216/utopia-cms, BSD-3-Clause) ===
import re
from django import template
from endless_pagination import settings, models, utils
from endless_pagination.paginator import DefaultPaginator, LazyPaginator, EmptyPage
register = template.Library()
@register.tag
def paginate(parser, token, paginator_class=None):
"""
Usage::
{% paginate objects %}
After this call, in the template context the *objects* variable is replaced
with only the objects of the current page.
    You can also maintain your *objects* original variable (commonly a queryset)
and add to context another name referring to objects of the current page,
e.g.::
{% paginate objects as page_objects %}
    The number of paginated objects is taken from settings, but you can
override the default, e.g.::
{% paginate 20 objects %}
Of course you can mix it all::
{% paginate 20 objects as paginated_objects %}
By default, the first page is displayed the first time you load the page,
but you can easily change this, e.g.::
{% paginate objects starting from page 3 %}
    This can also be achieved using a template variable you passed in the
context, e.g.::
{% paginate objects starting from page page_number %}
If the passed page number does not exist then first page is displayed.
If you have multiple paginations in the same page, you can change the
querydict key for the single pagination, e.g.::
{% paginate objects using article_page %}
In this case *article_page* is intended to be a context variable, but you can
hardcode the key using quotes, e.g.::
{% paginate objects using 'articles_at_page' %}
Again, you can mix it all (the order of arguments is important)::
{% paginate 20 objects starting from page 3 using page_key as paginated_objects %}
Additionally you can pass a path to be used for the pagination::
{% paginate 20 objects using page_key with pagination_url as paginated_objects %}
If you want the first page to contain a different number of items than
subsequent pages you can separate the two values with a comma, e.g. if
you want 3 items on the first page and 10 on other pages::
{% paginate 3,10 objects %}
You must use this tag before calling the {% show_more %} one.
"""
# args validation
try:
tag_name, tag_args = token.contents.split(None, 1)
except ValueError:
message = "%r tag requires arguments" % token.contents.split()[0]
raise template.TemplateSyntaxError, message
# use regexp to catch args
p = r'^(((?P<first_page>\w+)\,)?(?P<per_page>\w+)\s+)?(?P<objects>\w+)(\s+starting\s+from\s+page\s+(?P<number>\w+))?(\s+using\s+(?P<key>[\"\'\w]+))?(\s+with\s+(?P<override_path>\w+))?(\s+as\s+(?P<var_name>\w+))?$'
e = re.compile(p)
match = e.match(tag_args)
if match is None:
message = "Invalid arguments for %r tag" % token.contents.split()[0]
raise template.TemplateSyntaxError, message
# get objects
kwargs = match.groupdict()
objects = kwargs.pop("objects")
# call the node
return PaginateNode(paginator_class, objects, **kwargs)
@register.tag
def lazy_paginate(parser, token):
"""
Paginate objects without hitting the database with a *select count* query.
Use this the same way as *paginate* tag when you are not interested
in the total number of pages.
"""
return paginate(parser, token, paginator_class=LazyPaginator)
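# A minimal template usage sketch for lazy_paginate (illustrative; assumes an
# `entries` queryset in the template context):
#
#     {% load endless %}
#     {% lazy_paginate 20 entries %}
#     {% for entry in entries %}{{ entry }}{% endfor %}
#     {% show_more %}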
class PaginateNode(template.Node):
"""
Insert into context the objects of the current page and
the django paginator's *page* object.
"""
def __init__(self, paginator_class, objects, first_page=None, per_page=None,
var_name=None, number=None, key=None, override_path=None):
self.paginator = paginator_class or DefaultPaginator
self.objects = template.Variable(objects)
# if var_name is not passed then will be queryset name
self.var_name = objects if var_name is None else var_name
# if per_page is not passed then is taken from settings
self.per_page_variable = None
if per_page is None:
self.per_page = settings.PER_PAGE
elif per_page.isdigit():
self.per_page = int(per_page)
else:
self.per_page_variable = template.Variable(per_page)
# manage first page: if it is not passed then *per_page* is used
self.first_page_variable = None
if first_page is None:
self.first_page = None
elif first_page.isdigit():
self.first_page = int(first_page)
else:
self.first_page_variable = template.Variable(first_page)
# manage page number when is not specified in querystring
self.page_number_variable = None
if number is None:
self.page_number = 1
elif number.isdigit():
self.page_number = int(number)
else:
self.page_number_variable = template.Variable(number)
# set the querystring key attribute
self.querystring_key_variable = None
if key is None:
self.querystring_key = settings.PAGE_LABEL
elif key[0] in ('"', "'") and key[-1] == key[0]:
self.querystring_key = key[1:-1]
else:
self.querystring_key_variable = template.Variable(key)
self.override_path_variable = None
if override_path is None:
self.override_path = None
else:
self.override_path_variable = template.Variable(override_path)
def render(self, context):
# get page number to use if it is not specified in querystring
if self.page_number_variable is None:
default_number = self.page_number
else:
default_number = int(self.page_number_variable.resolve(context))
# get number of items to show on each page
if self.per_page_variable is None:
per_page = self.per_page
else:
per_page = int(self.per_page_variable.resolve(context))
# get number of items to show in the first page
if self.first_page_variable is None:
first_page = self.first_page or per_page
else:
first_page = int(self.first_page_variable.resolve(context))
# user can override settings querystring key in the template
if self.querystring_key_variable is None:
querystring_key = self.querystring_key
else:
querystring_key = self.querystring_key_variable.resolve(context)
if self.override_path_variable is None:
override_path = self.override_path
else:
override_path = self.override_path_variable.resolve(context)
# request is used to get requested page number
page_number = utils.get_page_number_from_request(context["request"],
querystring_key, default=default_number)
objects = self.objects.resolve(context)
        # display page 1 to page_number, if LazyPaginator is used and the
        # request isn't an ajax request
# eg: display pages 1 to 3 if a user requests ?page=3 with normal get
# and lazy pagination is used
if issubclass(self.paginator, LazyPaginator) and (
context.has_key("request") and not context["request"].is_ajax()):
paginator = self.paginator(objects, per_page, first_page=page_number*per_page,
orphans=settings.ORPHANS)
page = paginator.page(1)
page.number = page_number
if paginator.num_pages > 1:
paginator._num_pages = page_number + 1
else:
paginator = self.paginator(objects, per_page, first_page=first_page,
orphans=settings.ORPHANS)
# get the page, user in settings can manage the case it is empty
try:
page = paginator.page(page_number)
except EmptyPage:
page = paginator.page(1)
# populate context with new data
context["endless_default_number"] = default_number
context["endless_querystring_key"] = querystring_key
context["endless_page"] = page
context[self.var_name] = page.object_list
context["endless_override_path"] = override_path
return ""
@register.inclusion_tag("endless/show_more.html", takes_context=True)
def show_more(context, label=None, loading=settings.LOADING):
"""
Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the
default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after {% paginate objects %}.
"""
# this can raise a PaginationError
# (you have to call paginate before including the show more template)
page = utils.get_page_from_context(context)
# show the template only if there is a next page
if page.has_next():
request = context["request"]
page_number = page.next_page_number()
# querystring
querystring_key = context["endless_querystring_key"]
querystring = utils.get_querystring_for_page(request, page_number,
querystring_key, default_number=context["endless_default_number"])
return {
'path': context["endless_override_path"] or request.path,
'querystring_key': querystring_key,
'querystring': querystring,
'loading': loading,
'label': label,
'request': request,
}
# no next page, nothing to see
return {}
@register.tag
def get_pages(parser, token):
"""
Usage::
{% get_pages %}
This is mostly used for digg-style pagination.
This call inserts in the template context a *pages* variable, as a sequence
of page links. You can use *pages* in different ways:
- just print *pages* and you will get digg-style pagination displayed::
{{ pages }}
- display pages count::
{{ pages|length }}
- get a specific page::
{# the current selected page #}
{{ pages.current }}
{# the first page #}
{{ pages.first }}
{# the last page #}
{{ pages.last }}
{# the previous page (or nothing if you are on first page) #}
{{ pages.previous }}
{# the next page (or nothing if you are in last page) #}
{{ pages.next }}
{# the third page #}
{{ pages.3 }}
{# this means page.1 is the same as page.first #}
- iterate over *pages* to get all pages::
{% for page in pages %}
{# display page link #}
{{ page }}
{# the page url (beginning with "?") #}
{{ page.url }}
{# the page path #}
{{ page.path }}
{# the page number #}
{{ page.number }}
{# a string representing the page (commonly the page number) #}
{{ page.label }}
{# check if the page is the current one #}
{{ page.is_current }}
{# check if the page is the first one #}
{{ page.is_first }}
{# check if the page is the last one #}
{{ page.is_last }}
{% endfor %}
You can change the variable name, e.g.::
{% get_pages as page_links %}
Must be called after {% paginate objects %}.
"""
# args validation
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
var_name = "pages"
else:
args = args.split()
if len(args) == 2 and args[0] == "as":
var_name = args[1]
else:
message = "%r tag invalid arguments" % tag_name
raise template.TemplateSyntaxError, message
# call the node
return GetPagesNode(var_name)
class GetPagesNode(template.Node):
"""
Insert into context the page list.
"""
def __init__(self, var_name):
self.var_name = var_name
def render(self, context):
# this can raise a PaginationError
# (you have to call paginate before including the get pages template)
page = utils.get_page_from_context(context)
# put the PageList instance in the context
context[self.var_name] = models.PageList(context["request"], page,
context["endless_querystring_key"],
default_number=context["endless_default_number"],
override_path=context["endless_override_path"])
return ""
@register.tag
def show_pages(parser, token):
"""
Show page links.
Usage::
{% show_pages %}
It is only a shortcut for::
{% get_pages %}
{{ pages }}
You can set *ENDLESS_PAGINATION_PAGE_LIST_CALLABLE* in your settings.py
as a callable used to customize the pages that are displayed.
The callable takes the current page number and the total number of pages
and must return a sequence of page numbers that will be displayed.
The sequence can contain other values:
- *"previous"*: will display the previous page in that position
- *"next"*: will display the next page in that position
- *None*: a separator will be displayed in that position
    Here is an example of a custom callable that displays previous page, then
first page, then a separator, then current page, then next page::
def get_page_numbers(current_page, num_pages):
return ("previous", 1, "...", current_page, "next")
If *ENDLESS_PAGINATION_PAGE_LIST_CALLABLE* is *None* an internal
callable is used, generating a digg-style pagination.
Must be called after {% paginate objects %}.
"""
# args validation
if len(token.contents.split()) != 1:
message = "%r tag takes no arguments" % token.contents.split()[0]
raise template.TemplateSyntaxError, message
# call the node
return ShowPagesNode()
class ShowPagesNode(template.Node):
"""
Show the pagination.
"""
def render(self, context):
# this can raise a PaginationError
# (you have to call paginate before including the get pages template)
page = utils.get_page_from_context(context)
# unicode representation of the sequence of pages
pages = models.PageList(context["request"], page,
context["endless_querystring_key"],
default_number=context["endless_default_number"],
override_path=context["endless_override_path"])
return unicode(pages)
@register.tag
def show_current_number(parser, token):
"""
Show (or insert in the context) the current page number.
This tag can be useful for example to change page title according to
current page number.
To just show current page number::
{% show_current_number %}
If you use multiple paginations in the same page you can get the page
number for a specific pagination using the querystring key, e.g.::
{% show_current_number using mykey %}
Default page when no querystring is specified is 1. If you changed it in
the *paginate* template tag, you have to call *show_current_number*
according to your choice, e.g.::
{% show_current_number starting from page 3 %}
    This can also be achieved using a template variable you passed in the
context, e.g.::
{% show_current_number starting from page page_number %}
Of course, you can mix it all (the order of arguments is important)::
{% show_current_number starting from page 3 using mykey %}
If you want to insert the current page number in the context, without
actually displaying it in the template, use the *as* argument, i.e.::
{% show_current_number as page_number %}
{% show_current_number starting from page 3 using mykey as page_number %}
"""
# args validation
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
tag_name = token.contents[0]
number = None
key = None
var_name = None
else:
# use regexp to catch args
p = r'^(starting\s+from\s+page\s+(?P<number>\w+))?\s*(using\s+(?P<key>[\"\'\w]+))?\s*(as\s+(?P<var_name>\w+))?$'
e = re.compile(p)
match = e.match(args)
if match is None:
message = "Invalid arguments for %r tag" % tag_name
raise template.TemplateSyntaxError, message
# get objects
groupdict = match.groupdict()
number = groupdict["number"]
key = groupdict["key"]
var_name = groupdict["var_name"]
# call the node
return ShowCurrentNumberNode(number, key, var_name)
class ShowCurrentNumberNode(template.Node):
"""
Show the page number taken from context.
"""
def __init__(self, number, key, var_name):
# page number
self.page_number_variable = None
if number is None:
self.page_number = 1
elif number.isdigit():
self.page_number = int(number)
else:
self.page_number_variable = template.Variable(number)
# querystring key
self.querystring_key_variable = None
if key is None:
self.querystring_key = settings.PAGE_LABEL
elif key[0] in ('"', "'") and key[-1] == key[0]:
self.querystring_key = key[1:-1]
else:
self.querystring_key_variable = template.Variable(key)
# var name
self.var_name = var_name
def render(self, context):
# get page number to use if it is not specified in querystring
if self.page_number_variable is None:
default_number = self.page_number
else:
default_number = int(self.page_number_variable.resolve(context))
# user can override settings querystring key in the template
if self.querystring_key_variable is None:
querystring_key = self.querystring_key
else:
querystring_key = self.querystring_key_variable.resolve(context)
page_number = utils.get_page_number_from_request(context["request"],
querystring_key, default=default_number)
if self.var_name is None:
return unicode(page_number)
context[self.var_name] = page_number
return u''
# === /samples/cli/accelbyte_py_sdk_cli/dsmc/_get_pod_config.py (AccelByte/accelbyte-python-sdk, MIT) ===
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Dsm Controller Service (6.4.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.dsmc import get_pod_config as get_pod_config_internal
from accelbyte_py_sdk.api.dsmc.models import ModelsPodConfigRecord
from accelbyte_py_sdk.api.dsmc.models import ResponseError
@click.command()
@click.argument("name", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_pod_config(
name: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(get_pod_config_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = get_pod_config_internal(
name=name,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"GetPodConfig failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
get_pod_config.operation_id = "GetPodConfig"
get_pod_config.is_deprecated = False
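# A minimal invocation sketch using click's test runner (illustrative; the pod
# name and namespace are placeholders, and valid AccelByte credentials must be
# configured for the request to succeed):
#
#     from click.testing import CliRunner
#
#     result = CliRunner().invoke(get_pod_config, ["my-pod", "--namespace", "my-game"])
#     print(result.output)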
# === /dhira/data/dataset/internal/indexed_text.py (Mageswaran1989/dhira) ===
import logging
from tqdm import tqdm_notebook as tqdm
from dhira.data.dataset.internal.dataset_base import Dataset
from dhira.data.dataset.internal.text import TextDataset
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class IndexedDataset(Dataset):
"""
A Dataset of IndexedFeatures, with some helper methods.
IndexedFeatures have text sequences replaced with lists of word indices,
and are thus able to be padded to consistent lengths and converted to
training inputs.
"""
def __init__(self, name='default', feature_type = None, train_files = None, test_files = None, val_files = None,
min_count=1, pad=False, max_lengths=None, index_mode='word', pickle_dir=None, train_features=None,
val_features=None,
test_features=None):
"""
        :param train_files/test_files/val_files: lists of dataset file paths;
            the corresponding load_*_features() methods read and index them
:param min_count: int, default=1
The minimum number of times a word must occur in order
to be indexed.
        :param pad: boolean, default=False
If True, pads or truncates the features to either the input
max_lengths or max_lengths across the train filenames. If False,
no padding or truncation is applied.
:param max_lengths: dict from str to int, default=None
If not None, the max length of a sequence in a given dimension.
The keys for this dict must be in the same format as
the features' get_lengths() function. These are the lengths
that the features are padded or truncated to.
        :param index_mode: str, optional (default="word")
String describing whether to return the word-level representations,
character-level representations, or both. One of "word",
"character", or "word+character"
"""
super(IndexedDataset, self).__init__(name=name,
feature_type=feature_type,
train_files=train_files,
test_files=test_files,
val_files=val_files,
pickle_dir=pickle_dir,
train_features=train_features,
val_features=val_features,
test_features=test_features,)
self.min_count = min_count
self.data_indexer = None
self.data_indexer_fitted = False
self.pad = pad
self.max_lengths = max_lengths
self.index_mode = index_mode
# We now need to check if the user specified max_lengths for
# the feature, and accordingly truncate or pad if applicable. If
# max_lengths is None for a given string key, we assume that no
# truncation is to be done and the max lengths should be read from the
# features.
if not self.pad and self.max_lengths:
raise ValueError("Passed in max_lengths {}, but set pad to false. "
"Did you mean to do this?".format(self.max_lengths))
@staticmethod
def max_lengths(features):
"""
        Compute, for each padding dimension, the maximum length observed
        across the given features.

        :return: dict mapping each length key to its maximum value.
"""
max_lengths = {}
lengths = [feature.get_lengths() for feature in features]
if not lengths:
return max_lengths
for key in lengths[0]:
max_lengths[key] = max(x[key] if key in x else 0 for x in lengths)
return max_lengths
@staticmethod
def pad_features(features, max_lengths=None):
"""
Make all of the IndexedFeatures in the dataset have the same length
by padding them (in the front) with zeros.
If max_length is given for a particular dimension, we will pad all
features to that length (including left-truncating features if
necessary). If not, we will find the longest feature and pad all
features to that length. Note that max_lengths is a _List_, not an int
- there could be several dimensions on which we need to pad, depending
on what kind of feature we are dealing with.
This method _modifies_ the current object, it does not return a new
IndexedDataset.
"""
# First we need to decide _how much_ to pad. To do that, we find the
# max length for all relevant padding decisions from the features
# themselves. Then we check whether we were given a max length for a
# particular dimension. If we were, we use that instead of the
# feature-based one.
logger.info("Getting max lengths from features")
feature_max_lengths = IndexedDataset.max_lengths(features)
logger.info("Feature max lengths: %s", str(feature_max_lengths))
lengths_to_use = {}
for key in feature_max_lengths:
if max_lengths and max_lengths[key] is not None:
lengths_to_use[key] = max_lengths[key]
else:
lengths_to_use[key] = feature_max_lengths[key]
logger.info("Now actually padding features to length: %s",
str(lengths_to_use))
for feature in tqdm(features):
feature.pad(lengths_to_use)
return features
def max_lengths_to_use(self, features):
# Get max lengths from the dataset
dataset_max_lengths = IndexedDataset.max_lengths(features)
logger.info("Instance max lengths {}".format(dataset_max_lengths))
max_lengths_to_use = dataset_max_lengths
if self.pad:
# If the user set max lengths, iterate over the
# dictionary provided and verify that they did not
# pass any keys to truncate that are not in the feature.
if self.max_lengths is not None:
for input_dimension, length in self.max_lengths.items():
if input_dimension in dataset_max_lengths:
max_lengths_to_use[input_dimension] = length
else:
raise ValueError("Passed a value for the max_lengths "
"that does not exist in the "
"feature. Improper input length "
"dimension (key) we found was {}, "
"lengths dimensions in the feature "
"are {}".format(input_dimension,
dataset_max_lengths.keys()))
logger.info("Padding lengths to length: {}".format(str(max_lengths_to_use)))
return max_lengths_to_use
def load_train_features(self):
if self.data_indexer_fitted:
raise ValueError("You have already called get_train_data for this "
"dataset, so you cannnot do it again. "
"If you want to train on multiple datasets, pass "
"in a list of files.")
logger.info("Getting training data from {}".format(self.train_files))
        if not (self.check_pickle_exists(self.train_pickle_file) and
                self.check_pickle_exists(self.indexer_pickle_file)):
logger.info("Processing the train data file for first time")
self.train_features = TextDataset.to_features(self.train_files, self.feature_type)
self.data_indexer = TextDataset.fit_data_indexer(self.train_features, self.min_count)
self.data_indexer_fitted = True
self.train_features = TextDataset.to_indexed_features(self.train_features, self.data_indexer)
self.training_data_max_lengths = self.max_lengths_to_use(self.train_features)
self.write_pickle(self.train_features, self.train_pickle_file)
self.write_pickle(self.data_indexer, self.indexer_pickle_file)
else:
logger.info("Reusing the pickle file {}.".format(self.train_pickle_file))
self.train_features = self.read_pickle(self.train_pickle_file)
logger.info("Reusing the pickle file {}.".format(self.indexer_pickle_file))
self.data_indexer = self.read_pickle(self.indexer_pickle_file)
self.training_data_max_lengths = self.max_lengths_to_use(self.train_features)
def load_val_features(self):
logger.info("Getting validation data from {}".format(self.val_files))
if not self.check_pickle_exists(self.val_pickle_file):
logger.info("Processing the validation data file for first time")
self.val_features = TextDataset.to_features(self.val_files, self.feature_type)
            self.val_features = TextDataset.to_indexed_features(self.val_features, self.data_indexer)
self.max_lengths_to_use(self.val_features)
self.write_pickle(self.val_features, self.val_pickle_file)
else:
logger.info("Reusing the pickle file {}.".format(self.val_features))
self.val_features = self.read_pickle(self.val_pickle_file)
def load_test_features(self):
logger.info("Getting test data from {}".format(self.test_files))
if not self.check_pickle_exists(self.test_pickle_file):
logger.info("Processing the test data file for first time")
self.test_features = TextDataset.to_features(self.test_files, self.feature_type)
            self.test_features = TextDataset.to_indexed_features(self.test_features, self.data_indexer)
self.max_lengths_to_use(self.test_features)
self.write_pickle(self.test_features, self.test_pickle_file)
else:
logger.info("Reusing the pickle file {}.".format(self.test_features))
self.test_features = self.read_pickle(self.test_pickle_file)
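    # The three generators below each yield one (inputs, labels) pair per
    # feature, padded to the lengths computed from the training data, e.g.:
    #     for inputs, labels in dataset.get_train_batch_generator():
    #         model.train_on_batch(inputs, labels)  # hypothetical consumer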
def get_train_batch_generator(self):
for feature in self.train_features:
# For each instance, we want to pad or truncate if applicable
if self.pad:
feature.pad(self.training_data_max_lengths)
# Now, we want to take the instance and convert it into
# NumPy arrays suitable for training.
inputs, labels = feature.as_training_data(mode='word')
yield inputs, labels
def get_validation_batch_generator(self):
for feature in self.val_features:
# For each instance, we want to pad or truncate if applicable
if self.pad:
feature.pad(self.training_data_max_lengths)
# Now, we want to take the instance and convert it into
# NumPy arrays suitable for training.
inputs, labels = feature.as_training_data(mode='word')
yield inputs, labels
def get_test_batch_generator(self):
for feature in self.test_features:
# For each instance, we want to pad or truncate if applicable
if self.pad:
feature.pad(self.training_data_max_lengths)
# Now, we want to take the instance and convert it into
# NumPy arrays suitable for training.
inputs, labels = feature.as_training_data(mode='word')
yield inputs, labels | [
"[email protected]"
] | |
d798508db278b21b33d7535d4228a09122e05c85 | 4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7 | /monitoring/forms.py | e1648ae00d7df6d2c21eabc75faeffdc180af623 | [] | no_license | quentin-david/heimdall | f72a85606e7ab53683df2023ef5eaba762198211 | 84a429ee52e1891bc2ee4eb07a084dff209c789c | refs/heads/master | 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django import forms
from .models import Munin
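# ModelForm that auto-generates one form field per column of the Munin model.
# View-side usage sketch (assuming a standard Django request cycle):
#     form = MuninForm(request.POST or None)
#     if form.is_valid():
#         form.save()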
class MuninForm(forms.ModelForm):
class Meta:
model = Munin
fields = '__all__' | [
"[email protected]"
] | |
89ea2edafb311dcf15707aa79731a3be564775b6 | 1804187f39dd6004250933b35ba9ce24297f32a5 | /strip.py | 93c4a5fd8135cbbe7e5fba71b7874cec07e99291 | [] | no_license | xiaomengxiangjia/Python | ecd2e3e8576364f15482669cb75b52b8790543f5 | 7f52a33d7956068d26347cf34d35c953b945a635 | refs/heads/master | 2020-03-20T23:01:09.981928 | 2018-08-23T09:04:53 | 2018-08-27T05:46:38 | 137,825,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | favorite_language = ' python '
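# str.strip() returns a copy with surrounding whitespace removed;
# lstrip()/rstrip() trim only one side.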
print(favorite_language.strip())
| [
"[email protected]"
] | |
2b8a72aa08de33202a5c8687406a44e3a25ec8fa | 0e738ccc77594585c08e2e8a87a67253d13f57b0 | /flask-project-v8/manage.py | 553b4f14fca06cd8a55af1b8edeb6eb9114e6f36 | [] | no_license | saurabh-kumar88/flask-projects | cb02a991e05dbcf6a467bb126a4efecbe4bc4126 | 02827743e7a52f562be03975ceea9de10a4346cf | refs/heads/main | 2023-01-31T06:42:15.273505 | 2020-12-14T09:09:07 | 2020-12-14T09:09:07 | 321,291,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app.models import *
app = Flask(__name__)
# Assumed placeholder value: flask_sqlalchemy needs SQLALCHEMY_DATABASE_URI
# configured before the database or migrations are used.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
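# Typical migration workflow exposed by flask_script/flask_migrate:
#     python manage.py db init
#     python manage.py db migrate
#     python manage.py db upgrade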
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
b2fc87109627385c185b8e75175bcac1bd8e1839 | 445539eefd37bbd8feb327574333b464bbe9f858 | /33/pool_pipe_celcius.py | d8ad66384d96d947b427ea589cc7b2bb2fdd0286 | [
"CC0-1.0"
] | permissive | yz-liu/cpython-book-samples | 8a2753ca2cebf8e5d8f5822e28ccf278f17864ae | d5a7cd72d14231a35d1d8b2ec74b04a171170686 | refs/heads/master | 2023-01-19T12:21:27.354487 | 2020-11-26T00:16:40 | 2020-11-26T00:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | import multiprocessing as mp
from multiprocessing.connection import Connection

def to_celcius(child_pipe: Connection):
    # values the main process sends on parent_pipe arrive on this end
    f = child_pipe.recv()
    # time-consuming task ...
    c = (f - 32) * (5/9)
    # a reply sent on this end arrives back on parent_pipe
    child_pipe.send(c)
if __name__ == '__main__':
mp.set_start_method('spawn')
with mp.Pool(2) as pool:
parent_pipe, child_pipe = mp.Pipe()
for i in range(110, 150, 10):
parent_pipe.send(i)
            pool.apply_async(to_celcius, args=(child_pipe,))
            print(parent_pipe.recv())  # blocks until the worker replies
parent_pipe.close()
child_pipe.close() | [
"[email protected]"
] | |
038a78ecf4331b3d18fb16a79383f077385711f8 | 8c14c6fef7539f3f946b955d4677a8c2f25bb7f1 | /src/vsc/model/rangelist_model.py | 8b135085fdf2c2385fb5c136cf7367cbd096cab5 | [
"Apache-2.0"
] | permissive | hodjat91/pyvsc | 2ce8b4cb1582793caee8f994e73ab867ef0eefb8 | 9b268db1970cd43058ea02f4fdbdc31990046230 | refs/heads/master | 2022-11-11T16:29:30.056186 | 2020-07-01T01:14:10 | 2020-07-01T01:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
# Created on Aug 4, 2019
#
# @author: ballance
class RangelistModel(object):
    def __init__(self, rl: List[List[int]] = None):
self.range_l = []
if rl is not None:
for r in rl:
if isinstance(r, list):
if len(r) == 2:
self.range_l.append([r[0], r[1]])
else:
raise Exception("Each range element must have 2 elements")
else:
self.range_l.append([int(r), int(r)])
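    # Construction example (illustrative): RangelistModel([[0, 3], 7]) stores
    # ranges [[0, 3], [7, 7]], so `2 in m` and `7 in m` are True while
    # `5 in m` is False.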
def add_value(self, v):
self.range_l.append([v, v])
def add_range(self, low, high):
self.range_l.append([low, high])
def __contains__(self, val):
for r in self.range_l:
if val >= r[0] and val <= r[1]:
return True
return False
    def equals(self, oth) -> bool:
        eq = isinstance(oth, RangelistModel)
        if eq and len(self.range_l) == len(oth.range_l):
for i in range(len(self.range_l)):
eq &= self.range_l[i][0] == oth.range_l[i][0]
eq &= self.range_l[i][1] == oth.range_l[i][1]
else:
eq = False
return eq
def clone(self):
ret = RangelistModel(None)
for r in self.range_l:
ret.range_l.append([r[0], r[1]])
return ret | [
"[email protected]"
] |